gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Instrument test cases.
"""
import pytest
import tvm
import tvm.relay
from tvm.relay import op
from tvm.ir.instrument import PassTimingInstrument, pass_instrument
def get_test_model():
    """Build a small Relay module exercising add/subtract/multiply/divide."""
    var_x, var_y, var_z = (
        tvm.relay.var(name, shape=(3, 4), dtype="float32") for name in "xyz"
    )
    added = op.add(var_x, var_y)
    diff = op.subtract(var_x, var_z)
    prod = op.multiply(added, added / diff)
    return tvm.IRModule.from_expr(prod + diff)
def test_pass_timing_instrument():
    """PassTimingInstrument records pass names while active, nothing after reset."""
    timing = PassTimingInstrument()
    # Install the timing instrument on the current (global) PassContext.
    tvm.transform.PassContext.current().override_instruments([timing])
    mod = get_test_model()
    for xform in (
        tvm.relay.transform.AnnotateSpans(),
        tvm.relay.transform.ToANormalForm(),
        tvm.relay.transform.InferType(),
    ):
        mod = xform(mod)
    report = timing.render()
    for pass_name in ("AnnotateSpans", "ToANormalForm", "InferType"):
        assert pass_name in report
    # Passing None restores the default (empty) instrument list.
    tvm.transform.PassContext.current().override_instruments(None)
    mod = get_test_model()
    for xform in (
        tvm.relay.transform.AnnotateSpans(),
        tvm.relay.transform.ToANormalForm(),
        tvm.relay.transform.InferType(),
    ):
        mod = xform(mod)
    assert timing.render() == ""
# Parametrize tests over the two supported ways of defining a pass
# instrument: via the @pass_instrument decorator or by subclassing
# tvm.ir.instrument.PassInstrument.
# NOTE(review): this relies on `tvm.testing` being reachable through the
# bare `import tvm` above — confirm `tvm.testing` is implicitly loaded.
instrument_definition_type = tvm.testing.parameter("decorator", "subclass")
def test_custom_instrument(instrument_definition_type):
    """A user-defined instrument receives ctx enter/exit and pass before/after calls."""

    class BaseTest:
        def __init__(self):
            self.events = []

        def enter_pass_ctx(self):
            self.events.append("enter ctx")

        def exit_pass_ctx(self):
            self.events.append("exit ctx")

        def run_before_pass(self, mod, info):
            self.events.append("run before " + info.name)

        def run_after_pass(self, mod, info):
            self.events.append("run after " + info.name)

    if instrument_definition_type == "decorator":
        # Apply the decorator as a plain function to the base class.
        MyTest = pass_instrument(BaseTest)
    elif instrument_definition_type == "subclass":
        # Derive from PassInstrument directly, chaining both initializers.
        class MyTest(BaseTest, tvm.ir.instrument.PassInstrument):
            def __init__(self):
                BaseTest.__init__(self)
                tvm.ir.instrument.PassInstrument.__init__(self)

    mod = get_test_model()
    instrument = MyTest()
    with tvm.transform.PassContext(instruments=[instrument]):
        mod = tvm.relay.transform.InferType()(mod)

    expected = ["enter ctx", "run before InferType", "run after InferType", "exit ctx"]
    assert "".join(expected) == "".join(instrument.events)
def test_disable_pass():
    """should_run returning False skips a pass without aborting the pipeline."""

    @pass_instrument
    class CustomPI:
        def __init__(self):
            self.events = []

        def should_run(self, mod, info):
            # Permit only passes whose name mentions "InferType".
            return "InferType" in info.name

        def run_before_pass(self, mod, info):
            self.events.append(info.name)

    mod = get_test_model()
    custom_pi = CustomPI()
    with tvm.transform.PassContext(instruments=[custom_pi]):
        for xform in (
            tvm.relay.transform.AnnotateSpans(),
            tvm.relay.transform.ToANormalForm(),
            tvm.relay.transform.InferType(),
        ):
            mod = xform(mod)
    # Only the allowed pass should have triggered run_before_pass.
    assert "InferType" == "".join(custom_pi.events)
def test_multiple_instrument():
    """A veto from any instrument's should_run suppresses the pass for all."""

    @pass_instrument
    class SkipPass:
        def __init__(self, skip_pass_name):
            self.skip_pass_name = skip_pass_name

        def should_run(self, mod, info):
            # Veto passes whose name contains the configured substring.
            return self.skip_pass_name not in info.name

    skip_annotate = SkipPass("AnnotateSpans")
    skip_anf = SkipPass("ToANormalForm")

    @pass_instrument
    class PrintPassName:
        def __init__(self):
            self.events = []

        def run_before_pass(self, mod, info):
            self.events.append(info.name)

    mod = get_test_model()
    print_pass_name = PrintPassName()
    instruments = [skip_annotate, skip_anf, print_pass_name]
    with tvm.transform.PassContext(instruments=instruments):
        for xform in (
            tvm.relay.transform.AnnotateSpans(),
            tvm.relay.transform.ToANormalForm(),
            tvm.relay.transform.InferType(),
        ):
            mod = xform(mod)
    # The two skipped passes never reach the recording instrument.
    assert "InferType" == "".join(print_pass_name.events)
def test_instrument_pass_counts():
    """Before/after counts match during a build and are reset on ctx exit."""

    @pass_instrument
    class PassesCounter:
        def __init__(self):
            self.run_before_count = 0
            self.run_after_count = 0

        def __clear(self):
            self.run_before_count = 0
            self.run_after_count = 0

        def enter_pass_ctx(self):
            self.__clear()

        def exit_pass_ctx(self):
            self.__clear()

        def run_before_pass(self, mod, info):
            self.run_before_count += 1

        def run_after_pass(self, mod, info):
            self.run_after_count += 1

    mod = get_test_model()
    passes_counter = PassesCounter()
    with tvm.transform.PassContext(instruments=[passes_counter]):
        tvm.relay.build(mod, "llvm")
        assert passes_counter.run_after_count != 0
        assert passes_counter.run_after_count == passes_counter.run_before_count
    # Leaving the context fires exit_pass_ctx, which zeroes both counters.
    assert passes_counter.run_before_count == 0
    assert passes_counter.run_after_count == 0
def test_list_pass_configs():
    """PassContext.list_configs exposes registered config keys with type info."""
    configs = tvm.transform.PassContext.list_configs()
    assert configs
    key = "relay.backend.use_auto_scheduler"
    assert key in configs
    assert configs[key]["type"] == "IntImm"
def test_enter_pass_ctx_exception():
    """An exception in enter_pass_ctx unwinds only already-entered instruments.

    Only instruments whose enter_pass_ctx succeeded get exit_pass_ctx
    invoked, and the failed context must not become the current one.
    """
    events = []

    @pass_instrument
    class PI:
        def __init__(self, id):
            self.id = id

        def enter_pass_ctx(self):
            events.append(self.id + " enter ctx")

        def exit_pass_ctx(self):
            events.append(self.id + " exit ctx")

    @pass_instrument
    class PIBroken(PI):
        def __init__(self, id):
            super().__init__(id)

        def enter_pass_ctx(self):
            events.append(self.id + " enter ctx")
            raise RuntimeError("Just a dummy error")

    pass_ctx = tvm.transform.PassContext(instruments=[PI("%1"), PIBroken("%2"), PI("%3")])
    with pytest.raises(tvm.error.TVMError) as cm:
        with pass_ctx:
            pass
    # Bug fix: pytest.raises yields an ExceptionInfo whose exception is in
    # `cm.value`; the original `cm.execption` was a typo and would itself
    # raise AttributeError instead of checking the message.
    assert "Just a dummy error" in str(cm.value)
    # %3 never entered, so only %1 and %2 appear; only %1 is unwound.
    assert "%1 enter ctx" "%2 enter ctx" "%1 exit ctx" == "".join(events)
    # Make sure the broken context did not become the current PassContext.
    cur_pass_ctx = tvm.transform.PassContext.current()
    assert pass_ctx != cur_pass_ctx
    assert not cur_pass_ctx.instruments
def test_enter_pass_ctx_exception_global():
    """enter_pass_ctx failure during a global override installs no instruments."""

    @pass_instrument
    class PIBroken:
        def enter_pass_ctx(self):
            raise RuntimeError("Just a dummy error")

    cur_pass_ctx = tvm.transform.PassContext.current()
    with pytest.raises(tvm.error.TVMError) as cm:
        cur_pass_ctx.override_instruments([PIBroken()])
    # Bug fix: pytest exposes the raised exception as `cm.value`;
    # `cm.exception` is the unittest API and does not exist on ExceptionInfo.
    assert "Just a dummy error" in str(cm.value)
    assert not cur_pass_ctx.instruments
def test_exit_pass_ctx_exception():
    """An exception in exit_pass_ctx stops unwinding at the failing instrument."""
    events = []

    @pass_instrument
    class PI:
        def __init__(self, id):
            self.id = id

        def exit_pass_ctx(self):
            events.append(self.id + " exit ctx")

    @pass_instrument
    class PIBroken(PI):
        def __init__(self, id):
            super().__init__(id)

        def exit_pass_ctx(self):
            events.append(self.id + " exit ctx")
            raise RuntimeError("Just a dummy error")

    pass_ctx = tvm.transform.PassContext(instruments=[PI("%1"), PIBroken("%2"), PI("%3")])
    with pytest.raises(tvm.error.TVMError) as cm:
        with pass_ctx:
            pass
    # Bug fix: use `cm.value` (pytest ExceptionInfo), not `cm.exception`
    # (unittest API), which would raise AttributeError here.
    assert "Just a dummy error" in str(cm.value)
    # %3's exit never runs because %2 raised first.
    assert "%1 exit ctx" "%2 exit ctx" == "".join(events)
    # Make sure the failed context is not left as the current PassContext.
    cur_pass_ctx = tvm.transform.PassContext.current()
    assert pass_ctx != cur_pass_ctx
    assert not cur_pass_ctx.instruments
def test_exit_pass_ctx_exception_global():
    """exit_pass_ctx failure during a re-override leaves no instruments installed."""

    @pass_instrument
    class PIBroken:
        def exit_pass_ctx(self):
            raise RuntimeError("Just a dummy error")

    cur_pass_ctx = tvm.transform.PassContext.current()
    with pytest.raises(tvm.error.TVMError) as cm:
        cur_pass_ctx.override_instruments([PIBroken()])
        # The second override exits the first PIBroken, which raises.
        cur_pass_ctx.override_instruments([PIBroken()])
    # Bug fix: use `cm.value` (pytest ExceptionInfo), not `cm.exception`.
    assert "Just a dummy error" in str(cm.value)
    assert not cur_pass_ctx.instruments
def test_pass_exception():
    """A raising pass still triggers exit_pass_ctx but not run_after_pass."""
    events = []

    @pass_instrument
    class PI:
        def enter_pass_ctx(self):
            events.append("enter_pass_ctx")

        def exit_pass_ctx(self):
            events.append("exit_pass_ctx")

        def should_run(self, mod, info):
            events.append("should_run")
            return True

        def run_before_pass(self, mod, info):
            events.append("run_before_pass")

        def run_after_pass(self, mod, info):
            events.append("run_after_pass")

    @tvm.transform.module_pass(opt_level=2)
    def transform(mod, ctx):
        events.append("transform pass")
        raise RuntimeError("Just a dummy error")
        return mod

    mod = get_test_model()
    with pytest.raises(tvm.error.TVMError) as cm:
        with tvm.transform.PassContext(instruments=[PI()]):
            mod = transform(mod)
    # Bug fix: use `cm.value` (pytest ExceptionInfo), not `cm.exception`.
    assert "Just a dummy error" in str(cm.value)
    # run_after_pass is skipped because the pass body raised.
    assert (
        "enter_pass_ctx"
        "should_run"
        "run_before_pass"
        "transform pass"
        "exit_pass_ctx" == "".join(events)
    )
def test_should_run_exception():
    """An exception in should_run aborts the pass and unwinds the context."""
    events = []

    @pass_instrument
    class PI:
        def __init__(self, id):
            self.id = id

        def enter_pass_ctx(self):
            events.append(self.id + " enter_pass_ctx")

        def exit_pass_ctx(self):
            events.append(self.id + " exit_pass_ctx")

        def should_run(self, mod, info):
            events.append(self.id + " should_run")
            raise RuntimeError("Just a dummy error")
            return True

        def run_before_pass(self, mod, info):
            events.append(self.id + " run_before_pass")

        def run_after_pass(self, mod, info):
            events.append(self.id + " run_after_pass")

    @tvm.transform.module_pass(opt_level=2)
    def transform(mod, ctx):
        events.append("transform pass")
        return mod

    mod = get_test_model()
    with pytest.raises(tvm.error.TVMError) as cm:
        with tvm.transform.PassContext(instruments=[PI("%1"), PI("%2")]):
            mod = transform(mod)
    # Bug fix: use `cm.value` (pytest ExceptionInfo), not `cm.exception`.
    assert "Just a dummy error" in str(cm.value)
    # %2's should_run never runs and the pass body never executes.
    assert (
        "%1 enter_pass_ctx"
        "%2 enter_pass_ctx"
        "%1 should_run"
        "%1 exit_pass_ctx"
        "%2 exit_pass_ctx" == "".join(events)
    )
def test_run_before_exception():
    """An exception in run_before_pass aborts the pass and unwinds the context."""
    events = []

    @pass_instrument
    class PI:
        def __init__(self, id):
            self.id = id

        def enter_pass_ctx(self):
            events.append(self.id + " enter_pass_ctx")

        def exit_pass_ctx(self):
            events.append(self.id + " exit_pass_ctx")

        def should_run(self, mod, info):
            events.append(self.id + " should_run")
            return True

        def run_before_pass(self, mod, info):
            events.append(self.id + " run_before_pass")
            raise RuntimeError("Just a dummy error")

        def run_after_pass(self, mod, info):
            events.append(self.id + " run_after_pass")

    @tvm.transform.module_pass(opt_level=2)
    def transform(mod, ctx):
        events.append("transform pass")
        return mod

    mod = get_test_model()
    with pytest.raises(tvm.error.TVMError) as cm:
        with tvm.transform.PassContext(instruments=[PI("%1"), PI("%2")]):
            mod = transform(mod)
    # Bug fix: use `cm.value` (pytest ExceptionInfo), not `cm.exception`.
    assert "Just a dummy error" in str(cm.value)
    # %2's run_before_pass never runs and the pass body never executes.
    assert (
        "%1 enter_pass_ctx"
        "%2 enter_pass_ctx"
        "%1 should_run"
        "%2 should_run"
        "%1 run_before_pass"
        "%1 exit_pass_ctx"
        "%2 exit_pass_ctx" == "".join(events)
    )
def test_run_after_exception():
    """An exception in run_after_pass aborts remaining callbacks and unwinds."""
    events = []

    @pass_instrument
    class PI:
        def __init__(self, id):
            self.id = id

        def enter_pass_ctx(self):
            events.append(self.id + " enter_pass_ctx")

        def exit_pass_ctx(self):
            events.append(self.id + " exit_pass_ctx")

        def should_run(self, mod, info):
            events.append(self.id + " should_run")
            return True

        def run_before_pass(self, mod, info):
            events.append(self.id + " run_before_pass")

        def run_after_pass(self, mod, info):
            events.append(self.id + " run_after_pass")
            raise RuntimeError("Just a dummy error")

    @tvm.transform.module_pass(opt_level=2)
    def transform(mod, ctx):
        events.append("transform pass")
        return mod

    x, y = [tvm.relay.var(c, shape=(3, 4), dtype="float32") for c in "xy"]
    mod = tvm.IRModule.from_expr(tvm.relay.add(x, y))

    with pytest.raises(tvm.error.TVMError) as cm:
        with tvm.transform.PassContext(instruments=[PI("%1"), PI("%2")]):
            mod = transform(mod)
    # Bug fix: use `cm.value` (pytest ExceptionInfo), not `cm.exception`.
    assert "Just a dummy error" in str(cm.value)
    # The pass body ran, but %2's run_after_pass never fired.
    assert (
        "%1 enter_pass_ctx"
        "%2 enter_pass_ctx"
        "%1 should_run"
        "%2 should_run"
        "%1 run_before_pass"
        "%2 run_before_pass"
        "transform pass"
        "%1 run_after_pass"
        "%1 exit_pass_ctx"
        "%2 exit_pass_ctx" == "".join(events)
    )
def test_instrument_call_sequence():
    """Instruments fire in registration order around every pass in a pipeline."""
    events = []

    @pass_instrument
    class PI:
        def __init__(self, id):
            self.id = id

        def enter_pass_ctx(self):
            events.append(self.id + " enter_pass_ctx")

        def exit_pass_ctx(self):
            events.append(self.id + " exit_pass_ctx")

        def should_run(self, mod, info):
            events.append(" " + self.id + " should_run")
            return True

        def run_before_pass(self, mod, info):
            events.append(" " + self.id + " run_before_pass")

        def run_after_pass(self, mod, info):
            events.append(" " + self.id + " run_after_pass")

    @tvm.transform.module_pass(opt_level=2)
    def transform1(mod, ctx):
        events.append(" transform1 pass")
        return mod

    @tvm.transform.module_pass(opt_level=2)
    def transform2(mod, ctx):
        events.append(" transform2 pass")
        return mod

    mod = get_test_model()
    with tvm.transform.PassContext(instruments=[PI("%1"), PI("%2")]):
        mod = transform1(mod)
        mod = transform2(mod)

    # Build the expected trace: ctx enter, then the full per-pass cycle
    # (should_run, run_before, body, run_after) for each pass, then ctx exit.
    expected = ["%1 enter_pass_ctx", "%2 enter_pass_ctx"]
    for pass_label in (" transform1 pass", " transform2 pass"):
        expected += [
            " %1 should_run",
            " %2 should_run",
            " %1 run_before_pass",
            " %2 run_before_pass",
            pass_label,
            " %1 run_after_pass",
            " %2 run_after_pass",
        ]
    expected += ["%1 exit_pass_ctx", "%2 exit_pass_ctx"]
    assert "".join(expected) == "".join(events)
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2019 by Brendt Wohlberg <brendt@ieee.org>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Plotting/visualisation functions"""
from __future__ import absolute_import, division, print_function
from builtins import range
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.pyplot import figure, subplot, subplots, gcf, gca, savefig
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
try:
import mpldatacursor as mpldc
except ImportError:
have_mpldc = False
else:
have_mpldc = True
__author__ = """Brendt Wohlberg <brendt@ieee.org>"""
__all__ = ['plot', 'surf', 'contour', 'imview', 'close',
'set_ipython_plot_backend', 'set_notebook_plot_backend',
'config_notebook_plotting']
def attach_keypress(fig, scaling=1.1):
    """
    Attach a key press handler to *fig*: key 'e' enlarges the figure by
    *scaling*, 'c' shrinks it by the same factor, and 'q' closes it.

    **Note:** Resizing may not function correctly with all matplotlib
    backends (a
    `bug <https://github.com/matplotlib/matplotlib/issues/10083>`__
    has been reported).

    Parameters
    ----------
    fig : :class:`matplotlib.figure.Figure` object
      Figure to which event handling is to be attached
    scaling : float, optional (default 1.1)
      Scaling factor for figure size changes

    Returns
    -------
    press : function
      Key press event handler function
    """

    def press(event):
        key = event.key
        if key == 'q':
            plt.close(fig)
        elif key == 'e':
            fig.set_size_inches(scaling * fig.get_size_inches(), forward=True)
        elif key == 'c':
            fig.set_size_inches(fig.get_size_inches() / scaling, forward=True)

    # Connect at most once per figure: remember the connection id on the
    # figure object so repeated calls do not stack duplicate handlers.
    if not hasattr(fig, '_sporco_keypress_cid'):
        fig._sporco_keypress_cid = fig.canvas.mpl_connect('key_press_event',
                                                          press)

    return press
def attach_zoom(ax, scaling=2.0):
    """
    Attach an event handler that supports zooming within a plot using
    the mouse scroll wheel. Zooming is centred on the cursor position
    and clamped to the axes limits that were current when the handler
    was attached.

    Parameters
    ----------
    ax : :class:`matplotlib.axes.Axes` object
      Axes to which event handling is to be attached
    scaling : float, optional (default 2.0)
      Scaling factor for zooming in and out

    Returns
    -------
    zoom : function
      Mouse scroll wheel event handler function
    """

    # See https://stackoverflow.com/questions/11551049
    def zoom(event):
        # Get the current x and y limits
        cur_xlim = ax.get_xlim()
        cur_ylim = ax.get_ylim()
        # Get event location
        xdata = event.xdata
        ydata = event.ydata
        # Return if cursor is not over valid region of plot
        if xdata is None or ydata is None:
            return
        if event.button == 'up':
            # Deal with zoom in
            scale_factor = 1.0 / scaling
        elif event.button == 'down':
            # Deal with zoom out
            scale_factor = scaling
        else:
            # Bug fix: any other button value previously fell through and
            # left scale_factor undefined, raising NameError below.
            return
        # Get distance from the cursor to the edge of the figure frame
        x_left = xdata - cur_xlim[0]
        x_right = cur_xlim[1] - xdata
        y_top = ydata - cur_ylim[0]
        y_bottom = cur_ylim[1] - ydata
        # Calculate new x and y limits
        new_xlim = (xdata - x_left * scale_factor,
                    xdata + x_right * scale_factor)
        new_ylim = (ydata - y_top * scale_factor,
                    ydata + y_bottom * scale_factor)
        # Ensure that x limit range is no larger than that of the reference
        if np.diff(new_xlim) > np.diff(zoom.xlim_ref):
            new_xlim *= np.diff(zoom.xlim_ref) / np.diff(new_xlim)
        # Ensure that lower x limit is not less than that of the reference
        if new_xlim[0] < zoom.xlim_ref[0]:
            new_xlim += np.array(zoom.xlim_ref[0] - new_xlim[0])
        # Ensure that upper x limit is not greater than that of the reference
        if new_xlim[1] > zoom.xlim_ref[1]:
            new_xlim -= np.array(new_xlim[1] - zoom.xlim_ref[1])
        # Ensure that ylim tuple has the smallest value first (a y axis
        # may be inverted, e.g. for images)
        if zoom.ylim_ref[1] < zoom.ylim_ref[0]:
            ylim_ref = zoom.ylim_ref[::-1]
            new_ylim = new_ylim[::-1]
        else:
            ylim_ref = zoom.ylim_ref
        # Ensure that y limit range is no larger than that of the reference
        if np.diff(new_ylim) > np.diff(ylim_ref):
            new_ylim *= np.diff(ylim_ref) / np.diff(new_ylim)
        # Ensure that lower y limit is not less than that of the reference
        if new_ylim[0] < ylim_ref[0]:
            new_ylim += np.array(ylim_ref[0] - new_ylim[0])
        # Ensure that upper y limit is not greater than that of the reference
        if new_ylim[1] > ylim_ref[1]:
            new_ylim -= np.array(new_ylim[1] - ylim_ref[1])
        # Return the ylim tuple to its original order
        if zoom.ylim_ref[1] < zoom.ylim_ref[0]:
            new_ylim = new_ylim[::-1]
        # Set new x and y limits
        ax.set_xlim(new_xlim)
        ax.set_ylim(new_ylim)
        # Force redraw
        ax.figure.canvas.draw()

    # Record reference x and y limits prior to any zooming
    zoom.xlim_ref = ax.get_xlim()
    zoom.ylim_ref = ax.get_ylim()

    # Get figure for specified axes and attach the event handler
    fig = ax.get_figure()
    fig.canvas.mpl_connect('scroll_event', zoom)

    return zoom
def plot(y, x=None, ptyp='plot', xlbl=None, ylbl=None, title=None,
         lgnd=None, lglc=None, **kwargs):
    """
    Plot points or lines in 2D. If a figure object is specified then the
    plot is drawn in that figure, and ``fig.show()`` is not called. The
    figure is closed on key entry 'q'.

    Parameters
    ----------
    y : array_like
      1d or 2d array of data to plot. If a 2d array, each column is
      plotted as a separate curve.
    x : array_like, optional (default None)
      Values for x-axis of the plot
    ptyp : string, optional (default 'plot')
      Plot type specification (options are 'plot', 'semilogx',
      'semilogy', and 'loglog')
    xlbl : string, optional (default None)
      Label for x-axis
    ylbl : string, optional (default None)
      Label for y-axis
    title : string, optional (default None)
      Figure title
    lgnd : list of strings, optional (default None)
      List of legend string
    lglc : string, optional (default None)
      Legend location string
    **kwargs : :class:`matplotlib.lines.Line2D` properties or figure \
               properties, optional
      Keyword arguments specifying :class:`matplotlib.lines.Line2D`
      properties, e.g. ``lw=2.0`` sets a line width of 2, or properties
      of the figure and axes. If not specified, the defaults for line
      width (``lw``) and marker size (``ms``) are 1.5 and 6.0
      respectively. The valid figure/axes keyword arguments are:
      ``fgsz`` (tuple (width, height): figure dimensions in inches),
      ``fgnm`` (integer: figure number), ``fig`` (draw in the specified
      :class:`matplotlib.figure.Figure` instead of creating one), and
      ``ax`` (plot in the specified :class:`matplotlib.axes.Axes`
      instead of the figure's current axes).

    Returns
    -------
    fig : :class:`matplotlib.figure.Figure` object
      Figure object for this figure
    ax : :class:`matplotlib.axes.Axes` object
      Axes object for this plot
    """

    # Extract kwargs entries that are not related to line properties
    fgsz = kwargs.pop('fgsz', None)
    fgnm = kwargs.pop('fgnm', None)
    fig = kwargs.pop('fig', None)
    ax = kwargs.pop('ax', None)

    # Remember whether the caller supplied a figure: fig.show() is only
    # called below when the figure was created here.
    figp = fig
    if fig is None:
        fig = plt.figure(num=fgnm, figsize=fgsz)
        fig.clf()
        ax = fig.gca()
    elif ax is None:
        ax = fig.gca()

    # Set defaults for line width and marker size
    if 'lw' not in kwargs and 'linewidth' not in kwargs:
        kwargs['lw'] = 1.5
    if 'ms' not in kwargs and 'markersize' not in kwargs:
        kwargs['ms'] = 6.0

    if ptyp not in ('plot', 'semilogx', 'semilogy', 'loglog'):
        raise ValueError("Invalid plot type '%s'" % ptyp)
    # Resolve the axes plotting method (plot/semilogx/semilogy/loglog) by name
    pltmth = getattr(ax, ptyp)
    if x is None:
        pltln = pltmth(y, **kwargs)
    else:
        pltln = pltmth(x, y, **kwargs)

    # Fixed-precision formatting of cursor coordinates in the status bar
    ax.fmt_xdata = lambda x: "{: .2f}".format(x)
    ax.fmt_ydata = lambda x: "{: .2f}".format(x)

    if title is not None:
        ax.set_title(title)
    if xlbl is not None:
        ax.set_xlabel(xlbl)
    if ylbl is not None:
        ax.set_ylabel(ylbl)
    if lgnd is not None:
        ax.legend(lgnd, loc=lglc)

    # Enable 'q'/'e'/'c' keys and scroll-wheel zoom on this figure
    attach_keypress(fig)
    attach_zoom(ax)

    if have_mpldc:
        # Show data values on cursor hover when mpldatacursor is available
        mpldc.datacursor(pltln)

    if figp is None:
        fig.show()

    return fig, ax
def surf(z, x=None, y=None, elev=None, azim=None, xlbl=None, ylbl=None,
         zlbl=None, title=None, lblpad=8.0, alpha=1.0, cntr=None,
         cmap=None, fgsz=None, fgnm=None, fig=None, ax=None):
    """
    Plot a 2D surface in 3D. If a figure object is specified then the
    surface is drawn in that figure, and ``fig.show()`` is not called.
    The figure is closed on key entry 'q'.

    Parameters
    ----------
    z : array_like
      2d array of data to plot
    x : array_like, optional (default None)
      Values for x-axis of the plot
    y : array_like, optional (default None)
      Values for y-axis of the plot
    elev : float
      Elevation angle (in degrees) in the z plane
    azim : float
      Azimuth angle (in degrees) in the x,y plane
    xlbl : string, optional (default None)
      Label for x-axis
    ylbl : string, optional (default None)
      Label for y-axis
    zlbl : string, optional (default None)
      Label for z-axis
    title : string, optional (default None)
      Figure title
    lblpad : float, optional (default 8.0)
      Label padding
    alpha : float between 0.0 and 1.0, optional (default 1.0)
      Transparency
    cntr : int or sequence of ints, optional (default None)
      If not None, plot contours of the surface on the lower end of
      the z-axis. An int specifies the number of contours to plot, and
      a sequence specifies the specific contour levels to plot.
    cmap : :class:`matplotlib.colors.Colormap` object, optional (default None)
      Colour map for surface. If none specifed, defaults to cm.YlOrRd
    fgsz : tuple (width,height), optional (default None)
      Specify figure dimensions in inches
    fgnm : integer, optional (default None)
      Figure number of figure
    fig : :class:`matplotlib.figure.Figure` object, optional (default None)
      Draw in specified figure instead of creating one
    ax : :class:`matplotlib.axes.Axes` object, optional (default None)
      Plot in specified axes instead of creating one

    Returns
    -------
    fig : :class:`matplotlib.figure.Figure` object
      Figure object for this figure
    ax : :class:`matplotlib.axes.Axes` object
      Axes object for this plot
    """

    # Remember whether the caller supplied a figure: fig.show() is only
    # called below when the figure was created here.
    figp = fig
    if fig is None:
        fig = plt.figure(num=fgnm, figsize=fgsz)
        fig.clf()
        ax = plt.axes(projection='3d')
    else:
        if ax is None:
            ax = plt.axes(projection='3d')
        else:
            # Replace a non-3d axes with a 3d one occupying the same
            # subplot slot. See https://stackoverflow.com/a/43563804 and
            # https://stackoverflow.com/a/35221116
            if ax.name != '3d':
                ax.remove()
                ax = fig.add_subplot(ax.get_subplotspec(), projection='3d')

    if elev is not None or azim is not None:
        ax.view_init(elev=elev, azim=azim)

    if cmap is None:
        cmap = cm.YlOrRd

    # Default to index grids matching the shape of z
    if x is None:
        x = range(z.shape[1])
    if y is None:
        y = range(z.shape[0])

    xg, yg = np.meshgrid(x, y)
    ax.plot_surface(xg, yg, z, rstride=1, cstride=1, alpha=alpha, cmap=cmap)

    if cntr is not None:
        # Draw contours on a plane slightly below the data minimum and
        # extend the z-axis down to include it
        offset = np.around(z.min() - 0.2 * (z.max() - z.min()), 3)
        ax.contour(xg, yg, z, cntr, cmap=cmap, linewidths=2,
                   linestyles="solid", offset=offset)
        ax.set_zlim(offset, ax.get_zlim()[1])

    # Fixed-precision formatting of cursor coordinates in the status bar
    ax.fmt_xdata = lambda x: "{: .2f}".format(x)
    ax.fmt_ydata = lambda x: "{: .2f}".format(x)
    ax.fmt_zdata = lambda x: "{: .2f}".format(x)

    if title is not None:
        ax.set_title(title)
    if xlbl is not None:
        ax.set_xlabel(xlbl, labelpad=lblpad)
    if ylbl is not None:
        ax.set_ylabel(ylbl, labelpad=lblpad)
    if zlbl is not None:
        ax.set_zlabel(zlbl, labelpad=lblpad)

    # Enable 'q'/'e'/'c' key handling on this figure
    attach_keypress(fig)

    if figp is None:
        fig.show()

    return fig, ax
def contour(z, x=None, y=None, v=5, xlog=False, ylog=False, xlbl=None,
            ylbl=None, title=None, cfmt=None, cfntsz=10, lfntsz=None,
            alpha=1.0, cmap=None, vmin=None, vmax=None, fgsz=None, fgnm=None,
            fig=None, ax=None):
    """
    Contour plot of a 2D surface. If a figure object is specified then the
    plot is drawn in that figure, and ``fig.show()`` is not called. The
    figure is closed on key entry 'q'.

    Parameters
    ----------
    z : array_like
      2d array of data to plot
    x : array_like, optional (default None)
      Values for x-axis of the plot
    y : array_like, optional (default None)
      Values for y-axis of the plot
    v : int or sequence of floats, optional (default 5)
      An int specifies the number of contours to plot, and a sequence
      specifies the specific contour levels to plot.
    xlog : boolean, optional (default False)
      Set x-axis to log scale
    ylog : boolean, optional (default False)
      Set y-axis to log scale
    xlbl : string, optional (default None)
      Label for x-axis
    ylbl : string, optional (default None)
      Label for y-axis
    title : string, optional (default None)
      Figure title
    cfmt : string, optional (default None)
      Format string for contour labels.
    cfntsz : int or None, optional (default 10)
      Contour label font size. No contour labels are displayed if
      set to 0 or None.
    lfntsz : int, optional (default None)
      Axis label font size. The default font size is used if set to None.
    alpha : float, optional (default 1.0)
      Underlying image display alpha value
    cmap : :class:`matplotlib.colors.Colormap`, optional (default None)
      Colour map for surface. If none specifed, defaults to cm.YlOrRd
    vmin, vmax : float, optional (default None)
      Set upper and lower bounds for the colour map (see the corresponding
      parameters of :meth:`matplotlib.axes.Axes.imshow`)
    fgsz : tuple (width,height), optional (default None)
      Specify figure dimensions in inches
    fgnm : integer, optional (default None)
      Figure number of figure
    fig : :class:`matplotlib.figure.Figure` object, optional (default None)
      Draw in specified figure instead of creating one
    ax : :class:`matplotlib.axes.Axes` object, optional (default None)
      Plot in specified axes instead of current axes of figure

    Returns
    -------
    fig : :class:`matplotlib.figure.Figure` object
      Figure object for this figure
    ax : :class:`matplotlib.axes.Axes` object
      Axes object for this plot
    """

    # Remember whether the caller supplied a figure: fig.show() is only
    # called below when the figure was created here.
    figp = fig
    if fig is None:
        fig = plt.figure(num=fgnm, figsize=fgsz)
        fig.clf()
        ax = fig.gca()
    elif ax is None:
        ax = fig.gca()

    if xlog:
        ax.set_xscale('log')
    if ylog:
        ax.set_yscale('log')

    if cmap is None:
        cmap = cm.YlOrRd

    # Default to index grids matching the shape of z
    if x is None:
        x = np.arange(z.shape[1])
    else:
        x = np.array(x)
    if y is None:
        y = np.arange(z.shape[0])
    else:
        y = np.array(y)
    xg, yg = np.meshgrid(x, y)

    # Draw contour lines, optionally labelled, over a pseudocolour image
    cntr = ax.contour(xg, yg, z, v, colors='black')
    kwargs = {}
    if cfntsz is not None and cfntsz > 0:
        kwargs['fontsize'] = cfntsz
    if cfmt is not None:
        kwargs['fmt'] = cfmt
    if kwargs:
        plt.clabel(cntr, inline=True, **kwargs)
    pc = ax.pcolormesh(xg, yg, z, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha,
                       shading='gouraud', clim=(vmin, vmax))

    # Status-bar coordinate formatting: exponential notation on log axes
    if xlog:
        ax.fmt_xdata = lambda x: "{: .2e}".format(x)
    else:
        ax.fmt_xdata = lambda x: "{: .2f}".format(x)
    if ylog:
        ax.fmt_ydata = lambda x: "{: .2e}".format(x)
    else:
        ax.fmt_ydata = lambda x: "{: .2f}".format(x)

    if title is not None:
        ax.set_title(title)
    if xlbl is not None:
        ax.set_xlabel(xlbl, fontsize=lfntsz)
    if ylbl is not None:
        ax.set_ylabel(ylbl, fontsize=lfntsz)

    # Attach a colour bar in an axes appended to the right of the plot
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.2)
    plt.colorbar(pc, ax=ax, cax=cax)

    # Enable 'q'/'e'/'c' keys and scroll-wheel zoom on this figure
    attach_keypress(fig)
    attach_zoom(ax)

    if have_mpldc:
        # Show data values on cursor hover when mpldatacursor is available
        mpldc.datacursor()

    if figp is None:
        fig.show()

    return fig, ax
def imview(img, title=None, copy=True, fltscl=False, intrp='nearest',
           norm=None, cbar=False, cmap=None, fgsz=None, fgnm=None,
           fig=None, ax=None):
    """
    Display an image. Pixel values are displayed when the pointer is over
    valid image data. If a figure object is specified then the image is
    drawn in that figure, and ``fig.show()`` is not called. The figure is
    closed on key entry 'q'.

    Parameters
    ----------
    img : array_like, shape (Nr, Nc) or (Nr, Nc, 3) or (Nr, Nc, 4)
      Image to display
    title : string, optional (default None)
      Figure title
    copy : boolean, optional (default True)
      If True, create a copy of input `img` as a reference for displayed
      pixel values, ensuring that displayed values do not change when the
      array changes in the calling scope. Set this flag to False if the
      overhead of an additional copy of the input image is not acceptable.
    fltscl : boolean, optional (default False)
      If True, rescale and shift floating point arrays to [0,1]
    intrp : string, optional (default 'nearest')
      Specify type of interpolation used to display image (see
      ``interpolation`` parameter of :meth:`matplotlib.axes.Axes.imshow`)
    norm : :class:`matplotlib.colors.Normalize` object, optional (default None)
      Specify the :class:`matplotlib.colors.Normalize` instance used to
      scale pixel values for input to the colour map
    cbar : boolean, optional (default False)
      Flag indicating whether to display colorbar
    cmap : :class:`matplotlib.colors.Colormap`, optional (default None)
      Colour map for image. If none specifed, defaults to cm.Greys_r
      for monochrome image
    fgsz : tuple (width,height), optional (default None)
      Specify figure dimensions in inches
    fgnm : integer, optional (default None)
      Figure number of figure
    fig : :class:`matplotlib.figure.Figure` object, optional (default None)
      Draw in specified figure instead of creating one
    ax : :class:`matplotlib.axes.Axes` object, optional (default None)
      Plot in specified axes instead of current axes of figure

    Returns
    -------
    fig : :class:`matplotlib.figure.Figure` object
      Figure object for this figure
    ax : :class:`matplotlib.axes.Axes` object
      Axes object for this plot
    """

    # NOTE(review): this check rejects (Nr, Nc, 4) images although the
    # docstring above advertises them — confirm whether RGBA support is
    # intended (the test would then need to allow img.shape[2] == 4).
    if img.ndim > 2 and img.shape[2] != 3:
        raise ValueError('Argument img must be an Nr x Nc array or an '
                         'Nr x Nc x 3 array')

    # Remember whether the caller supplied a figure: fig.show() is only
    # called below when the figure was created here.
    figp = fig
    if fig is None:
        fig = plt.figure(num=fgnm, figsize=fgsz)
        fig.clf()
        ax = fig.gca()
    elif ax is None:
        ax = fig.gca()

    # Deal with removal of 'box-forced' adjustable in Matplotlib 2.2.0
    mplv = matplotlib.__version__.split('.')
    if int(mplv[0]) > 2 or (int(mplv[0]) == 2 and int(mplv[1]) >= 2):
        try:
            ax.set_adjustable('box')
        except Exception:
            ax.set_adjustable('datalim')
    else:
        ax.set_adjustable('box-forced')

    # imgd holds the (possibly rescaled) values handed to imshow
    imgd = img.copy()

    if copy:
        # Keep a separate copy of the input image so that the original
        # pixel values can be display rather than the scaled pixel
        # values that are actually plotted.
        img = img.copy()

    if cmap is None and img.ndim == 2:
        cmap = cm.Greys_r

    # Rescale display values depending on dtype
    if np.issubdtype(img.dtype, np.floating):
        if fltscl:
            # Shift/scale floating point data to [0, 1]
            imgd -= imgd.min()
            imgd /= imgd.max()
        if img.ndim > 2:
            imgd = np.clip(imgd, 0.0, 1.0)
    elif img.dtype == np.uint16:
        imgd = np.float16(imgd) / np.iinfo(np.uint16).max
    elif img.dtype == np.int16:
        imgd = np.float16(imgd) - imgd.min()
        imgd /= imgd.max()

    if norm is None:
        # Without an explicit Normalize, span the full data range
        im = ax.imshow(imgd, cmap=cmap, interpolation=intrp, vmin=imgd.min(),
                       vmax=imgd.max())
    else:
        im = ax.imshow(imgd, cmap=cmap, interpolation=intrp, norm=norm)

    ax.set_yticklabels([])
    ax.set_xticklabels([])

    if title is not None:
        ax.set_title(title)

    # cbar=None gets a blank spacer axes (keeps layout aligned with
    # figures that do show a colour bar); cbar=True gets a real one
    if cbar or cbar is None:
        orient = 'vertical' if img.shape[0] >= img.shape[1] else 'horizontal'
        pos = 'right' if orient == 'vertical' else 'bottom'
        divider = make_axes_locatable(ax)
        cax = divider.append_axes(pos, size="5%", pad=0.2)
        if cbar is None:
            # See http://chris35wills.github.io/matplotlib_axis
            if hasattr(cax, 'set_facecolor'):
                cax.set_facecolor('none')
            else:
                cax.set_axis_bgcolor('none')
            for axis in ['top', 'bottom', 'left', 'right']:
                cax.spines[axis].set_linewidth(0)
            cax.set_xticks([])
            cax.set_yticks([])
        else:
            plt.colorbar(im, ax=ax, cax=cax, orientation=orient)

    def format_coord(x, y):
        # Map cursor position to the nearest pixel and report its value
        # (from `img`, i.e. the original data, not the rescaled `imgd`)
        nr, nc = imgd.shape[0:2]
        col = int(x + 0.5)
        row = int(y + 0.5)
        if col >= 0 and col < nc and row >= 0 and row < nr:
            z = img[row, col]
            if imgd.ndim == 2:
                return 'x=%6.2f, y=%6.2f, z=%.2f' % (x, y, z)
            else:
                return 'x=%6.2f, y=%6.2f, z=(%.2f,%.2f,%.2f)' % \
                    sum(((x,), (y,), tuple(z)), ())
        else:
            return 'x=%.2f, y=%.2f' % (x, y)

    ax.format_coord = format_coord

    if fig.canvas.toolbar is not None:
        # Route toolbar status-bar messages through format_coord.
        # See https://stackoverflow.com/a/47086132
        def mouse_move(self, event):
            if event.inaxes and event.inaxes.get_navigate():
                s = event.inaxes.format_coord(event.xdata, event.ydata)
                self.set_message(s)

        def mouse_move_patch(arg):
            return mouse_move(fig.canvas.toolbar, arg)

        fig.canvas.toolbar._idDrag = fig.canvas.mpl_connect(
            'motion_notify_event', mouse_move_patch)

    # Enable 'q'/'e'/'c' keys and scroll-wheel zoom on this figure
    attach_keypress(fig)
    attach_zoom(ax)

    if have_mpldc:
        mpldc.datacursor(display='single')

    if figp is None:
        fig.show()

    return fig, ax
def close(fig=None):
    """
    Close figure(s). If a figure object reference or figure number is
    provided, close the specified figure, otherwise close all figures.
    Parameters
    ----------
    fig : :class:`matplotlib.figure.Figure` object or integer,\
        optional (default None)
        Figure object or number of figure to close
    """
    # A fig of None selects all open figures
    target = 'all' if fig is None else fig
    plt.close(target)
def set_ipython_plot_backend(backend='qt'):
    """
    Set matplotlib backend within an ipython shell. This function has the
    same effect as the line magic ``%matplotlib [backend]`` but is called
    as a function and includes a check to determine whether the code is
    running in an ipython shell, so that it can safely be used within a
    normal python script since it has no effect when not running in an
    ipython shell.
    Parameters
    ----------
    backend : string, optional (default 'qt')
        Name of backend to be passed to the ``%matplotlib`` line magic
        command
    """
    from sporco.util import in_ipython
    # No effect outside an ipython shell
    if not in_ipython():
        return
    # See https://stackoverflow.com/questions/35595766
    get_ipython().run_line_magic('matplotlib', backend)
def set_notebook_plot_backend(backend='inline'):
    """
    Set matplotlib backend within a Jupyter Notebook shell. This function
    has the same effect as the line magic ``%matplotlib [backend]`` but is
    called as a function and includes a check to determine whether the code
    is running in a notebook shell, so that it can safely be used within a
    normal python script since it has no effect when not running in a
    notebook shell.
    Parameters
    ----------
    backend : string, optional (default 'inline')
        Name of backend to be passed to the ``%matplotlib`` line magic
        command
    """
    from sporco.util import in_notebook
    # No effect outside a notebook shell
    if not in_notebook():
        return
    # See https://stackoverflow.com/questions/35595766
    get_ipython().run_line_magic('matplotlib', backend)
def config_notebook_plotting():
    """
    Configure plotting functions for inline plotting within a Jupyter
    Notebook shell. This function has no effect when not within a
    notebook shell, and may therefore be used within a normal python
    script.
    """
    # Check whether running within a notebook shell and have
    # not already monkey patched the plot function
    from sporco.util import in_notebook
    module = sys.modules[__name__]
    if in_notebook() and module.plot.__name__ == 'plot':
        # Set inline backend (i.e. %matplotlib inline) if in a notebook shell
        set_notebook_plot_backend()

        def discard_return(fnc):
            """Return a wrapper for `fnc` that discards its return value.

            Within a notebook with inline plotting, plots are duplicated
            if the return value of the plotting function is not assigned
            to a variable. The wrapper deliberately does not copy the
            wrapped function's ``__name__`` so that the re-patch guard
            above continues to detect that patching has been applied.
            """
            def wrapper(*args, **kwargs):
                fnc(*args, **kwargs)
            return wrapper

        # Replace each plotting function with a return-value-discarding
        # wrapper. The factory binds each original function immediately,
        # avoiding the late-binding closure pitfall of defining lambdas
        # in a loop.
        for name in ('plot', 'surf', 'contour', 'imview'):
            setattr(module, name, discard_return(getattr(module, name)))

        # Disable figure show method (results in a warning if used within
        # a notebook with inline plotting)
        import matplotlib.figure

        def show_disable(self):
            pass
        matplotlib.figure.Figure.show = show_disable
| |
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A utility class to send to and recv from a non-blocking socket."""
from __future__ import with_statement
import sys
import zmq
from zmq.utils import jsonapi
try:
import cPickle as pickle
except ImportError:
import pickle
from .ioloop import IOLoop
try:
# gen_log will only import from >= 3.0
from tornado.log import gen_log
from tornado import stack_context
except ImportError:
from .minitornado.log import gen_log
from .minitornado import stack_context
try:
from queue import Queue
except ImportError:
from Queue import Queue
from zmq.utils.strtypes import bytes, unicode, basestring
# Compatibility shim: the callable() builtin was removed in Python 3.0/3.1;
# fall back to an equivalent hasattr-based check when it is missing.
try:
    callable
except NameError:
    callable = lambda obj: hasattr(obj, '__call__')
class ZMQStream(object):
    """A utility class to register callbacks when a zmq socket sends and receives
    For use with zmq.eventloop.ioloop
    There are three main methods
    Methods:
    * **on_recv(callback, copy=True):**
        register a callback to be run every time the socket has something to receive
    * **on_send(callback):**
        register a callback to be run every time you call send
    * **send(self, msg, flags=0, copy=False, callback=None):**
        perform a send that will trigger the callback
        if callback is passed, on_send is also called.
        There are also send_multipart(), send_json(), send_pyobj()
    Three other methods for deactivating the callbacks:
    * **stop_on_recv():**
        turn off the recv callback
    * **stop_on_send():**
        turn off the send callback
    which simply call ``on_<evt>(None)``.
    The entire socket interface, excluding direct recv methods, is also
    provided, primarily through direct-linking the methods.
    e.g.
    >>> stream.bind is stream.socket.bind
    True
    """
    # Class-level defaults; per-instance values are assigned in __init__.
    socket = None
    io_loop = None
    poller = None
    _send_queue = None
    _recv_callback = None
    _send_callback = None
    _close_callback = None
    _state = 0
    _flushed = False
    _recv_copy = False
    def __init__(self, socket, io_loop=None):
        self.socket = socket
        self.io_loop = io_loop or IOLoop.instance()
        self.poller = zmq.Poller()
        self._send_queue = Queue()
        self._recv_callback = None
        self._send_callback = None
        self._close_callback = None
        self._recv_copy = False
        self._flushed = False
        # Register for ERROR only at first; READ/WRITE interest is added
        # on demand by on_recv()/send_multipart().
        self._state = self.io_loop.ERROR
        self._init_io_state()
        # shortcircuit some socket methods
        self.bind = self.socket.bind
        self.bind_to_random_port = self.socket.bind_to_random_port
        self.connect = self.socket.connect
        self.setsockopt = self.socket.setsockopt
        self.getsockopt = self.socket.getsockopt
        self.setsockopt_string = self.socket.setsockopt_string
        self.getsockopt_string = self.socket.getsockopt_string
        self.setsockopt_unicode = self.socket.setsockopt_unicode
        self.getsockopt_unicode = self.socket.getsockopt_unicode
    def stop_on_recv(self):
        """Disable callback and automatic receiving."""
        return self.on_recv(None)
    def stop_on_send(self):
        """Disable callback on sending."""
        return self.on_send(None)
    def stop_on_err(self):
        """DEPRECATED, does nothing"""
        gen_log.warn("on_err does nothing, and will be removed")
    def on_err(self, callback):
        """DEPRECATED, does nothing"""
        gen_log.warn("on_err does nothing, and will be removed")
    def on_recv(self, callback, copy=True):
        """Register a callback for when a message is ready to recv.
        There can be only one callback registered at a time, so each
        call to `on_recv` replaces previously registered callbacks.
        on_recv(None) disables recv event polling.
        Use on_recv_stream(callback) instead, to register a callback that will receive
        both this ZMQStream and the message, instead of just the message.
        Parameters
        ----------
        callback : callable
            callback must take exactly one argument, which will be a
            list, as returned by socket.recv_multipart()
            if callback is None, recv callbacks are disabled.
        copy : bool
            copy is passed directly to recv, so if copy is False,
            callback will receive Message objects. If copy is True,
            then callback will receive bytes/str objects.
        Returns : None
        """
        self._check_closed()
        assert callback is None or callable(callback)
        self._recv_callback = stack_context.wrap(callback)
        self._recv_copy = copy
        # READ interest tracks whether a recv callback is registered
        if callback is None:
            self._drop_io_state(self.io_loop.READ)
        else:
            self._add_io_state(self.io_loop.READ)
    def on_recv_stream(self, callback, copy=True):
        """Same as on_recv, but callback will get this stream as first argument
        callback must take exactly two arguments, as it will be called as::
            callback(stream, msg)
        Useful when a single callback should be used with multiple streams.
        """
        if callback is None:
            self.stop_on_recv()
        else:
            self.on_recv(lambda msg: callback(self, msg), copy=copy)
    def on_send(self, callback):
        """Register a callback to be called on each send
        There will be two arguments::
            callback(msg, status)
        * `msg` will be the list of sendable objects that was just sent
        * `status` will be the return result of socket.send_multipart(msg) -
          MessageTracker or None.
        Non-copying sends return a MessageTracker object whose
        `done` attribute will be True when the send is complete.
        This allows users to track when an object is safe to write to
        again.
        The second argument will always be None if copy=True
        on the send.
        Use on_send_stream(callback) to register a callback that will be passed
        this ZMQStream as the first argument, in addition to the other two.
        on_send(None) disables recv event polling.
        Parameters
        ----------
        callback : callable
            callback must take exactly two arguments, which will be
            the message being sent (always a list),
            and the return result of socket.send_multipart(msg) -
            MessageTracker or None.
            if callback is None, send callbacks are disabled.
        """
        self._check_closed()
        assert callback is None or callable(callback)
        self._send_callback = stack_context.wrap(callback)
    def on_send_stream(self, callback):
        """Same as on_send, but callback will get this stream as first argument
        Callback will be passed three arguments::
            callback(stream, msg, status)
        Useful when a single callback should be used with multiple streams.
        """
        if callback is None:
            self.stop_on_send()
        else:
            self.on_send(lambda msg, status: callback(self, msg, status))
    def send(self, msg, flags=0, copy=True, track=False, callback=None):
        """Send a message, optionally also register a new callback for sends.
        See zmq.socket.send for details.
        """
        return self.send_multipart([msg], flags=flags, copy=copy, track=track, callback=callback)
    def send_multipart(self, msg, flags=0, copy=True, track=False, callback=None):
        """Send a multipart message, optionally also register a new callback for sends.
        See zmq.socket.send_multipart for details.
        """
        # The actual send happens later in _handle_send, when the ioloop
        # reports the socket as writable; here we only enqueue.
        kwargs = dict(flags=flags, copy=copy, track=track)
        self._send_queue.put((msg, kwargs))
        callback = callback or self._send_callback
        if callback is not None:
            self.on_send(callback)
        else:
            # noop callback
            self.on_send(lambda *args: None)
        self._add_io_state(self.io_loop.WRITE)
    def send_string(self, u, flags=0, encoding='utf-8', callback=None):
        """Send a unicode message with an encoding.
        See zmq.socket.send_unicode for details.
        """
        if not isinstance(u, basestring):
            raise TypeError("unicode/str objects only")
        return self.send(u.encode(encoding), flags=flags, callback=callback)
    send_unicode = send_string
    def send_json(self, obj, flags=0, callback=None):
        """Send json-serialized version of an object.
        See zmq.socket.send_json for details.
        """
        if jsonapi is None:
            raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
        else:
            msg = jsonapi.dumps(obj)
            return self.send(msg, flags=flags, callback=callback)
    def send_pyobj(self, obj, flags=0, protocol=-1, callback=None):
        """Send a Python object as a message using pickle to serialize.
        See zmq.socket.send_json for details.
        """
        msg = pickle.dumps(obj, protocol)
        return self.send(msg, flags, callback=callback)
    def _finish_flush(self):
        """callback for unsetting _flushed flag."""
        self._flushed = False
    def flush(self, flag=zmq.POLLIN|zmq.POLLOUT, limit=None):
        """Flush pending messages.
        This method safely handles all pending incoming and/or outgoing messages,
        bypassing the inner loop, passing them to the registered callbacks.
        A limit can be specified, to prevent blocking under high load.
        flush will return the first time ANY of these conditions are met:
        * No more events matching the flag are pending.
        * the total number of events handled reaches the limit.
        Note that if ``flag|POLLIN != 0``, recv events will be flushed even if no callback
        is registered, unlike normal IOLoop operation. This allows flush to be
        used to remove *and ignore* incoming messages.
        Parameters
        ----------
        flag : int, default=POLLIN|POLLOUT
            0MQ poll flags.
            If flag|POLLIN, recv events will be flushed.
            If flag|POLLOUT, send events will be flushed.
            Both flags can be set at once, which is the default.
        limit : None or int, optional
            The maximum number of messages to send or receive.
            Both send and recv count against this limit.
        Returns
        -------
        int : count of events handled (both send and recv)
        """
        self._check_closed()
        # unset self._flushed, so callbacks will execute, in case flush has
        # already been called this iteration
        already_flushed = self._flushed
        self._flushed = False
        # initialize counters
        count = 0
        def update_flag():
            """Update the poll flag, to prevent registering POLLOUT events
            if we don't have pending sends."""
            return flag & zmq.POLLIN | (self.sending() and flag & zmq.POLLOUT)
        flag = update_flag()
        if not flag:
            # nothing to do
            return 0
        self.poller.register(self.socket, flag)
        events = self.poller.poll(0)
        while events and (not limit or count < limit):
            s,event = events[0]
            if event & zmq.POLLIN: # receiving
                self._handle_recv()
                count += 1
                if self.socket is None:
                    # break if socket was closed during callback
                    break
            if event & zmq.POLLOUT and self.sending():
                self._handle_send()
                count += 1
                if self.socket is None:
                    # break if socket was closed during callback
                    break
            flag = update_flag()
            if flag:
                self.poller.register(self.socket, flag)
                events = self.poller.poll(0)
            else:
                events = []
        if count: # only bypass loop if we actually flushed something
            # skip send/recv callbacks this iteration
            self._flushed = True
            # reregister them at the end of the loop
            if not already_flushed: # don't need to do it again
                self.io_loop.add_callback(self._finish_flush)
        elif already_flushed:
            self._flushed = True
        # update ioloop poll state, which may have changed
        self._rebuild_io_state()
        return count
    def set_close_callback(self, callback):
        """Call the given callback when the stream is closed."""
        self._close_callback = stack_context.wrap(callback)
    def close(self, linger=None):
        """Close this stream."""
        if self.socket is not None:
            self.io_loop.remove_handler(self.socket)
            self.socket.close(linger)
            self.socket = None
            if self._close_callback:
                self._run_callback(self._close_callback)
    def receiving(self):
        """Returns True if we are currently receiving from the stream."""
        return self._recv_callback is not None
    def sending(self):
        """Returns True if we are currently sending to the stream."""
        return not self._send_queue.empty()
    def closed(self):
        """Return True if this stream has been closed."""
        return self.socket is None
    def _run_callback(self, callback, *args, **kwargs):
        """Wrap running callbacks in try/except to allow us to
        close our socket."""
        try:
            # Use a NullContext to ensure that all StackContexts are run
            # inside our blanket exception handler rather than outside.
            with stack_context.NullContext():
                callback(*args, **kwargs)
        except:
            gen_log.error("Uncaught exception, closing connection.",
                          exc_info=True)
            # Close the socket on an uncaught exception from a user callback
            # (It would eventually get closed when the socket object is
            # gc'd, but we don't want to rely on gc happening before we
            # run out of file descriptors)
            self.close()
            # Re-raise the exception so that IOLoop.handle_callback_exception
            # can see it and log the error
            raise
    def _handle_events(self, fd, events):
        """This method is the actual handler for IOLoop, that gets called whenever
        an event on my socket is posted. It dispatches to _handle_recv, etc."""
        # print "handling events"
        if not self.socket:
            gen_log.warning("Got events for closed stream %s", fd)
            return
        try:
            # dispatch events:
            if events & IOLoop.ERROR:
                gen_log.error("got POLLERR event on ZMQStream, which doesn't make sense")
                return
            if events & IOLoop.READ:
                self._handle_recv()
                if not self.socket:
                    return
            if events & IOLoop.WRITE:
                self._handle_send()
                if not self.socket:
                    return
            # rebuild the poll state
            self._rebuild_io_state()
        except:
            gen_log.error("Uncaught exception, closing connection.",
                          exc_info=True)
            self.close()
            raise
    def _handle_recv(self):
        """Handle a recv event."""
        # When flush() already delivered events this iteration, skip to
        # avoid delivering the same event twice.
        if self._flushed:
            return
        try:
            msg = self.socket.recv_multipart(zmq.NOBLOCK, copy=self._recv_copy)
        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                # state changed since poll event
                pass
            else:
                gen_log.error("RECV Error: %s"%zmq.strerror(e.errno))
        else:
            if self._recv_callback:
                callback = self._recv_callback
                # self._recv_callback = None
                self._run_callback(callback, msg)
        # self.update_state()
    def _handle_send(self):
        """Handle a send event."""
        if self._flushed:
            return
        if not self.sending():
            gen_log.error("Shouldn't have handled a send event")
            return
        msg, kwargs = self._send_queue.get()
        try:
            status = self.socket.send_multipart(msg, **kwargs)
        except zmq.ZMQError as e:
            gen_log.error("SEND Error: %s", e)
            status = e
        if self._send_callback:
            callback = self._send_callback
            self._run_callback(callback, msg, status)
        # self.update_state()
    def _check_closed(self):
        if not self.socket:
            raise IOError("Stream is closed")
    def _rebuild_io_state(self):
        """rebuild io state based on self.sending() and receiving()"""
        if self.socket is None:
            return
        state = self.io_loop.ERROR
        if self.receiving():
            state |= self.io_loop.READ
        if self.sending():
            state |= self.io_loop.WRITE
        if state != self._state:
            self._state = state
            self._update_handler(state)
    def _add_io_state(self, state):
        """Add io_state to poller."""
        if not self._state & state:
            self._state = self._state | state
            self._update_handler(self._state)
    def _drop_io_state(self, state):
        """Stop poller from watching an io_state."""
        if self._state & state:
            self._state = self._state & (~state)
            self._update_handler(self._state)
    def _update_handler(self, state):
        """Update IOLoop handler with state."""
        if self.socket is None:
            return
        self.io_loop.update_handler(self.socket, state)
    def _init_io_state(self):
        """initialize the ioloop event handler"""
        with stack_context.NullContext():
            self.io_loop.add_handler(self.socket, self._handle_events, self._state)
| |
"""
Test for the chocolatey module
"""
import os
import pytest
import salt.modules.chocolatey as chocolatey
import salt.utils
import salt.utils.platform
from tests.support.mock import MagicMock, patch
# Chocolatey is Windows-only: skip every test in this module elsewhere.
pytestmark = [
    pytest.mark.skipif(
        not salt.utils.platform.is_windows(), reason="Not a Windows system"
    )
]
@pytest.fixture(scope="module")
def choco_path():
    """Fixed fake path to a chocolatey executable used by the tests."""
    return "C:\\path\\to\\chocolatey.exe"
@pytest.fixture(scope="module")
def choco_path_pd():
    """Expected chocolatey.exe location under ProgramData."""
    program_data = os.environ.get("ProgramData")
    return os.path.join(program_data, "Chocolatey", "bin", "chocolatey.exe")
@pytest.fixture(scope="module")
def choco_path_sd():
    """Expected chocolatey.bat location on the SystemDrive (older installs)."""
    system_drive = os.environ.get("SystemDrive")
    return os.path.join(system_drive, "Chocolatey", "bin", "chocolatey.bat")
@pytest.fixture(scope="module")
def mock_false():
    """MagicMock that always returns False."""
    mock = MagicMock(return_value=False)
    return mock
@pytest.fixture(scope="module")
def mock_true():
    """MagicMock that always returns True."""
    mock = MagicMock(return_value=True)
    return mock
@pytest.fixture()
def configure_loader_modules():
    """Provide empty loader dunders for the chocolatey module."""
    setup = {chocolatey: {"__context__": {}, "__salt__": {}}}
    return setup
def test__clear_context(choco_path):
    """
    Tests _clear_context function
    """
    fake_context = {
        "chocolatey._yes": ["--yes"],
        "chocolatey._path": choco_path,
        "chocolatey._version": "0.9.9",
    }
    with patch.dict(chocolatey.__context__, fake_context):
        chocolatey._clear_context()
        # All chocolatey items should have been removed from __context__
        assert chocolatey.__context__ == {}
def test__yes_context():
    """
    Tests _yes function when it exists in __context__
    """
    expected = ["--yes"]
    with patch.dict(chocolatey.__context__, {"chocolatey._yes": ["--yes"]}):
        result = chocolatey._yes()
        # Correct value returned
        assert result == expected
        # __context__ still holds the cached value
        assert chocolatey.__context__["chocolatey._yes"] == expected
def test__yes_version_greater():
    """
    Test _yes when Chocolatey version is greater than 0.9.9
    """
    expected = ["--yes"]
    version_mock = MagicMock(return_value="10.0.0")
    with patch("salt.modules.chocolatey.chocolatey_version", version_mock):
        result = chocolatey._yes()
        # Correct value returned
        assert result == expected
        # __context__ populated as a side effect
        assert chocolatey.__context__["chocolatey._yes"] == expected
def test__yes_version_less_than():
    """
    Test _yes when Chocolatey version is less than 0.9.9
    """
    expected = []
    version_mock = MagicMock(return_value="0.9.0")
    with patch("salt.modules.chocolatey.chocolatey_version", version_mock):
        result = chocolatey._yes()
        # Correct value returned
        assert result == expected
        # __context__ populated as a side effect
        assert chocolatey.__context__["chocolatey._yes"] == expected
def test__find_chocolatey_context(choco_path):
    """
    Test _find_chocolatey when it exists in __context__
    """
    cached = {"chocolatey._path": choco_path}
    with patch.dict(chocolatey.__context__, cached):
        # Cached path should be returned directly
        assert chocolatey._find_chocolatey() == choco_path
def test__find_chocolatey_which(choco_path):
    """
    Test _find_chocolatey when found with `cmd.which`
    """
    which_mock = MagicMock(return_value=choco_path)
    with patch.dict(chocolatey.__salt__, {"cmd.which": which_mock}):
        result = chocolatey._find_chocolatey()
        # Correct path returned
        assert result == choco_path
        # __context__ populated as a side effect
        assert chocolatey.__context__["chocolatey._path"] == choco_path
def test__find_chocolatey_programdata(mock_false, mock_true, choco_path_pd):
    """
    Test _find_chocolatey when found in ProgramData
    """
    with patch.dict(chocolatey.__salt__, {"cmd.which": mock_false}), patch(
        "os.path.isfile", mock_true
    ):
        result = chocolatey._find_chocolatey()
        # Correct path returned
        assert result == choco_path_pd
        # __context__ populated as a side effect
        assert chocolatey.__context__["chocolatey._path"] == choco_path_pd
def test__find_chocolatey_systemdrive(mock_false, choco_path_sd):
    """
    Test _find_chocolatey when found on SystemDrive (older versions)
    """
    # First isfile check (ProgramData) fails, second (SystemDrive) succeeds
    isfile_mock = MagicMock(side_effect=[False, True])
    with patch.dict(chocolatey.__salt__, {"cmd.which": mock_false}), patch(
        "os.path.isfile", isfile_mock
    ):
        result = chocolatey._find_chocolatey()
        # Correct path returned
        assert result == choco_path_sd
        # __context__ populated as a side effect
        assert chocolatey.__context__["chocolatey._path"] == choco_path_sd
def test_version_check_remote_false():
    """
    Test version when remote is False
    """
    installed = {"ack": ["3.1.1"]}
    with patch.object(chocolatey, "list_", return_value=installed):
        result = chocolatey.version("ack", check_remote=False)
        # Only the locally installed version should be reported
        assert result == {"ack": ["3.1.1"]}
def test_version_check_remote_true():
    """
    Test version when remote is True
    """
    # First call returns local packages, second the remote listing
    local = {"ack": ["3.1.1"]}
    remote = {"ack": ["3.1.1"], "Wolfpack": ["3.0.17"], "blackbird": ["1.0.79.3"]}
    with patch.object(chocolatey, "list_", side_effect=[local, remote]):
        result = chocolatey.version("ack", check_remote=True)
        # Both installed and available versions should be reported
        assert result == {"ack": {"available": ["3.1.1"], "installed": ["3.1.1"]}}
def test_version_check_remote_true_not_available():
    """
    Test version when remote is True but remote version is unavailable
    """
    # First call returns local packages; the remote listing lacks "ack"
    local = {"ack": ["3.1.1"]}
    remote = {"Wolfpack": ["3.0.17"], "blackbird": ["1.0.79.3"]}
    with patch.object(chocolatey, "list_", side_effect=[local, remote]):
        result = chocolatey.version("ack", check_remote=True)
        # Only the installed version should be reported
        assert result == {"ack": {"installed": ["3.1.1"]}}
| |
"""Support for Dyson Pure Cool link fan."""
import logging
from libpurecool.const import FanMode, FanSpeed, NightMode, Oscillation
from libpurecool.dyson_pure_cool import DysonPureCool
from libpurecool.dyson_pure_cool_link import DysonPureCoolLink
from libpurecool.dyson_pure_state import DysonPureCoolState
from libpurecool.dyson_pure_state_v2 import DysonPureCoolV2State
import voluptuous as vol
from homeassistant.components.fan import (
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SUPPORT_OSCILLATE,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.const import ATTR_ENTITY_ID
import homeassistant.helpers.config_validation as cv
from . import DYSON_DEVICES
_LOGGER = logging.getLogger(__name__)
# Keys used in service payloads and entity state attributes
ATTR_NIGHT_MODE = "night_mode"
ATTR_AUTO_MODE = "auto_mode"
ATTR_ANGLE_LOW = "angle_low"
ATTR_ANGLE_HIGH = "angle_high"
ATTR_FLOW_DIRECTION_FRONT = "flow_direction_front"
ATTR_TIMER = "timer"
ATTR_HEPA_FILTER = "hepa_filter"
ATTR_CARBON_FILTER = "carbon_filter"
ATTR_DYSON_SPEED = "dyson_speed"
ATTR_DYSON_SPEED_LIST = "dyson_speed_list"
DYSON_DOMAIN = "dyson"
# hass.data key holding the fan entities created by this platform
DYSON_FAN_DEVICES = "dyson_fan_devices"
# Service names registered under the dyson domain in setup_platform
SERVICE_SET_NIGHT_MODE = "set_night_mode"
SERVICE_SET_AUTO_MODE = "set_auto_mode"
SERVICE_SET_ANGLE = "set_angle"
SERVICE_SET_FLOW_DIRECTION_FRONT = "set_flow_direction_front"
SERVICE_SET_TIMER = "set_timer"
SERVICE_SET_DYSON_SPEED = "set_speed"
# Voluptuous validation schemas for the service payloads above
DYSON_SET_NIGHT_MODE_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_id,
        vol.Required(ATTR_NIGHT_MODE): cv.boolean,
    }
)
SET_AUTO_MODE_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_id,
        vol.Required(ATTR_AUTO_MODE): cv.boolean,
    }
)
SET_ANGLE_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_id,
        vol.Required(ATTR_ANGLE_LOW): cv.positive_int,
        vol.Required(ATTR_ANGLE_HIGH): cv.positive_int,
    }
)
SET_FLOW_DIRECTION_FRONT_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_id,
        vol.Required(ATTR_FLOW_DIRECTION_FRONT): cv.boolean,
    }
)
SET_TIMER_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_id,
        vol.Required(ATTR_TIMER): cv.positive_int,
    }
)
SET_DYSON_SPEED_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_id,
        vol.Required(ATTR_DYSON_SPEED): cv.positive_int,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Dyson fan components."""
    if discovery_info is None:
        return
    _LOGGER.debug("Creating new Dyson fans")
    if DYSON_FAN_DEVICES not in hass.data:
        hass.data[DYSON_FAN_DEVICES] = []
    # Get Dyson Devices from parent component
    has_purecool_devices = False
    # Track serials already wrapped so repeated setup calls don't duplicate
    device_serials = [device.serial for device in hass.data[DYSON_FAN_DEVICES]]
    for device in hass.data[DYSON_DEVICES]:
        if device.serial not in device_serials:
            # DysonPureCool (TP04/DP04) and DysonPureCoolLink use different
            # entity classes with different capabilities
            if isinstance(device, DysonPureCool):
                has_purecool_devices = True
                dyson_entity = DysonPureCoolDevice(device)
                hass.data[DYSON_FAN_DEVICES].append(dyson_entity)
            elif isinstance(device, DysonPureCoolLink):
                dyson_entity = DysonPureCoolLinkDevice(hass, device)
                hass.data[DYSON_FAN_DEVICES].append(dyson_entity)
    add_entities(hass.data[DYSON_FAN_DEVICES])
    def service_handle(service):
        """Handle the Dyson services.

        Resolves the target entity by entity_id and dispatches the call
        to the matching entity method.
        """
        entity_id = service.data[ATTR_ENTITY_ID]
        fan_device = next(
            (fan for fan in hass.data[DYSON_FAN_DEVICES] if fan.entity_id == entity_id),
            None,
        )
        if fan_device is None:
            _LOGGER.warning("Unable to find Dyson fan device %s", str(entity_id))
            return
        if service.service == SERVICE_SET_NIGHT_MODE:
            fan_device.set_night_mode(service.data[ATTR_NIGHT_MODE])
        if service.service == SERVICE_SET_AUTO_MODE:
            fan_device.set_auto_mode(service.data[ATTR_AUTO_MODE])
        if service.service == SERVICE_SET_ANGLE:
            fan_device.set_angle(
                service.data[ATTR_ANGLE_LOW], service.data[ATTR_ANGLE_HIGH]
            )
        if service.service == SERVICE_SET_FLOW_DIRECTION_FRONT:
            fan_device.set_flow_direction_front(service.data[ATTR_FLOW_DIRECTION_FRONT])
        if service.service == SERVICE_SET_TIMER:
            fan_device.set_timer(service.data[ATTR_TIMER])
        if service.service == SERVICE_SET_DYSON_SPEED:
            fan_device.set_dyson_speed(service.data[ATTR_DYSON_SPEED])
    # Register dyson service(s)
    hass.services.register(
        DYSON_DOMAIN,
        SERVICE_SET_NIGHT_MODE,
        service_handle,
        schema=DYSON_SET_NIGHT_MODE_SCHEMA,
    )
    hass.services.register(
        DYSON_DOMAIN, SERVICE_SET_AUTO_MODE, service_handle, schema=SET_AUTO_MODE_SCHEMA
    )
    # These services are only meaningful on PureCool (TP04/DP04) hardware
    if has_purecool_devices:
        hass.services.register(
            DYSON_DOMAIN, SERVICE_SET_ANGLE, service_handle, schema=SET_ANGLE_SCHEMA
        )
        hass.services.register(
            DYSON_DOMAIN,
            SERVICE_SET_FLOW_DIRECTION_FRONT,
            service_handle,
            schema=SET_FLOW_DIRECTION_FRONT_SCHEMA,
        )
        hass.services.register(
            DYSON_DOMAIN, SERVICE_SET_TIMER, service_handle, schema=SET_TIMER_SCHEMA
        )
        hass.services.register(
            DYSON_DOMAIN,
            SERVICE_SET_DYSON_SPEED,
            service_handle,
            schema=SET_DYSON_SPEED_SCHEMA,
        )
class DysonPureCoolLinkDevice(FanEntity):
"""Representation of a Dyson fan."""
def __init__(self, hass, device):
"""Initialize the fan."""
_LOGGER.debug("Creating device %s", device.name)
self.hass = hass
self._device = device
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self._device.add_message_listener(self.on_message)
def on_message(self, message):
"""Call when new messages received from the fan."""
if isinstance(message, DysonPureCoolState):
_LOGGER.debug("Message received for fan device %s: %s", self.name, message)
self.schedule_update_ha_state()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the display name of this fan."""
return self._device.name
def set_speed(self, speed: str) -> None:
"""Set the speed of the fan. Never called ??."""
_LOGGER.debug("Set fan speed to: %s", speed)
if speed == FanSpeed.FAN_SPEED_AUTO.value:
self._device.set_configuration(fan_mode=FanMode.AUTO)
else:
fan_speed = FanSpeed(f"{int(speed):04d}")
self._device.set_configuration(fan_mode=FanMode.FAN, fan_speed=fan_speed)
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn on the fan."""
_LOGGER.debug("Turn on fan %s with speed %s", self.name, speed)
if speed:
if speed == FanSpeed.FAN_SPEED_AUTO.value:
self._device.set_configuration(fan_mode=FanMode.AUTO)
else:
fan_speed = FanSpeed(f"{int(speed):04d}")
self._device.set_configuration(
fan_mode=FanMode.FAN, fan_speed=fan_speed
)
else:
# Speed not set, just turn on
self._device.set_configuration(fan_mode=FanMode.FAN)
def turn_off(self, **kwargs) -> None:
"""Turn off the fan."""
_LOGGER.debug("Turn off fan %s", self.name)
self._device.set_configuration(fan_mode=FanMode.OFF)
def oscillate(self, oscillating: bool) -> None:
"""Turn on/off oscillating."""
_LOGGER.debug("Turn oscillation %s for device %s", oscillating, self.name)
if oscillating:
self._device.set_configuration(oscillation=Oscillation.OSCILLATION_ON)
else:
self._device.set_configuration(oscillation=Oscillation.OSCILLATION_OFF)
@property
def oscillating(self):
"""Return the oscillation state."""
return self._device.state and self._device.state.oscillation == "ON"
@property
def is_on(self):
"""Return true if the entity is on."""
if self._device.state:
return self._device.state.fan_mode in ["FAN", "AUTO"]
return False
@property
def speed(self) -> str:
"""Return the current speed."""
if self._device.state:
if self._device.state.speed == FanSpeed.FAN_SPEED_AUTO.value:
return self._device.state.speed
return int(self._device.state.speed)
return None
@property
def current_direction(self):
"""Return direction of the fan [forward, reverse]."""
return None
@property
def night_mode(self):
"""Return Night mode."""
return self._device.state.night_mode == "ON"
def set_night_mode(self, night_mode: bool) -> None:
    """Switch night mode on or off."""
    _LOGGER.debug("Set %s night mode %s", self.name, night_mode)
    mode = NightMode.NIGHT_MODE_ON if night_mode else NightMode.NIGHT_MODE_OFF
    self._device.set_configuration(night_mode=mode)

@property
def auto_mode(self):
    """Return True while the fan is running in AUTO mode."""
    return self._device.state.fan_mode == "AUTO"

def set_auto_mode(self, auto_mode: bool) -> None:
    """Switch between AUTO and manual FAN mode."""
    _LOGGER.debug("Set %s auto mode %s", self.name, auto_mode)
    target = FanMode.AUTO if auto_mode else FanMode.FAN
    self._device.set_configuration(fan_mode=target)
@property
def speed_list(self) -> list:
    """Get the list of available speeds (AUTO plus 1-10 as ints)."""
    numeric_speeds = [
        int(FanSpeed[f"FAN_SPEED_{level}"].value) for level in range(1, 11)
    ]
    return [FanSpeed.FAN_SPEED_AUTO.value] + numeric_speeds

@property
def supported_features(self) -> int:
    """Flag supported features."""
    return SUPPORT_OSCILLATE | SUPPORT_SET_SPEED

@property
def device_state_attributes(self) -> dict:
    """Return optional state attributes."""
    return {ATTR_NIGHT_MODE: self.night_mode, ATTR_AUTO_MODE: self.auto_mode}
class DysonPureCoolDevice(FanEntity):
    """Representation of a Dyson Purecool (TP04/DP04) fan."""

    def __init__(self, device):
        """Initialize the fan."""
        self._device = device

    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        self._device.add_message_listener(self.on_message)

    def on_message(self, message):
        """Call when new messages are received from the fan."""
        # Only V2 state messages are relevant to this entity.
        if isinstance(message, DysonPureCoolV2State):
            _LOGGER.debug("Message received for fan device %s: %s", self.name, message)
            self.schedule_update_ha_state()

    @property
    def should_poll(self):
        """No polling needed; the device pushes state messages."""
        return False

    @property
    def name(self):
        """Return the display name of this fan."""
        return self._device.name

    def turn_on(self, speed: str = None, **kwargs) -> None:
        """Turn on the fan, optionally at a named speed."""
        _LOGGER.debug("Turn on fan %s", self.name)
        if speed is not None:
            self.set_speed(speed)
        else:
            self._device.turn_on()

    def set_speed(self, speed: str) -> None:
        """Set the speed of the fan from a named speed (low/medium/high)."""
        if speed == SPEED_LOW:
            self._device.set_fan_speed(FanSpeed.FAN_SPEED_4)
        elif speed == SPEED_MEDIUM:
            self._device.set_fan_speed(FanSpeed.FAN_SPEED_7)
        elif speed == SPEED_HIGH:
            self._device.set_fan_speed(FanSpeed.FAN_SPEED_10)

    def turn_off(self, **kwargs):
        """Turn off the fan."""
        _LOGGER.debug("Turn off fan %s", self.name)
        self._device.turn_off()

    def set_dyson_speed(self, speed: str = None) -> None:
        """Set the exact (1-10) speed of the purecool fan."""
        _LOGGER.debug("Set exact speed for fan %s", self.name)
        # FanSpeed enum values are zero-padded 4-character strings.
        fan_speed = FanSpeed(f"{int(speed):04d}")
        self._device.set_fan_speed(fan_speed)

    def oscillate(self, oscillating: bool) -> None:
        """Turn on/off oscillating."""
        _LOGGER.debug("Turn oscillation %s for device %s", oscillating, self.name)
        if oscillating:
            self._device.enable_oscillation()
        else:
            self._device.disable_oscillation()

    def set_night_mode(self, night_mode: bool) -> None:
        """Turn on/off night mode."""
        _LOGGER.debug("Turn night mode %s for device %s", night_mode, self.name)
        if night_mode:
            self._device.enable_night_mode()
        else:
            self._device.disable_night_mode()

    def set_auto_mode(self, auto_mode: bool) -> None:
        """Turn auto mode on/off."""
        _LOGGER.debug("Turn auto mode %s for device %s", auto_mode, self.name)
        if auto_mode:
            self._device.enable_auto_mode()
        else:
            self._device.disable_auto_mode()

    def set_angle(self, angle_low: int, angle_high: int) -> None:
        """Set device oscillation angle range."""
        _LOGGER.debug(
            "set low %s and high angle %s for device %s",
            angle_low,
            angle_high,
            self.name,
        )
        self._device.enable_oscillation(angle_low, angle_high)

    def set_flow_direction_front(self, flow_direction_front: bool) -> None:
        """Set frontal airflow direction."""
        _LOGGER.debug(
            "Set frontal flow direction to %s for device %s",
            flow_direction_front,
            self.name,
        )
        if flow_direction_front:
            self._device.enable_frontal_direction()
        else:
            self._device.disable_frontal_direction()

    def set_timer(self, timer) -> None:
        """Set sleep timer; 0 disables it."""
        _LOGGER.debug("Set timer to %s for device %s", timer, self.name)
        if timer == 0:
            self._device.disable_sleep_timer()
        else:
            self._device.enable_sleep_timer(timer)

    @property
    def oscillating(self):
        """Return the oscillation state."""
        # NOTE: "OION" is the raw value the V2 state reports for
        # oscillation-on (not a typo of "ON").
        return self._device.state and self._device.state.oscillation == "OION"

    @property
    def is_on(self):
        """Return true if the entity is on."""
        if self._device.state:
            return self._device.state.fan_power == "ON"
        # No state received yet: report off instead of returning None,
        # matching the behavior of the other Dyson fan entity.
        return False

    @property
    def speed(self):
        """Return the current speed bucketed as low/medium/high."""
        # Guard like the other state-backed properties; previously this
        # raised AttributeError before the first state message arrived.
        if not self._device.state:
            return None
        speed_map = {
            FanSpeed.FAN_SPEED_1.value: SPEED_LOW,
            FanSpeed.FAN_SPEED_2.value: SPEED_LOW,
            FanSpeed.FAN_SPEED_3.value: SPEED_LOW,
            FanSpeed.FAN_SPEED_4.value: SPEED_LOW,
            FanSpeed.FAN_SPEED_AUTO.value: SPEED_MEDIUM,
            FanSpeed.FAN_SPEED_5.value: SPEED_MEDIUM,
            FanSpeed.FAN_SPEED_6.value: SPEED_MEDIUM,
            FanSpeed.FAN_SPEED_7.value: SPEED_MEDIUM,
            FanSpeed.FAN_SPEED_8.value: SPEED_HIGH,
            FanSpeed.FAN_SPEED_9.value: SPEED_HIGH,
            FanSpeed.FAN_SPEED_10.value: SPEED_HIGH,
        }
        return speed_map[self._device.state.speed]

    @property
    def dyson_speed(self):
        """Return the current exact speed (int), or the AUTO enum value."""
        if self._device.state:
            if self._device.state.speed == FanSpeed.FAN_SPEED_AUTO.value:
                return self._device.state.speed
            return int(self._device.state.speed)

    @property
    def night_mode(self):
        """Return Night mode."""
        return self._device.state.night_mode == "ON"

    @property
    def auto_mode(self):
        """Return Auto mode."""
        return self._device.state.auto_mode == "ON"

    @property
    def angle_low(self):
        """Return the low oscillation angle."""
        return int(self._device.state.oscillation_angle_low)

    @property
    def angle_high(self):
        """Return the high oscillation angle."""
        return int(self._device.state.oscillation_angle_high)

    @property
    def flow_direction_front(self):
        """Return frontal flow direction."""
        return self._device.state.front_direction == "ON"

    @property
    def timer(self):
        """Return sleep timer value."""
        return self._device.state.sleep_timer

    @property
    def hepa_filter(self):
        """Return the HEPA filter state."""
        return int(self._device.state.hepa_filter_state)

    @property
    def carbon_filter(self):
        """Return the carbon filter state.

        "INV" (invalid/not fitted) is passed through as-is; any other
        value is a numeric remaining-life percentage.
        """
        if self._device.state.carbon_filter_state == "INV":
            return self._device.state.carbon_filter_state
        return int(self._device.state.carbon_filter_state)

    @property
    def speed_list(self) -> list:
        """Get the list of available speeds."""
        return [SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]

    @property
    def dyson_speed_list(self) -> list:
        """Get the list of available dyson speeds."""
        return [
            int(FanSpeed.FAN_SPEED_1.value),
            int(FanSpeed.FAN_SPEED_2.value),
            int(FanSpeed.FAN_SPEED_3.value),
            int(FanSpeed.FAN_SPEED_4.value),
            int(FanSpeed.FAN_SPEED_5.value),
            int(FanSpeed.FAN_SPEED_6.value),
            int(FanSpeed.FAN_SPEED_7.value),
            int(FanSpeed.FAN_SPEED_8.value),
            int(FanSpeed.FAN_SPEED_9.value),
            int(FanSpeed.FAN_SPEED_10.value),
        ]

    @property
    def device_serial(self):
        """Return fan's serial number."""
        return self._device.serial

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return SUPPORT_OSCILLATE | SUPPORT_SET_SPEED

    @property
    def device_state_attributes(self) -> dict:
        """Return optional state attributes."""
        return {
            ATTR_NIGHT_MODE: self.night_mode,
            ATTR_AUTO_MODE: self.auto_mode,
            ATTR_ANGLE_LOW: self.angle_low,
            ATTR_ANGLE_HIGH: self.angle_high,
            ATTR_FLOW_DIRECTION_FRONT: self.flow_direction_front,
            ATTR_TIMER: self.timer,
            ATTR_HEPA_FILTER: self.hepa_filter,
            ATTR_CARBON_FILTER: self.carbon_filter,
            ATTR_DYSON_SPEED: self.dyson_speed,
            ATTR_DYSON_SPEED_LIST: self.dyson_speed_list,
        }
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
from functools import wraps # noqa
import json
import os
from ceilometerclient.v2 import client as ceilometer_client
from cinderclient import client as cinder_client
from django.conf import settings
from django.contrib.messages.storage import default_storage # noqa
from django.core.handlers import wsgi
from django.core import urlresolvers
from django.test.client import RequestFactory # noqa
from django.test import utils as django_test_utils
from django.utils.importlib import import_module # noqa
from django.utils import unittest
import glanceclient
from heatclient import client as heat_client
import httplib2
from keystoneclient.v2_0 import client as keystone_client
import mock
from mox3 import mox
from neutronclient.v2_0 import client as neutron_client
from novaclient.v2 import client as nova_client
from openstack_auth import user
from openstack_auth import utils
import six
from six import moves
from swiftclient import client as swift_client
from horizon import base
from horizon import conf
from horizon.test import helpers as horizon_helpers
from openstack_dashboard import api
from openstack_dashboard import context_processors
from openstack_dashboard.test.test_data import utils as test_utils
# Makes output of failing mox tests much easier to read: mox prints the
# repr of expected/actual call arguments, and the default WSGIRequest repr
# dumps the whole environ dict.
wsgi.WSGIRequest.__repr__ = lambda self: "<class 'django.http.HttpRequest'>"
def create_stubs(stubs_to_create=None):
    """Decorator to simplify setting up multiple stubs at once via mox.

    :param stubs_to_create: methods to stub in one or more modules
    :type stubs_to_create: dict

    The keys are python paths to the module containing the methods to mock.

    To mock a method in openstack_dashboard/api/nova.py, the key is::

        api.nova

    The values are either a tuple or list of methods to mock in the module
    indicated by the key.

    For example::

        ('server_list',)
    -or-
        ('flavor_list', 'server_list',)
    -or-
        ['flavor_list', 'server_list']

    Additionally, multiple modules can be mocked at once::

        {
            api.nova: ('flavor_list', 'server_list'),
            api.glance: ('image_list_detailed',),
        }
    """
    # None instead of a mutable {} default: a shared default dict is the
    # classic Python pitfall, even though this one was never mutated.
    if stubs_to_create is None:
        stubs_to_create = {}
    if not isinstance(stubs_to_create, dict):
        raise TypeError("create_stub must be passed a dict, but a %s was "
                        "given." % type(stubs_to_create).__name__)

    def inner_stub_out(fn):
        @wraps(fn)
        def instance_stub_out(self, *args, **kwargs):
            for key in stubs_to_create:
                if not isinstance(stubs_to_create[key], (tuple, list)):
                    raise TypeError("The values of the create_stub "
                                    "dict must be lists or tuples, but "
                                    "is a %s."
                                    % type(stubs_to_create[key]).__name__)
                for value in stubs_to_create[key]:
                    # Stubs are installed on self.mox at call time so each
                    # test gets fresh stubs.
                    self.mox.StubOutWithMock(key, value)
            return fn(self, *args, **kwargs)
        return instance_stub_out
    return inner_stub_out
class RequestFactoryWithMessages(RequestFactory):
    """RequestFactory whose requests support contrib.messages."""

    def _attach_extras(self, req):
        # Every generated request gets a user, a fake session, and a
        # message storage so views using contrib.messages do not blow up.
        req.user = utils.get_user(req)
        req.session = []
        req._messages = default_storage(req)
        return req

    def get(self, *args, **kwargs):
        return self._attach_extras(
            super(RequestFactoryWithMessages, self).get(*args, **kwargs))

    def post(self, *args, **kwargs):
        return self._attach_extras(
            super(RequestFactoryWithMessages, self).post(*args, **kwargs))
@unittest.skipIf(os.environ.get('SKIP_UNITTESTS', False),
                 "The SKIP_UNITTESTS env variable is set.")
class TestCase(horizon_helpers.TestCase):
    """Specialized base test case class for Horizon.

    It gives access to numerous additional features:

    * A full suite of test data through various attached objects and
      managers (e.g. ``self.servers``, ``self.user``, etc.). See the
      docs for
      :class:`~openstack_dashboard.test.test_data.utils.TestData`
      for more information.
    * The ``mox`` mocking framework via ``self.mox``.
    * A set of request context data via ``self.context``.
    * A ``RequestFactory`` class which supports Django's ``contrib.messages``
      framework via ``self.factory``.
    * A ready-to-go request object via ``self.request``.
    * The ability to override specific time data controls for easier testing.
    * Several handy additional assertion methods.
    """

    def setUp(self):
        """Install network/context-processor guards and panel mocks."""
        def fake_conn_request(*args, **kwargs):
            # Any attempt to reach the network through httplib2 during a
            # unit test is a bug; fail loudly.
            raise Exception("An external URI request tried to escape through "
                            "an httplib2 client. Args: %s, kwargs: %s"
                            % (args, kwargs))

        self._real_conn_request = httplib2.Http._conn_request
        httplib2.Http._conn_request = fake_conn_request

        self._real_context_processor = context_processors.openstack
        context_processors.openstack = lambda request: self.context

        self.patchers = {}
        self.add_panel_mocks()
        super(TestCase, self).setUp()

    def _setup_test_data(self):
        super(TestCase, self)._setup_test_data()
        test_utils.load_test_data(self)
        self.context = {'authorized_tenants': self.tenants.list()}

    def _setup_factory(self):
        # For some magical reason we need a copy of this here.
        self.factory = RequestFactoryWithMessages()

    def _setup_user(self):
        # Keep a handle on the real get_user so tearDown can restore it.
        self._real_get_user = utils.get_user
        tenants = self.context['authorized_tenants']
        self.setActiveUser(id=self.user.id,
                           token=self.token,
                           username=self.user.name,
                           domain_id=self.domain.id,
                           tenant_id=self.tenant.id,
                           service_catalog=self.service_catalog,
                           authorized_tenants=tenants)

    def _setup_request(self):
        super(TestCase, self)._setup_request()
        self.request.session['token'] = self.token.id

    def add_panel_mocks(self):
        """Global mocks on panels that get called on all views."""
        self.patchers['aggregates'] = mock.patch(
            'openstack_dashboard.dashboards.admin'
            '.aggregates.panel.Aggregates.can_access',
            mock.Mock(return_value=True))
        self.patchers['aggregates'].start()

    def tearDown(self):
        """Undo every global patch installed in setUp()/_setup_user()."""
        httplib2.Http._conn_request = self._real_conn_request
        context_processors.openstack = self._real_context_processor
        utils.get_user = self._real_get_user
        mock.patch.stopall()
        super(TestCase, self).tearDown()

    def setActiveUser(self, id=None, token=None, username=None, tenant_id=None,
                      service_catalog=None, tenant_name=None, roles=None,
                      authorized_tenants=None, enabled=True, domain_id=None):
        """Replace utils.get_user with one returning the given identity."""
        def get_user(request):
            return user.User(id=id,
                             token=token,
                             user=username,
                             domain_id=domain_id,
                             tenant_id=tenant_id,
                             service_catalog=service_catalog,
                             roles=roles,
                             enabled=enabled,
                             authorized_tenants=authorized_tenants,
                             endpoint=settings.OPENSTACK_KEYSTONE_URL)
        utils.get_user = get_user

    def assertRedirectsNoFollow(self, response, expected_url):
        """Check for redirect.

        Asserts that the given response issued a 302 redirect without
        processing the view which is redirected to.
        """
        # NOTE: floor division is required here. On Python 3 a plain "/"
        # yields a float (e.g. 302 / 100 == 3.02) and the comparison with
        # 3 would always fail, so every redirect assertion would break.
        assert (response.status_code // 100 == 3), \
            "The response did not return a redirect."
        self.assertEqual(response._headers.get('location', None),
                         ('Location', settings.TESTSERVER + expected_url))
        self.assertEqual(response.status_code, 302)

    def assertNoFormErrors(self, response, context_name="form"):
        """Checks for no form errors.

        Asserts that the response either does not contain a form in its
        context, or that if it does, that form has no errors.
        """
        context = getattr(response, "context", {})
        if not context or context_name not in context:
            return True
        errors = response.context[context_name]._errors
        assert len(errors) == 0, \
            "Unexpected errors were found on the form: %s" % errors

    def assertFormErrors(self, response, count=0, message=None,
                         context_name="form"):
        """Check for form errors.

        Asserts that the response does contain a form in its
        context, and that form has errors, if count were given,
        it must match the exact numbers of errors
        """
        context = getattr(response, "context", {})
        assert (context and context_name in context), \
            "The response did not contain a form."
        errors = response.context[context_name]._errors
        if count:
            assert len(errors) == count, \
                "%d errors were found on the form, %d expected" % \
                (len(errors), count)
            if message and message not in six.text_type(errors):
                self.fail("Expected message not found, instead found: %s"
                          % ["%s: %s" % (key, [e for e in field_errors]) for
                             (key, field_errors) in errors.items()])
        else:
            assert len(errors) > 0, "No errors were found on the form"

    def assertStatusCode(self, response, expected_code):
        """Validates an expected status code.

        Matches camel case of other assert functions
        """
        if response.status_code == expected_code:
            return
        self.fail('status code %r != %r: %s' % (response.status_code,
                                                expected_code,
                                                response.content))

    def assertItemsCollectionEqual(self, response, items_list):
        self.assertEqual(response.content,
                         '{"items": ' + json.dumps(items_list) + "}")

    @staticmethod
    def mock_rest_request(**args):
        """Build a Mock standing in for an authenticated AJAX request."""
        mock_args = {
            'user.is_authenticated.return_value': True,
            'is_ajax.return_value': True,
            'policy.check.return_value': True,
            'body': ''
        }
        mock_args.update(args)
        return mock.Mock(**mock_args)
class BaseAdminViewTests(TestCase):
    """Sets an active user with the "admin" role.

    For testing admin-only views and functionality.
    """

    def setActiveUser(self, *args, **kwargs):
        # Inject the admin role unless the caller provided roles explicitly.
        kwargs.setdefault('roles', [self.roles.admin._info])
        super(BaseAdminViewTests, self).setActiveUser(*args, **kwargs)

    def setSessionValues(self, **kwargs):
        """Persist the given key/value pairs into a real session store."""
        settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file'
        engine = import_module(settings.SESSION_ENGINE)
        store = engine.SessionStore()
        for key, value in kwargs.items():
            store[key] = value
            self.request.session[key] = value
        store.save()
        self.session = store
        self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
class APITestCase(TestCase):
    """Testing APIs.

    For use with tests which deal with the underlying clients rather than
    stubbing out the openstack_dashboard.api.* methods.
    """

    def setUp(self):
        """Swap every api.* client factory for a mox-backed stub."""
        super(APITestCase, self).setUp()
        utils.patch_middleware_get_user()

        def fake_keystoneclient(request, admin=False):
            """Returns the stub keystoneclient.

            Only necessary because the function takes too many arguments to
            conveniently be a lambda.
            """
            return self.stub_keystoneclient()

        # Store the original clients so tearDown() can restore them.
        self._original_glanceclient = api.glance.glanceclient
        self._original_keystoneclient = api.keystone.keystoneclient
        self._original_novaclient = api.nova.novaclient
        self._original_neutronclient = api.neutron.neutronclient
        self._original_cinderclient = api.cinder.cinderclient
        self._original_heatclient = api.heat.heatclient
        self._original_ceilometerclient = api.ceilometer.ceilometerclient

        # Replace the clients with our stubs.
        api.glance.glanceclient = lambda request: self.stub_glanceclient()
        api.keystone.keystoneclient = fake_keystoneclient
        api.nova.novaclient = lambda request: self.stub_novaclient()
        api.neutron.neutronclient = lambda request: self.stub_neutronclient()
        api.cinder.cinderclient = lambda request: self.stub_cinderclient()
        api.heat.heatclient = (lambda request, password=None:
                               self.stub_heatclient())
        api.ceilometer.ceilometerclient = (lambda request:
                                           self.stub_ceilometerclient())

    def tearDown(self):
        """Restore the real client factories saved in setUp()."""
        super(APITestCase, self).tearDown()
        api.glance.glanceclient = self._original_glanceclient
        api.nova.novaclient = self._original_novaclient
        api.keystone.keystoneclient = self._original_keystoneclient
        api.neutron.neutronclient = self._original_neutronclient
        api.cinder.cinderclient = self._original_cinderclient
        api.heat.heatclient = self._original_heatclient
        api.ceilometer.ceilometerclient = self._original_ceilometerclient

    def stub_novaclient(self):
        # Lazily create a single shared mock client per test.
        if not hasattr(self, "novaclient"):
            self.mox.StubOutWithMock(nova_client, 'Client')
            self.novaclient = self.mox.CreateMock(nova_client.Client)
        return self.novaclient

    def stub_cinderclient(self):
        # Lazily create a single shared mock client per test.
        if not hasattr(self, "cinderclient"):
            self.mox.StubOutWithMock(cinder_client, 'Client')
            self.cinderclient = self.mox.CreateMock(cinder_client.Client)
        return self.cinderclient

    def stub_keystoneclient(self):
        if not hasattr(self, "keystoneclient"):
            self.mox.StubOutWithMock(keystone_client, 'Client')
            # NOTE(saschpe): Mock properties, MockObject.__init__ ignores them:
            keystone_client.Client.auth_token = 'foo'
            keystone_client.Client.service_catalog = None
            keystone_client.Client.tenant_id = '1'
            keystone_client.Client.tenant_name = 'tenant_1'
            keystone_client.Client.management_url = ""
            keystone_client.Client.__dir__ = lambda: []
            self.keystoneclient = self.mox.CreateMock(keystone_client.Client)
        return self.keystoneclient

    def stub_glanceclient(self):
        # Lazily create a single shared mock client per test.
        if not hasattr(self, "glanceclient"):
            self.mox.StubOutWithMock(glanceclient, 'Client')
            self.glanceclient = self.mox.CreateMock(glanceclient.Client)
        return self.glanceclient

    def stub_neutronclient(self):
        # Lazily create a single shared mock client per test.
        if not hasattr(self, "neutronclient"):
            self.mox.StubOutWithMock(neutron_client, 'Client')
            self.neutronclient = self.mox.CreateMock(neutron_client.Client)
        return self.neutronclient

    def stub_swiftclient(self, expected_calls=1):
        if not hasattr(self, "swiftclient"):
            self.mox.StubOutWithMock(swift_client, 'Connection')
            self.swiftclient = self.mox.CreateMock(swift_client.Connection)
            # Pre-record one Connection() constructor expectation per
            # anticipated call, each returning the same shared mock.
            while expected_calls:
                swift_client.Connection(None,
                                        mox.IgnoreArg(),
                                        None,
                                        preauthtoken=mox.IgnoreArg(),
                                        preauthurl=mox.IgnoreArg(),
                                        cacert=None,
                                        insecure=False,
                                        auth_version="2.0") \
                    .AndReturn(self.swiftclient)
                expected_calls -= 1
        return self.swiftclient

    def stub_heatclient(self):
        # Lazily create a single shared mock client per test.
        if not hasattr(self, "heatclient"):
            self.mox.StubOutWithMock(heat_client, 'Client')
            self.heatclient = self.mox.CreateMock(heat_client.Client)
        return self.heatclient

    def stub_ceilometerclient(self):
        # Lazily create a single shared mock client per test.
        if not hasattr(self, "ceilometerclient"):
            self.mox.StubOutWithMock(ceilometer_client, 'Client')
            self.ceilometerclient = self.mox.\
                CreateMock(ceilometer_client.Client)
        return self.ceilometerclient
@unittest.skipUnless(os.environ.get('WITH_SELENIUM', False),
                     "The WITH_SELENIUM env variable is not set.")
class SeleniumTestCase(horizon_helpers.SeleniumTestCase):
    """Selenium-driven variant of the Horizon test case.

    Loads the same fixture data and user patching as TestCase, but runs
    only when the WITH_SELENIUM environment variable is set.
    """

    def setUp(self):
        super(SeleniumTestCase, self).setUp()

        test_utils.load_test_data(self)
        self.mox = mox.Mox()

        # Keep a handle on the real get_user so tearDown can restore it.
        self._real_get_user = utils.get_user
        self.setActiveUser(id=self.user.id,
                           token=self.token,
                           username=self.user.name,
                           tenant_id=self.tenant.id,
                           service_catalog=self.service_catalog,
                           authorized_tenants=self.tenants.list())
        self.patchers = {}
        self.patchers['aggregates'] = mock.patch(
            'openstack_dashboard.dashboards.admin'
            '.aggregates.panel.Aggregates.can_access',
            mock.Mock(return_value=True))
        self.patchers['aggregates'].start()
        os.environ["HORIZON_TEST_RUN"] = "True"

    def tearDown(self):
        # Unset stubs before verification so verification failures do not
        # leave stubs installed for subsequent tests.
        self.mox.UnsetStubs()
        utils.get_user = self._real_get_user
        mock.patch.stopall()
        self.mox.VerifyAll()
        del os.environ["HORIZON_TEST_RUN"]

    def setActiveUser(self, id=None, token=None, username=None, tenant_id=None,
                      service_catalog=None, tenant_name=None, roles=None,
                      authorized_tenants=None, enabled=True):
        """Replace utils.get_user with one returning the given identity."""
        def get_user(request):
            return user.User(id=id,
                             token=token,
                             user=username,
                             tenant_id=tenant_id,
                             service_catalog=service_catalog,
                             roles=roles,
                             enabled=enabled,
                             authorized_tenants=authorized_tenants,
                             endpoint=settings.OPENSTACK_KEYSTONE_URL)
        utils.get_user = get_user
class SeleniumAdminTestCase(SeleniumTestCase):
    """Version of AdminTestCase for Selenium.

    Sets an active user with the "admin" role for testing admin-only views and
    functionality.
    """

    def setActiveUser(self, *args, **kwargs):
        # Inject the admin role unless the caller provided roles explicitly.
        kwargs.setdefault('roles', [self.roles.admin._info])
        super(SeleniumAdminTestCase, self).setActiveUser(*args, **kwargs)
def my_custom_sort(flavor):
    """Sort key pinning the test flavors to a fixed, deterministic order."""
    return {
        'm1.secret': 0,
        'm1.tiny': 1,
        'm1.massive': 2,
        'm1.metadata': 3,
    }[flavor.name]
class PluginTestCase(TestCase):
    """Test case for testing plugin system of Horizon.

    For use with tests which deal with the pluggable dashboard and panel
    configuration, it takes care of backing up and restoring the Horizon
    configuration.
    """

    def setUp(self):
        """Snapshot the Horizon config and registered dashboards/panels."""
        super(PluginTestCase, self).setUp()
        self.old_horizon_config = conf.HORIZON_CONFIG
        conf.HORIZON_CONFIG = conf.LazySettings()
        base.Horizon._urls()
        # Store our original dashboards
        self._discovered_dashboards = base.Horizon._registry.keys()
        # Gather up and store our original panels for each dashboard
        self._discovered_panels = {}
        for dash in self._discovered_dashboards:
            panels = base.Horizon._registry[dash]._registry.keys()
            self._discovered_panels[dash] = panels

    def tearDown(self):
        """Rebuild the Horizon singleton and restore the snapshot."""
        super(PluginTestCase, self).tearDown()
        conf.HORIZON_CONFIG = self.old_horizon_config
        # Destroy our singleton and re-create it.
        base.HorizonSite._instance = None
        del base.Horizon
        base.Horizon = base.HorizonSite()
        # Reload the convenience references to Horizon stored in __init__
        moves.reload_module(import_module("horizon"))
        # Re-register our original dashboards and panels.
        # This is necessary because autodiscovery only works on the first
        # import, and calling reload introduces innumerable additional
        # problems. Manual re-registration is the only good way for testing.
        for dash in self._discovered_dashboards:
            base.Horizon.register(dash)
            for panel in self._discovered_panels[dash]:
                dash.register(panel)
        self._reload_urls()

    def _reload_urls(self):
        """CLeans up URLs.

        Clears out the URL caches, reloads the root urls module, and
        re-triggers the autodiscovery mechanism for Horizon. Allows URLs
        to be re-calculated after registering new dashboards. Useful
        only for testing and should never be used on a live site.
        """
        urlresolvers.clear_url_caches()
        moves.reload_module(import_module(settings.ROOT_URLCONF))
        base.Horizon._urls()
class update_settings(django_test_utils.override_settings):
    """override_settings which allows override an item in dict.

    django original override_settings replaces a dict completely,
    however OpenStack dashboard setting has many dictionary configuration
    and there are test case where we want to override only one item in
    a dictionary and keep other items in the dictionary.

    This version of override_settings allows this if keep_dict is True.

    If keep_dict False is specified, the original behavior of
    Django override_settings is used.
    """

    def __init__(self, keep_dict=True, **kwargs):
        if keep_dict:
            # The Mapping ABC lives in collections.abc; the legacy
            # collections.Mapping alias was removed in Python 3.10, so
            # resolve it with a fallback for old interpreters.
            try:
                mapping_type = collections.abc.Mapping
            except AttributeError:
                mapping_type = collections.Mapping
            for key, new_value in kwargs.items():
                value = getattr(settings, key, None)
                if (isinstance(new_value, mapping_type) and
                        isinstance(value, mapping_type)):
                    # Merge rather than replace: keep the existing items
                    # and overlay only the overridden keys.
                    copied = copy.copy(value)
                    copied.update(new_value)
                    kwargs[key] = copied
        super(update_settings, self).__init__(**kwargs)
| |
from django import forms
from django.db.models import Count
from dcim.models import Site, Rack, Device, Interface
from extras.forms import CustomFieldForm, CustomFieldBulkEditForm, CustomFieldFilterForm
from tenancy.forms import TenancyForm
from tenancy.models import Tenant
from utilities.forms import (
APISelect, BootstrapMixin, BulkEditNullBooleanSelect, BulkImportForm, ChainedModelChoiceField, CSVDataField,
ExpandableIPAddressField, FilterChoiceField, Livesearch, ReturnURLForm, SlugField, add_blank_choice,
)
from .models import (
Aggregate, IPAddress, IPADDRESS_STATUS_CHOICES, Prefix, PREFIX_STATUS_CHOICES, RIR, Role, Service, VLAN,
VLANGroup, VLAN_STATUS_CHOICES, VRF,
)
# Address-family filter choices; the empty string means "no filter".
IP_FAMILY_CHOICES = [
    ('', 'All'),
    (4, 'IPv4'),
    (6, 'IPv6'),
]

# Prefix lengths /1 through /127, preceded by a blank "no filter" choice.
PREFIX_MASK_LENGTH_CHOICES = [
    ('', '---------'),
] + [(i, i) for i in range(1, 128)]

# IP addresses may additionally carry a /128 mask (single IPv6 host).
IPADDRESS_MASK_LENGTH_CHOICES = PREFIX_MASK_LENGTH_CHOICES + [(128, 128)]
#
# VRFs
#
class VRFForm(BootstrapMixin, TenancyForm, CustomFieldForm):
    """Create/edit form for a VRF."""

    class Meta:
        model = VRF
        fields = ['name', 'rd', 'enforce_unique', 'description', 'tenant_group', 'tenant']
        labels = {
            'rd': "RD",
        }
        help_texts = {
            'rd': "Route distinguisher in any format",
        }


class VRFFromCSVForm(forms.ModelForm):
    """Row form used by bulk CSV import of VRFs."""

    # Tenants are referenced by name in the CSV, not by primary key.
    tenant = forms.ModelChoiceField(Tenant.objects.all(), to_field_name='name', required=False,
                                    error_messages={'invalid_choice': 'Tenant not found.'})

    class Meta:
        model = VRF
        fields = ['name', 'rd', 'tenant', 'enforce_unique', 'description']


class VRFImportForm(BootstrapMixin, BulkImportForm):
    """Wrapper form holding the CSV payload for VRF import."""
    csv = CSVDataField(csv_form=VRFFromCSVForm)


class VRFBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
    """Bulk-edit form applied to the selected VRF primary keys."""

    pk = forms.ModelMultipleChoiceField(queryset=VRF.objects.all(), widget=forms.MultipleHiddenInput)
    tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
    enforce_unique = forms.NullBooleanField(
        required=False, widget=BulkEditNullBooleanSelect, label='Enforce unique space'
    )
    description = forms.CharField(max_length=100, required=False)

    class Meta:
        # Fields which may be blanked out (set to null) in bulk.
        nullable_fields = ['tenant', 'description']


class VRFFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Sidebar filter form for the VRF list view."""

    model = VRF
    q = forms.CharField(required=False, label='Search')
    tenant = FilterChoiceField(queryset=Tenant.objects.annotate(filter_count=Count('vrfs')), to_field_name='slug',
                               null_option=(0, None))
#
# RIRs
#
class RIRForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a Regional Internet Registry."""

    slug = SlugField()

    class Meta:
        model = RIR
        fields = ['name', 'slug', 'is_private']


class RIRFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the RIR list view (tri-state private flag)."""

    is_private = forms.NullBooleanField(required=False, label='Private', widget=forms.Select(choices=[
        ('', '---------'),
        ('True', 'Yes'),
        ('False', 'No'),
    ]))
#
# Aggregates
#
class AggregateForm(BootstrapMixin, CustomFieldForm):
    """Create/edit form for an Aggregate."""

    class Meta:
        model = Aggregate
        fields = ['prefix', 'rir', 'date_added', 'description']
        help_texts = {
            'prefix': "IPv4 or IPv6 network",
            'rir': "Regional Internet Registry responsible for this prefix",
            'date_added': "Format: YYYY-MM-DD",
        }


class AggregateFromCSVForm(forms.ModelForm):
    """Row form used by bulk CSV import of aggregates."""

    # RIRs are referenced by name in the CSV, not by primary key.
    rir = forms.ModelChoiceField(queryset=RIR.objects.all(), to_field_name='name',
                                 error_messages={'invalid_choice': 'RIR not found.'})

    class Meta:
        model = Aggregate
        fields = ['prefix', 'rir', 'date_added', 'description']


class AggregateImportForm(BootstrapMixin, BulkImportForm):
    """Wrapper form holding the CSV payload for aggregate import."""
    csv = CSVDataField(csv_form=AggregateFromCSVForm)


class AggregateBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
    """Bulk-edit form applied to the selected Aggregate primary keys."""

    pk = forms.ModelMultipleChoiceField(queryset=Aggregate.objects.all(), widget=forms.MultipleHiddenInput)
    rir = forms.ModelChoiceField(queryset=RIR.objects.all(), required=False, label='RIR')
    date_added = forms.DateField(required=False)
    description = forms.CharField(max_length=100, required=False)

    class Meta:
        # Fields which may be blanked out (set to null) in bulk.
        nullable_fields = ['date_added', 'description']


class AggregateFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Sidebar filter form for the aggregate list view."""

    model = Aggregate
    q = forms.CharField(required=False, label='Search')
    family = forms.ChoiceField(required=False, choices=IP_FAMILY_CHOICES, label='Address Family')
    rir = FilterChoiceField(
        queryset=RIR.objects.annotate(filter_count=Count('aggregates')),
        to_field_name='slug',
        label='RIR'
    )
#
# Roles
#
class RoleForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a prefix/VLAN Role."""

    slug = SlugField()

    class Meta:
        model = Role
        fields = ['name', 'slug']
#
# Prefixes
#
class PrefixForm(BootstrapMixin, TenancyForm, CustomFieldForm):
site = forms.ModelChoiceField(
queryset=Site.objects.all(), required=False, label='Site', widget=forms.Select(
attrs={'filter-for': 'vlan', 'nullable': 'true'}
)
)
vlan = ChainedModelChoiceField(
queryset=VLAN.objects.all(), chains={'site': 'site'}, required=False, label='VLAN', widget=APISelect(
api_url='/api/ipam/vlans/?site_id={{site}}', display_field='display_name'
)
)
class Meta:
model = Prefix
fields = ['prefix', 'vrf', 'site', 'vlan', 'status', 'role', 'is_pool', 'description', 'tenant_group', 'tenant']
def __init__(self, *args, **kwargs):
super(PrefixForm, self).__init__(*args, **kwargs)
self.fields['vrf'].empty_label = 'Global'
class PrefixFromCSVForm(forms.ModelForm):
    """CSV bulk-import form for Prefixes.

    Related objects (VRF, tenant, site, role) are resolved by natural key
    (rd/name); the optional VLAN is resolved from group name + VID in clean().
    """
    vrf = forms.ModelChoiceField(queryset=VRF.objects.all(), required=False, to_field_name='rd',
                                 error_messages={'invalid_choice': 'VRF not found.'})
    tenant = forms.ModelChoiceField(Tenant.objects.all(), to_field_name='name', required=False,
                                    error_messages={'invalid_choice': 'Tenant not found.'})
    site = forms.ModelChoiceField(queryset=Site.objects.all(), required=False, to_field_name='name',
                                  error_messages={'invalid_choice': 'Site not found.'})
    vlan_group_name = forms.CharField(required=False)
    vlan_vid = forms.IntegerField(required=False)
    # CSV carries the human-readable status label; mapped back to its stored
    # value in save().
    status_name = forms.ChoiceField(choices=[(s[1], s[0]) for s in PREFIX_STATUS_CHOICES])
    role = forms.ModelChoiceField(queryset=Role.objects.all(), required=False, to_field_name='name',
                                  error_messages={'invalid_choice': 'Invalid role.'})

    class Meta:
        model = Prefix
        fields = ['prefix', 'vrf', 'tenant', 'site', 'vlan_group_name', 'vlan_vid', 'status_name', 'role', 'is_pool',
                  'description']

    def clean(self):
        """Resolve the optional VLAN group and VLAN from their natural keys."""
        super(PrefixFromCSVForm, self).clean()
        site = self.cleaned_data.get('site')
        vlan_group_name = self.cleaned_data.get('vlan_group_name')
        vlan_vid = self.cleaned_data.get('vlan_vid')
        vlan_group = None
        # Validate VLAN group (a group may be global, i.e. site=None)
        if vlan_group_name:
            try:
                vlan_group = VLANGroup.objects.get(site=site, name=vlan_group_name)
            except VLANGroup.DoesNotExist:
                if site:
                    self.add_error('vlan_group_name', "Invalid VLAN group ({} - {}).".format(site, vlan_group_name))
                else:
                    self.add_error('vlan_group_name', "Invalid global VLAN group ({}).".format(vlan_group_name))
        # Validate VLAN: must match on site, (possibly unresolved) group, and VID
        if vlan_vid:
            try:
                self.instance.vlan = VLAN.objects.get(site=site, group=vlan_group, vid=vlan_vid)
            except VLAN.DoesNotExist:
                if site:
                    self.add_error('vlan_vid', "Invalid VLAN ID ({}) for site {}.".format(vlan_vid, site))
                elif vlan_group:
                    self.add_error('vlan_vid', "Invalid VLAN ID ({}) for group {}.".format(vlan_vid, vlan_group_name))
                elif not vlan_group_name:
                    self.add_error('vlan_vid', "Invalid global VLAN ID ({}).".format(vlan_vid))
            except VLAN.MultipleObjectsReturned:
                self.add_error('vlan_vid', "Multiple VLANs found ({} - VID {})".format(site, vlan_vid))

    def save(self, *args, **kwargs):
        """Map the status label back to its stored value before saving."""
        # Assign Prefix status by name
        self.instance.status = dict(self.fields['status_name'].choices)[self.cleaned_data['status_name']]
        return super(PrefixFromCSVForm, self).save(*args, **kwargs)
class PrefixImportForm(BootstrapMixin, BulkImportForm):
    """Bulk-import wrapper exposing a CSV field bound to PrefixFromCSVForm."""
    csv = CSVDataField(csv_form=PrefixFromCSVForm)
class PrefixBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
    """Bulk-edit form applied to a selected set of Prefixes."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Prefix.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    site = forms.ModelChoiceField(queryset=Site.objects.all(), required=False)
    vrf = forms.ModelChoiceField(queryset=VRF.objects.all(), required=False, label='VRF')
    tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
    status = forms.ChoiceField(choices=add_blank_choice(PREFIX_STATUS_CHOICES), required=False)
    role = forms.ModelChoiceField(queryset=Role.objects.all(), required=False)
    is_pool = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect,
        label='Is a pool'
    )
    description = forms.CharField(max_length=100, required=False)

    class Meta:
        # Fields which may be blanked out (nulled) via the bulk-edit view.
        nullable_fields = ['site', 'vrf', 'tenant', 'role', 'description']
def prefix_status_choices():
    """Return Prefix status choices annotated with current usage counts."""
    counts = {
        row['status']: row['count']
        for row in Prefix.objects.values('status').annotate(count=Count('status')).order_by('status')
    }
    return [(value, u'{} ({})'.format(label, counts.get(value, 0))) for value, label in PREFIX_STATUS_CHOICES]
class PrefixFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Filter form for narrowing the list of Prefixes."""
    model = Prefix
    q = forms.CharField(required=False, label='Search')
    parent = forms.CharField(
        required=False,
        label='Parent prefix',
        widget=forms.TextInput(attrs={'placeholder': 'Prefix'})
    )
    family = forms.ChoiceField(required=False, choices=IP_FAMILY_CHOICES, label='Address family')
    mask_length = forms.ChoiceField(required=False, choices=PREFIX_MASK_LENGTH_CHOICES, label='Mask length')
    vrf = FilterChoiceField(
        queryset=VRF.objects.annotate(filter_count=Count('prefixes')),
        to_field_name='rd',
        label='VRF',
        null_option=(0, 'Global')
    )
    tenant = FilterChoiceField(
        queryset=Tenant.objects.annotate(filter_count=Count('prefixes')),
        to_field_name='slug',
        null_option=(0, 'None')
    )
    # Choices are generated by a callable so the per-status counts stay fresh.
    status = forms.MultipleChoiceField(choices=prefix_status_choices, required=False)
    site = FilterChoiceField(
        queryset=Site.objects.annotate(filter_count=Count('prefixes')),
        to_field_name='slug',
        null_option=(0, 'None')
    )
    role = FilterChoiceField(
        queryset=Role.objects.annotate(filter_count=Count('prefixes')),
        to_field_name='slug',
        null_option=(0, 'None')
    )
    expand = forms.BooleanField(required=False, label='Expand prefix hierarchy')
#
# IP addresses
#
class IPAddressForm(BootstrapMixin, TenancyForm, ReturnURLForm, CustomFieldForm):
    """Create/edit form for an IPAddress.

    The interface_* and nat_* fields are UI-only chained selectors
    (site -> rack -> device -> interface / IP); the model itself stores only
    `interface` and `nat_inside`.
    """
    interface_site = forms.ModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        label='Site',
        widget=forms.Select(
            attrs={'filter-for': 'interface_rack'}
        )
    )
    interface_rack = ChainedModelChoiceField(
        queryset=Rack.objects.all(),
        chains={'site': 'interface_site'},
        required=False,
        label='Rack',
        widget=APISelect(
            api_url='/api/dcim/racks/?site_id={{interface_site}}',
            display_field='display_name',
            attrs={'filter-for': 'interface_device', 'nullable': 'true'}
        )
    )
    interface_device = ChainedModelChoiceField(
        queryset=Device.objects.all(),
        chains={'site': 'interface_site', 'rack': 'interface_rack'},
        required=False,
        label='Device',
        widget=APISelect(
            api_url='/api/dcim/devices/?site_id={{interface_site}}&rack_id={{interface_rack}}',
            display_field='display_name',
            attrs={'filter-for': 'interface'}
        )
    )
    interface = ChainedModelChoiceField(
        queryset=Interface.objects.all(),
        chains={'device': 'interface_device'},
        required=False,
        widget=APISelect(
            api_url='/api/dcim/interfaces/?device_id={{interface_device}}'
        )
    )
    nat_site = forms.ModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        label='Site',
        widget=forms.Select(
            attrs={'filter-for': 'nat_device'}
        )
    )
    nat_rack = ChainedModelChoiceField(
        queryset=Rack.objects.all(),
        chains={'site': 'nat_site'},
        required=False,
        label='Rack',
        widget=APISelect(
            # BUG FIX: this selector chains off nat_site, but the API URL
            # previously filtered racks by {{interface_site}} (copy/paste from
            # the interface_rack field above). Filter by nat_site instead.
            api_url='/api/dcim/racks/?site_id={{nat_site}}',
            display_field='display_name',
            attrs={'filter-for': 'nat_device', 'nullable': 'true'}
        )
    )
    nat_device = ChainedModelChoiceField(
        queryset=Device.objects.all(),
        chains={'site': 'nat_site'},
        required=False,
        label='Device',
        widget=APISelect(
            api_url='/api/dcim/devices/?site_id={{nat_site}}',
            display_field='display_name',
            attrs={'filter-for': 'nat_inside'}
        )
    )
    nat_inside = ChainedModelChoiceField(
        queryset=IPAddress.objects.all(),
        chains={'interface__device': 'nat_device'},
        required=False,
        label='IP Address',
        widget=APISelect(
            api_url='/api/ipam/ip-addresses/?device_id={{nat_device}}',
            display_field='address'
        )
    )
    livesearch = forms.CharField(
        required=False,
        label='IP Address',
        widget=Livesearch(
            query_key='q',
            query_url='ipam-api:ipaddress-list',
            field_to_update='nat_inside',
            obj_label='address'
        )
    )
    primary_for_device = forms.BooleanField(required=False, label='Make this the primary IP for the device')

    class Meta:
        model = IPAddress
        fields = [
            'address', 'vrf', 'status', 'description', 'interface', 'primary_for_device', 'nat_inside', 'tenant_group',
            'tenant',
        ]

    def __init__(self, *args, **kwargs):
        """Pre-populate the chained selectors from an existing instance."""
        # Initialize helper selectors
        instance = kwargs.get('instance')
        initial = kwargs.get('initial', {})
        if instance and instance.interface is not None:
            initial['interface_site'] = instance.interface.device.site
            initial['interface_rack'] = instance.interface.device.rack
            initial['interface_device'] = instance.interface.device
        if instance and instance.nat_inside is not None:
            # NOTE(review): assumes IPAddress exposes .device (via its
            # interface) -- confirm against the IPAddress model.
            initial['nat_site'] = instance.nat_inside.device.site
            initial['nat_rack'] = instance.nat_inside.device.rack
            initial['nat_device'] = instance.nat_inside.device
        kwargs['initial'] = initial

        super(IPAddressForm, self).__init__(*args, **kwargs)

        self.fields['vrf'].empty_label = 'Global'

        # Initialize primary_for_device if IP address is already assigned
        if self.instance.interface is not None:
            device = self.instance.interface.device
            if (
                self.instance.address.version == 4 and device.primary_ip4 == self.instance or
                self.instance.address.version == 6 and device.primary_ip6 == self.instance
            ):
                self.initial['primary_for_device'] = True

    def clean(self):
        """Reject primary-IP designation when no interface is assigned."""
        super(IPAddressForm, self).clean()

        # Primary IP assignment is only available if an interface has been assigned.
        if self.cleaned_data.get('primary_for_device') and not self.cleaned_data.get('interface'):
            self.add_error(
                'primary_for_device', "Only IP addresses assigned to an interface can be designated as primary IPs."
            )

    def save(self, *args, **kwargs):
        """Save the IP and synchronize the device's primary-IP pointer."""
        ipaddress = super(IPAddressForm, self).save(*args, **kwargs)

        # Assign this IPAddress as the primary for the associated Device.
        if self.cleaned_data['primary_for_device']:
            device = self.cleaned_data['interface'].device
            if ipaddress.address.version == 4:
                device.primary_ip4 = ipaddress
            else:
                device.primary_ip6 = ipaddress
            device.save()

        # Clear assignment as primary for device if set.
        else:
            try:
                if ipaddress.address.version == 4:
                    device = ipaddress.primary_ip4_for
                    device.primary_ip4 = None
                else:
                    device = ipaddress.primary_ip6_for
                    device.primary_ip6 = None
                device.save()
            except Device.DoesNotExist:
                # This IP was not primary for any device; nothing to clear.
                pass

        return ipaddress
class IPAddressPatternForm(BootstrapMixin, forms.Form):
    """Form accepting an expandable pattern for bulk IP address creation."""
    pattern = ExpandableIPAddressField(label='Address pattern')
class IPAddressBulkAddForm(BootstrapMixin, TenancyForm, CustomFieldForm):
    """Form for creating several IPAddresses at once."""

    class Meta:
        model = IPAddress
        fields = ['address', 'status', 'vrf', 'description', 'tenant_group', 'tenant']

    def __init__(self, *args, **kwargs):
        """Relabel the VRF empty choice as "Global"."""
        super(IPAddressBulkAddForm, self).__init__(*args, **kwargs)
        self.fields['vrf'].empty_label = 'Global'
class IPAddressFromCSVForm(forms.ModelForm):
    """CSV bulk-import form for IPAddresses.

    VRF, tenant and device are resolved by natural key (rd/name); the status
    label is mapped back to its stored value in save().
    """
    vrf = forms.ModelChoiceField(queryset=VRF.objects.all(), required=False, to_field_name='rd',
                                 error_messages={'invalid_choice': 'VRF not found.'})
    tenant = forms.ModelChoiceField(Tenant.objects.all(), to_field_name='name', required=False,
                                    error_messages={'invalid_choice': 'Tenant not found.'})
    status_name = forms.ChoiceField(choices=[(s[1], s[0]) for s in IPADDRESS_STATUS_CHOICES])
    device = forms.ModelChoiceField(queryset=Device.objects.all(), required=False, to_field_name='name',
                                    error_messages={'invalid_choice': 'Device not found.'})
    interface_name = forms.CharField(required=False)
    is_primary = forms.BooleanField(required=False)

    class Meta:
        model = IPAddress
        fields = ['address', 'vrf', 'tenant', 'status_name', 'device', 'interface_name', 'is_primary', 'description']

    def clean(self):
        """Validate the device/interface pairing and the primary-IP flag."""
        # BUG FIX: super().clean() was not being called, which skipped
        # ModelForm's own cross-field validation. Also makes this consistent
        # with PrefixFromCSVForm and VLANFromCSVForm.
        super(IPAddressFromCSVForm, self).clean()

        device = self.cleaned_data.get('device')
        interface_name = self.cleaned_data.get('interface_name')
        is_primary = self.cleaned_data.get('is_primary')

        # Validate interface: device and interface name must be given together,
        # and the named interface must exist on that device.
        if device and interface_name:
            try:
                Interface.objects.get(device=device, name=interface_name)
            except Interface.DoesNotExist:
                self.add_error('interface_name', "Invalid interface ({}) for {}".format(interface_name, device))
        elif device and not interface_name:
            self.add_error('interface_name', "Device set ({}) but interface missing".format(device))
        elif interface_name and not device:
            self.add_error('device', "Interface set ({}) but device missing or invalid".format(interface_name))

        # Validate is_primary: a primary IP only makes sense on a device.
        if is_primary and not device:
            self.add_error('is_primary', "No device specified; cannot set as primary IP")

    def save(self, *args, **kwargs):
        """Resolve status, interface and primary-IP pointers before saving."""
        # Assign status by name
        self.instance.status = dict(self.fields['status_name'].choices)[self.cleaned_data['status_name']]

        # Set interface
        if self.cleaned_data['device'] and self.cleaned_data['interface_name']:
            self.instance.interface = Interface.objects.get(device=self.cleaned_data['device'],
                                                            name=self.cleaned_data['interface_name'])
        # Set as primary for device
        if self.cleaned_data['is_primary']:
            if self.instance.address.version == 4:
                self.instance.primary_ip4_for = self.cleaned_data['device']
            elif self.instance.address.version == 6:
                self.instance.primary_ip6_for = self.cleaned_data['device']

        return super(IPAddressFromCSVForm, self).save(*args, **kwargs)
class IPAddressImportForm(BootstrapMixin, BulkImportForm):
    """Bulk-import wrapper exposing a CSV field bound to IPAddressFromCSVForm."""
    csv = CSVDataField(csv_form=IPAddressFromCSVForm)
class IPAddressBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
    """Bulk-edit form applied to a selected set of IPAddresses."""
    pk = forms.ModelMultipleChoiceField(
        queryset=IPAddress.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    vrf = forms.ModelChoiceField(queryset=VRF.objects.all(), required=False, label='VRF')
    tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
    status = forms.ChoiceField(choices=add_blank_choice(IPADDRESS_STATUS_CHOICES), required=False)
    description = forms.CharField(max_length=100, required=False)

    class Meta:
        # Fields which may be blanked out (nulled) via the bulk-edit view.
        nullable_fields = ['vrf', 'tenant', 'description']
def ipaddress_status_choices():
    """Return IPAddress status choices annotated with current usage counts."""
    counts = {
        row['status']: row['count']
        for row in IPAddress.objects.values('status').annotate(count=Count('status')).order_by('status')
    }
    return [(value, u'{} ({})'.format(label, counts.get(value, 0))) for value, label in IPADDRESS_STATUS_CHOICES]
class IPAddressFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Filter form for narrowing the list of IPAddresses."""
    model = IPAddress
    q = forms.CharField(required=False, label='Search')
    parent = forms.CharField(
        required=False,
        label='Parent Prefix',
        widget=forms.TextInput(attrs={'placeholder': 'Prefix'})
    )
    family = forms.ChoiceField(required=False, choices=IP_FAMILY_CHOICES, label='Address family')
    mask_length = forms.ChoiceField(required=False, choices=IPADDRESS_MASK_LENGTH_CHOICES, label='Mask length')
    vrf = FilterChoiceField(
        queryset=VRF.objects.annotate(filter_count=Count('ip_addresses')),
        to_field_name='rd',
        label='VRF',
        null_option=(0, 'Global')
    )
    tenant = FilterChoiceField(
        queryset=Tenant.objects.annotate(filter_count=Count('ip_addresses')),
        to_field_name='slug',
        null_option=(0, 'None')
    )
    # Choices are generated by a callable so the per-status counts stay fresh.
    status = forms.MultipleChoiceField(choices=ipaddress_status_choices, required=False)
#
# VLAN groups
#
class VLANGroupForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a VLANGroup."""
    slug = SlugField()

    class Meta:
        model = VLANGroup
        fields = ['site', 'name', 'slug']
class VLANGroupFilterForm(BootstrapMixin, forms.Form):
    """Filter form for narrowing the list of VLAN groups."""
    site = FilterChoiceField(
        queryset=Site.objects.annotate(filter_count=Count('vlan_groups')),
        to_field_name='slug',
        null_option=(0, 'Global')
    )
#
# VLANs
#
class VLANForm(BootstrapMixin, TenancyForm, CustomFieldForm):
    """Create/edit form for a VLAN, with a site-chained group selector."""
    site = forms.ModelChoiceField(
        queryset=Site.objects.all(),
        widget=forms.Select(attrs={'filter-for': 'group', 'nullable': 'true'})
    )
    group = ChainedModelChoiceField(
        queryset=VLANGroup.objects.all(),
        chains={'site': 'site'},
        required=False,
        label='Group',
        widget=APISelect(api_url='/api/ipam/vlan-groups/?site_id={{site}}')
    )

    class Meta:
        model = VLAN
        fields = ['site', 'group', 'vid', 'name', 'status', 'role', 'description', 'tenant_group', 'tenant']
        help_texts = {
            'site': "Leave blank if this VLAN spans multiple sites",
            'group': "VLAN group (optional)",
            'vid': "Configured VLAN ID",
            'name': "Configured VLAN name",
            'status': "Operational status of this VLAN",
            'role': "The primary function of this VLAN",
        }
class VLANFromCSVForm(forms.ModelForm):
    """CSV bulk-import form for VLANs.

    Site, tenant and role are resolved by name; the optional VLAN group is
    validated per-site in clean(); the status label is mapped back to its
    stored value in save().
    """
    site = forms.ModelChoiceField(
        queryset=Site.objects.all(), required=False, to_field_name='name',
        error_messages={'invalid_choice': 'Site not found.'}
    )
    group_name = forms.CharField(required=False)
    tenant = forms.ModelChoiceField(
        Tenant.objects.all(), to_field_name='name', required=False,
        error_messages={'invalid_choice': 'Tenant not found.'}
    )
    status_name = forms.ChoiceField(choices=[(s[1], s[0]) for s in VLAN_STATUS_CHOICES])
    role = forms.ModelChoiceField(
        queryset=Role.objects.all(), required=False, to_field_name='name',
        error_messages={'invalid_choice': 'Invalid role.'}
    )

    class Meta:
        model = VLAN
        fields = ['site', 'group_name', 'vid', 'name', 'tenant', 'status_name', 'role', 'description']

    def clean(self):
        """Validate that the named VLAN group exists for the given site."""
        super(VLANFromCSVForm, self).clean()

        # Validate VLANGroup
        group_name = self.cleaned_data.get('group_name')
        if group_name:
            try:
                VLANGroup.objects.get(site=self.cleaned_data.get('site'), name=group_name)
            except VLANGroup.DoesNotExist:
                self.add_error('group_name', "Invalid VLAN group {}.".format(group_name))

    def save(self, *args, **kwargs):
        """Resolve group and status before saving.

        BUG FIX: commit previously defaulted to False (`kwargs.get('commit')`
        returns None when the kwarg is absent), so a plain `form.save()` never
        persisted the VLAN. ModelForm's contract is commit=True by default;
        honor both the positional and keyword calling styles.
        """
        commit = args[0] if args else kwargs.get('commit', True)

        vlan = super(VLANFromCSVForm, self).save(commit=False)

        # Assign VLANGroup by site and name
        if self.cleaned_data['group_name']:
            vlan.group = VLANGroup.objects.get(site=self.cleaned_data['site'], name=self.cleaned_data['group_name'])

        # Assign VLAN status by name
        vlan.status = dict(self.fields['status_name'].choices)[self.cleaned_data['status_name']]

        if commit:
            vlan.save()
        return vlan
class VLANImportForm(BootstrapMixin, BulkImportForm):
    """Bulk-import wrapper exposing a CSV field bound to VLANFromCSVForm."""
    csv = CSVDataField(csv_form=VLANFromCSVForm)
class VLANBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
    """Bulk-edit form applied to a selected set of VLANs."""
    pk = forms.ModelMultipleChoiceField(
        queryset=VLAN.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    site = forms.ModelChoiceField(queryset=Site.objects.all(), required=False)
    group = forms.ModelChoiceField(queryset=VLANGroup.objects.all(), required=False)
    tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
    status = forms.ChoiceField(choices=add_blank_choice(VLAN_STATUS_CHOICES), required=False)
    role = forms.ModelChoiceField(queryset=Role.objects.all(), required=False)
    description = forms.CharField(max_length=100, required=False)

    class Meta:
        # Fields which may be blanked out (nulled) via the bulk-edit view.
        nullable_fields = ['site', 'group', 'tenant', 'role', 'description']
def vlan_status_choices():
    """Return VLAN status choices annotated with current usage counts."""
    counts = {
        row['status']: row['count']
        for row in VLAN.objects.values('status').annotate(count=Count('status')).order_by('status')
    }
    return [(value, u'{} ({})'.format(label, counts.get(value, 0))) for value, label in VLAN_STATUS_CHOICES]
class VLANFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Filter form for narrowing the list of VLANs."""
    model = VLAN
    q = forms.CharField(required=False, label='Search')
    site = FilterChoiceField(
        queryset=Site.objects.annotate(filter_count=Count('vlans')),
        to_field_name='slug',
        null_option=(0, 'Global')
    )
    group_id = FilterChoiceField(
        queryset=VLANGroup.objects.annotate(filter_count=Count('vlans')),
        label='VLAN group',
        null_option=(0, 'None')
    )
    tenant = FilterChoiceField(
        queryset=Tenant.objects.annotate(filter_count=Count('vlans')),
        to_field_name='slug',
        null_option=(0, 'None')
    )
    # Choices are generated by a callable so the per-status counts stay fresh.
    status = forms.MultipleChoiceField(choices=vlan_status_choices, required=False)
    role = FilterChoiceField(
        queryset=Role.objects.annotate(filter_count=Count('vlans')),
        to_field_name='slug',
        null_option=(0, 'None')
    )
#
# Services
#
class ServiceForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a Service attached to a Device."""

    class Meta:
        model = Service
        fields = ['name', 'protocol', 'port', 'ipaddresses', 'description']
        help_texts = {
            'ipaddresses': "IP address assignment is optional. If no IPs are selected, the service is assumed to be "
                           "reachable via all IPs assigned to the device.",
        }

    def __init__(self, *args, **kwargs):
        """Restrict selectable IPs to those on the parent device's interfaces."""
        super(ServiceForm, self).__init__(*args, **kwargs)

        # Limit IP address choices to those assigned to interfaces of the parent device
        self.fields['ipaddresses'].queryset = IPAddress.objects.filter(interface__device=self.instance.device)
| |
import tensorflow as tf
import numpy as np
class BaseModel(object):
  """Holds code shared between all the different model variants."""

  def __init__(self, batch_size, max_sequence_len, out_vocab_size, c2v,
               dropout_keep_prob=0.0):
    """Builds the shared input placeholders, embeddings and padding mask.

    Args:
      batch_size: minibatch size
      max_sequence_len: length (in tokens) of every padded input sequence
      out_vocab_size: number of output classes (languages)
      c2v: char2vec instance used to embed the input ids
      dropout_keep_prob: keep probability for dropout (0.0 here means unused)
    """
    self._batch_size = batch_size
    self._dropout_keep_prob = dropout_keep_prob
    self._out_vocab_size = out_vocab_size

    # Integer token ids and per-example label distributions.
    self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
                            name='x')
    self.y = tf.placeholder(tf.float32, [batch_size, out_vocab_size],
                            name='y')
    # The bidirectional rnn code requires seq_lens as int64
    self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
    self.example_weights = tf.placeholder(tf.float32, [batch_size],
                                          name='example_weights')

    embeddings = c2v.GetEmbeddings(self.x)
    # One [batch_size, embedding_dims] tensor per timestep.
    # (tf.split/tf.pack/tf.select/tf.mul below are the TF<1.0 APIs.)
    self._inputs = [tf.squeeze(input_, [1]) for input_ in
                    tf.split(1, max_sequence_len, embeddings)]

    # Need to prepare a mask to zero out the padding symbols.
    # Make a batch_size x max_sequence_len matrix where each
    # row contains the length repeated max_sequence_len times.
    lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1)
    lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len])

    # Make a matrix where each row contains [0, 1, ..., max_sequence_len]
    r = tf.range(0, max_sequence_len, 1)
    range_row = tf.expand_dims(r, 0)
    range_tiled = tf.tile(range_row, [batch_size, 1])

    # Use the logical operations to create a mask
    indicator = tf.less(range_tiled, lengths_tiled)
    sz = [batch_size, max_sequence_len]
    self._mask = tf.select(indicator, tf.ones(sz), tf.zeros(sz))

  def _DoPredictions(self, in_size, mats, class_weights=None):
    """Takes in an array of states and calculates predictions.

    Get the cross-entropy for each example in the vector self._xent.

    Args:
      in_size: size of the hidden state vectors
      mats: list of hidden state vectors
      class_weights: optional per-class loss weights passed to _SafeXEnt
    """
    pred_mat = tf.get_variable('pred_mat',
                               [in_size, self._out_vocab_size])
    pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])

    # Make a prediction on every word.
    def GetWordPred(o_):
      logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
      return tf.nn.softmax(logits)

    self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
    # Per-word weights: the padding mask normalized to sum to 1 per example.
    self.cs = self._mask / tf.reduce_sum(self._mask, 1, keep_dims=True)

    # The final prediction is the average of the predictions for each word
    # weighted by the individual confidence/utility scores.
    preds_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
                            tf.reshape(self.preds_by_word,
                                       [-1, self._out_vocab_size]))
    preds_weighted_reshaped = tf.reshape(preds_weighted,
                                         self.preds_by_word.get_shape())
    self.probs = tf.reduce_sum(preds_weighted_reshaped, 0)
    self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights)
class WordAvgModel(BaseModel):  # formerly SimpleModel
  """A bag of word /predictions/."""

  def __init__(self, out_vocab_size=None,
               batch_size=10,
               model_params=None,
               c2v=None,
               max_sequence_len=None,
               dropout_keep_prob=None,
               weights=None):
    """Averages per-word language predictions into one per-example prediction.

    Args:
      out_vocab_size: number of output classes (languages)
      batch_size: minibatch size
      model_params: dictionary of other model parameters (unused here)
      c2v: char2vec instance used to embed the input
      max_sequence_len: length of all the input sequences
      dropout_keep_prob: dropout keep probability (unused here)
      weights: class weights (unused here; the loss is unweighted)
    """
    super(WordAvgModel, self).__init__(batch_size, max_sequence_len,
                                       out_vocab_size, c2v)

    # Predict a language directly from each word embedding, then combine
    # via the mask-weighted average in BaseModel._DoPredictions.
    super(WordAvgModel, self)._DoPredictions(c2v.embedding_dims,
                                             self._inputs)

    # Mean of the per-example losses, weighted by example_weights.
    self.cost = tf.reduce_mean(self.example_weights * self._xent)
class WordSeqModel(BaseModel):
  """A bag of word embeddings."""

  def __init__(self, out_vocab_size=None,
               batch_size=10,
               model_params=None,
               c2v=None,
               max_sequence_len=None,
               dropout_keep_prob=None,
               weights=None):
    """Averages word embeddings, then makes one prediction per example.

    Args:
      out_vocab_size: number of output classes (languages)
      batch_size: minibatch size
      model_params: dictionary of other model parameters (unused here)
      c2v: char2vec instance used to embed the input
      max_sequence_len: length of all the input sequences
      dropout_keep_prob: dropout keep probability
      weights: class weights for the loss
    """
    super(WordSeqModel, self).__init__(batch_size, max_sequence_len,
                                       out_vocab_size, c2v)
    in_size = self._inputs[0].get_shape()[1].value

    # Also, output confidence scores at every word.
    confidence_mat = tf.get_variable('confidence_mat', [in_size, 1])
    confidence_scores = tf.concat(1, [tf.matmul(o_, confidence_mat)
                                      for o_ in self._inputs])

    # dropout on confidence_scores
    random_tensor = (1.0 - self._dropout_keep_prob +
                     tf.random_uniform(tf.shape(confidence_scores)))
    binary_tensor = -50.0 * tf.floor(random_tensor)
    csshape = confidence_scores.get_shape()
    # NOTE(review): confidence_scores/random_tensor/binary_tensor are computed
    # above but never used below -- self.cs is the softmax of a constant, i.e.
    # uniform weights over timesteps. Looks like disabled/leftover code;
    # confirm intent before relying on the learned confidences.
    self.cs = tf.nn.softmax(tf.constant(1.0, shape=csshape))

    # The final prediction is the average of the predictions for each word
    # weighted by the individual confidence/utility scores.
    wvs = tf.pack(self._inputs)
    wvs_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
                          tf.reshape(wvs, [-1, in_size]))
    wvs_weighted_reshaped = tf.reshape(wvs_weighted, wvs.get_shape())
    wvsum = tf.reduce_sum(wvs_weighted_reshaped,0)

    pred_mat = tf.get_variable('pred_mat', [in_size, self._out_vocab_size])
    pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])

    # Make a prediction for each tweet.
    def GetWordPred(o_):
      logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
      return tf.nn.softmax(logits)

    preds = GetWordPred(wvsum)
    # Renormalize so each row of probs sums to one.
    z = tf.tile(tf.reshape(tf.reduce_sum(preds,1),[-1,1]), [1, out_vocab_size])
    self.preds, self.z = preds, z
    self.probs = tf.div(preds, z)  # normalize

    self.unweighted_xent = _SafeXEnt(self.y, self.probs)
    self._xent = _SafeXEnt(self.y, self.probs, class_weights=weights)
    self.cost = tf.reduce_mean(self.example_weights * self._xent)
class TweetSeqModel(BaseModel):  # formerly SeqModel
  """Single layer LSTM on top of the word embeddings.

  Lang id predictions are done on each word and then combined via
  a weighted average.
  """

  def __init__(self, out_vocab_size=None,
               batch_size=10, model_params=None,
               c2v=None,
               max_sequence_len=None,
               dropout_keep_prob=None,
               weights=None):
    """Initialize the TweetSeqModel

    Args:
      out_vocab_size: how many languages we are predicting
      batch_size: minibatch size
      model_params: dictionary of other model parameters
      c2v: char2vec class instance
      max_sequence_len: length of all the input sequences
      dropout_keep_prob: dropout probability indicator
      weights: class weights
    """
    hidden_size = model_params['model_hidden_size']
    proj_size = model_params['model_proj_size']  # optional, can be None

    super(TweetSeqModel, self).__init__(batch_size, max_sequence_len,
                                        out_vocab_size, c2v,
                                        dropout_keep_prob)

    weights = tf.constant(weights, dtype=tf.float32, name='class_weights')

    def GetCell():
      """Creates an LSTM cell with dropout."""
      c = tf.nn.rnn_cell.LSTMCell(hidden_size,
                                  use_peepholes=model_params['peepholes'],
                                  num_proj=proj_size)
      if dropout_keep_prob is not None:
        c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob)
      return c

    # Create the bi-directional LSTM
    with tf.variable_scope('wordrnn'):
      with tf.variable_scope('fw'):
        cell_fw = GetCell()
      with tf.variable_scope('bw'):
        cell_bw = GetCell()

      rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs,
                                             dtype=tf.float32,
                                             sequence_length=self.seq_lens)
      # Output width doubles: forward and backward states are concatenated.
      if proj_size:
        out_size = 2 * proj_size
      else:
        out_size = 2 * hidden_size
      super(TweetSeqModel, self)._DoPredictions(out_size, rnnout, class_weights=weights)

      self.cost = tf.reduce_mean(self.example_weights * self._xent)
class CharSeqModel(object):  # formerly TweetSeqModel
  """
  Treats each document (tweet) as a single "word," which is fed through c2v,
  and the output "embedding" sized to be a vector of language predictions.
  """
  def __init__(self, out_vocab_size=None,
               batch_size=10, model_params=None, c2v=None,
               max_sequence_len=None,
               dropout_keep_prob=None,
               weights=None):
    """Builds a feed-forward classifier over a whole-tweet embedding.

    Args:
      out_vocab_size: how many languages we are predicting
      batch_size: minibatch size
      model_params: dictionary of other model parameters
      c2v: char2vec class instance
      max_sequence_len: length of all the input sequences
      dropout_keep_prob: dropout probability indicator (unused here)
      weights: class weights (stored but unused in the loss below)
    """
    self.params = model_params
    self._out_vocab_size = out_vocab_size  # num. of languages
    self.weights = tf.constant(weights, dtype=tf.float32, name='class_weights')

    with tf.variable_scope("tweetff"):
      hidden = tf.get_variable("ff_hidden",
                               [c2v.embedding_dims, out_vocab_size])
      bias = tf.get_variable('ff_bias', [out_vocab_size])

    # probably useless. at least I don't want to use it
    self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')

    self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
                            name='x')
    self.y = tf.placeholder(tf.float32, [batch_size, out_vocab_size],
                            name='y')
    self.example_weights = tf.placeholder(tf.float32, [batch_size],
                                          name='example_weights')

    # get one 'word' embedding for the full tweet
    # NOTE(review): slices position 1 of the embedding sequence -- presumably
    # c2v places the whole-tweet embedding there; confirm against c2v.
    tweet_embedding = c2v.GetEmbeddings(self.x)[:,1,:]

    logits = tf.nn.xw_plus_b(tweet_embedding, hidden, bias)
    self.probs = tf.nn.softmax(logits)

    # TF<1.0 argument order: (logits, labels).
    self._xent = tf.nn.softmax_cross_entropy_with_logits(logits, self.y)
    self.cost = tf.reduce_mean(self.example_weights * self._xent)
class WordLevelModel(object):
  """
  Model to evaluate on word-level predictions

  Args:
    batch_size: minibatch size
    model_params: dictionary of other model parameters
    c2v: char2vec class instance
    max_sequence_len: length of all the input/output sequences
    out_vocab_size: how many languages we are predicting
    dropout_keep_prob: dropout probability indicator
    weights: class weights
  """

  def __init__(self, batch_size, model_params, c2v, max_sequence_len,
               out_vocab_size, dropout_keep_prob=0.0, weights=None):
    """Builds a bi-LSTM tagger that predicts a language per word."""
    self._batch_size = batch_size
    self._dropout_keep_prob = dropout_keep_prob
    self._out_vocab_size = out_vocab_size

    self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
                            name='x')
    # One label distribution per word (3-D), unlike the per-example models.
    self.y = tf.placeholder(tf.float32,
                            [batch_size, max_sequence_len, out_vocab_size],
                            name='y')
    # The bidirectional rnn code requires seq_lens as int64
    self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
    self.example_weights = tf.placeholder(tf.float32, [batch_size],
                                          name='example_weights')

    embeddings = c2v.GetEmbeddings(self.x)
    # One [batch_size, embedding_dims] tensor per timestep (TF<1.0 API).
    self._inputs = [tf.squeeze(input_, [1]) for input_ in
                    tf.split(1, max_sequence_len, embeddings)]

    # Need to prepare a mask to zero out the padding symbols.
    # Make a batch_size x max_sequence_len matrix where each
    # row contains the length repeated max_sequence_len times.
    lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1)
    lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len])

    # Make a matrix where each row contains [0, 1, ..., max_sequence_len]
    r = tf.range(0, max_sequence_len, 1)
    range_row = tf.expand_dims(r, 0)
    range_tiled = tf.tile(range_row, [batch_size, 1])

    # Exposed for debugging/inspection.
    self.lengths_transposed = lengths_transposed
    self.lengths_tiled = lengths_tiled
    self.range_row = range_row
    self.range_tiled = range_tiled

    # Use the logical operations to create a mask
    indicator = tf.less(range_tiled, lengths_tiled+1) #i.e. where seq len is less than index
    trim = np.ones(indicator.get_shape())
    trim[:,0] = 0 #ignore start symbol
    indicator = tf.logical_and(indicator, trim.astype(bool))
    self.indicator = indicator

    sz = [batch_size, max_sequence_len]
    self._mask = tf.select(indicator, tf.ones(sz), tf.zeros(sz))

    #-------------------------------#

    self.weights = tf.constant(weights, dtype=tf.float32, name='class_weights')

    hidden_size = model_params['model_hidden_size']
    proj_size = model_params['model_proj_size']  # optional, can be None

    def GetCell():
      """Creates an LSTM cell with dropout."""
      c = tf.nn.rnn_cell.LSTMCell(hidden_size,
                                  use_peepholes=model_params['peepholes'],
                                  num_proj=proj_size)
      if dropout_keep_prob is not None:
        c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob)
      return c

    # Create the bi-directional LSTM
    with tf.variable_scope('wordrnn'):
      with tf.variable_scope('fw'):
        cell_fw = GetCell()
      with tf.variable_scope('bw'):
        cell_bw = GetCell()

      rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs,
                                             dtype=tf.float32,
                                             sequence_length=self.seq_lens)
      # Output width doubles: forward and backward states are concatenated.
      if proj_size:
        out_size = 2 * proj_size
      else:
        out_size = 2 * hidden_size
      self._DoPredictions(out_size, rnnout, self.weights)

      self.cost = tf.reduce_mean(self.example_weights * self._xent)

  def _DoPredictions(self, in_size, mats, class_weights=None):
    """Takes in an array of states and calculates predictions.

    Get the cross-entropy for each example in the vector self._xent.

    Args:
      in_size: size of the hidden state vectors
      mats: list of hidden state vectors
      class_weights: optional per-class loss weights passed to _SafeXEnt
    """
    pred_mat = tf.get_variable('pred_mat',
                               [in_size, self._out_vocab_size])
    pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])

    # Make a prediction on every word.
    def GetWordPred(o_):
      logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
      return tf.nn.softmax(logits)

    #self.preds_by_word1 = tf.pack([GetWordPred(o_) for o_ in mats])
    #self.preds_by_word = tf.reshape(self.preds_by_word1, self.y.get_shape())
    #self.probs = tf.mul(tf.expand_dims(self._mask,2), self.preds_by_word)

    self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
    # Re-pack from [time, batch, vocab] to [batch, time, vocab] so the result
    # lines up with self.y (NOTE(review): equivalent to a transpose; confirm).
    self.preds_by_instance = tf.pack([self.preds_by_word[:,i,:] for i in range(self.preds_by_word.get_shape()[1])])
    # Zero out predictions on padding/start positions via the mask.
    self.probs = tf.mul(tf.expand_dims(self._mask,2), self.preds_by_instance)

    # Sum the x-ent over both the time and vocab dimensions.
    self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights, sumd=[1,2])
def _SafeXEnt(y, probs, eps=0.0001, class_weights=None, sumd=None):
  """Version of cross entropy loss that should not produce NaNs.

  If the predicted proability for the true class is near zero then when
  taking the log it can produce a NaN, which ruins everything.  This
  function ensures each probability is at least eps and no more than one
  before taking the log.

  Args:
    y: matrix of true probabilities same size as probs
    probs: matrix of probabilities for the minibatch
    eps: value to clip the probabilities at
    class_weights: vector of relative weights to be assigned to each class
    sumd: dimensions along which to sum the x-ent matrix; defaults to [1].
      (BUG FIX: was a mutable default argument `sumd=[1]`, shared across
      calls; use a None sentinel instead.)

  Returns:
    cross entropy loss for each example in the minibatch
  """
  if sumd is None:
    sumd = [1]
  # Clip before log so log(0) can never produce NaN/-inf.
  adjusted_probs = tf.clip_by_value(probs, eps, 1.0 - eps)
  xent_mat = -y * tf.log(adjusted_probs)
  if class_weights is not None:
    xent_mat *= class_weights
  return tf.reduce_sum(xent_mat, sumd)
def _SafeNegEntropy(probs, batch_size, eps=0.0001):
  """Computes negative entropy in a way that will not overflow."""
  # Clip before log so log(0) can never produce NaN/-inf.
  clipped_probs = tf.clip_by_value(probs, eps, 1.0 - eps)
  total_entropy = tf.reduce_sum(tf.mul(probs, tf.log(clipped_probs)))
  return total_entropy / batch_size
| |
import pytest
from pupa.scrape import Membership as ScrapeMembership
from pupa.scrape import Person as ScrapePerson
from pupa.importers import MembershipImporter, PersonImporter, OrganizationImporter
from pupa.exceptions import NoMembershipsError
from opencivicdata.core.models import Organization, Post, Person, Division, Jurisdiction
class DumbMockImporter(object):
    """ this is a mock importer that implements a resolve_json_id that is just a pass-through """

    # Shared (intentionally empty) cache, mirroring the real importer's API.
    json_to_db_id = {}

    def resolve_json_id(self, json_id, allow_no_match=False):
        # Pass-through: every pseudo id "resolves" to itself.
        return json_id
def create_jurisdiction():
    """Create the USA division and the 'fnd-jid' jurisdiction the tests use."""
    division = Division.objects.create(id='ocd-division/country:us', name='USA')
    Jurisdiction.objects.create(id='fnd-jid', division_id=division.id)
@pytest.mark.django_db
def test_full_membership():
    """Import two memberships (one via a post, one direct), check their
    placement and related objects, then re-import with changed attributes
    and confirm they resolve to the same rows and are updated in place."""
    create_jurisdiction()
    org = Organization.objects.create(id="fnd", name="Foundation", classification="foundation",
                                      jurisdiction_id="fnd-jid")
    hari = Person.objects.create(id="hs", name="Hari Seldon")
    robot = Person.objects.create(id="robot", name="R. Daneel Olivaw")
    post = Post.objects.create(id='f', label="founder", role="Founder", organization=org)
    # add a membership through a post, with a start date
    m1 = ScrapeMembership(person_id=hari.id, organization_id=org.id,
                          post_id=post.id, start_date='2020-03-10', end_date='2021-06-30')
    m1.add_contact_detail(type='phone', value='555-555-1234', note='this is fake')
    m1.add_link('http://example.com/link')
    # add a membership direct to an organization, with an end date
    m2 = ScrapeMembership(person_id=robot.id, organization_id=org.id, label='member',
                          role='member', end_date='2019-11-09')
    dumb_imp = DumbMockImporter()
    memimp = MembershipImporter('fnd-jid', dumb_imp, dumb_imp, dumb_imp)
    memimp.import_data([m1.as_dict(), m2.as_dict()])
    # ensure that the memberships attached in the right places
    assert org.memberships.count() == 2
    assert hari.memberships.count() == 1
    assert robot.memberships.count() == 1
    assert post.memberships.count() == 1
    # ensure that the first membership has contact details and links
    m = hari.memberships.get()
    cd = m.contact_details.get()
    assert cd.type == 'phone'
    assert cd.value == '555-555-1234'
    assert cd.note == 'this is fake'
    assert m.links.all()[0].url == 'http://example.com/link'
    # update the imported memberships (i.e., change attributes that are not
    # in the spec) and confirm they resolve correctly
    memimp2 = MembershipImporter('fnd-jid', dumb_imp, dumb_imp, dumb_imp)
    m1.end_date = '2022-03-10'
    m2.extras = {'note': 'bleep blorp'}
    import_log = memimp2.import_data([m1.as_dict(), m2.as_dict()])
    # second run must not insert duplicates, only update the two rows
    assert import_log['membership']['insert'] == 0
    assert import_log['membership']['update'] == 2
    # confirm the membership resolved based on start date and its end date was updated
    assert hari.memberships.count() == 1
    assert hari.memberships.get().end_date == '2022-03-10'
    # confirm the membership resolved based on end date and its extras were updated
    assert robot.memberships.count() == 1
    assert robot.memberships.get().extras == {'note': 'bleep blorp'}
@pytest.mark.django_db
def test_no_membership_for_person():
    """A person imported without any memberships must make the membership
    import fail with NoMembershipsError."""
    create_jurisdiction()
    Organization.objects.create(id="fnd", name="Foundation", classification="foundation",
                                jurisdiction_id="fnd-jid")
    # import a person with no memberships
    p = ScrapePerson('a man without a country')
    person_imp = PersonImporter('fnd-jid')
    person_imp.import_data([p.as_dict()])
    # try to import a membership
    dumb_imp = DumbMockImporter()
    memimp = MembershipImporter('fnd-jid', person_imp, dumb_imp, dumb_imp)
    # empty membership list: the person stays unbound to the jurisdiction
    with pytest.raises(NoMembershipsError):
        memimp.import_data([])
@pytest.mark.django_db
def test_no_membership_for_person_including_party():
    """
    even though party is specified we should still get a no memberships error because it doesn't
    bind the person to a jurisdiction, thus causing duplication
    """
    create_jurisdiction()
    Organization.objects.create(id="fnd", name="Foundation", classification="foundation",
                                jurisdiction_id="fnd-jid")
    # party org has no jurisdiction on purpose
    Organization.objects.create(id="dem", name="Democratic", classification="party")
    # import a person with no memberships
    p = ScrapePerson('a man without a country', party='Democratic')
    person_imp = PersonImporter('fnd-jid')
    org_imp = OrganizationImporter('fnd-jid')
    person_imp.import_data([p.as_dict()])
    # try to import a membership
    dumb_imp = DumbMockImporter()
    memimp = MembershipImporter('fnd-jid', person_imp, org_imp, dumb_imp)
    # the party membership created by ScrapePerson is in p._related
    with pytest.raises(NoMembershipsError):
        memimp.import_data([p._related[0].as_dict()])
@pytest.mark.django_db
def test_multiple_orgs_of_same_class():
    """
    We should be able to set memberships on organizations with the
    same classification within the same jurisdictions
    """
    create_jurisdiction()
    # two orgs that share classification='foundation' in the same jurisdiction
    Organization.objects.create(id="fnd", name="Foundation", classification="foundation",
                                jurisdiction_id="fnd-jid")
    Organization.objects.create(id="fdr", name="Federation", classification="foundation",
                                jurisdiction_id="fnd-jid")
    # primary_org_name disambiguates which same-classification org is meant
    hari = ScrapePerson('Hari Seldon',
                        primary_org='foundation',
                        role='founder',
                        primary_org_name='Foundation')
    picard = ScrapePerson('Jean Luc Picard',
                          primary_org='foundation',
                          role='founder',
                          primary_org_name='Federation')
    person_imp = PersonImporter('fnd-jid')
    person_imp.import_data([hari.as_dict()])
    person_imp.import_data([picard.as_dict()])
    # try to import a membership
    org_imp = OrganizationImporter('fnd-jid')
    dumb_imp = DumbMockImporter()
    memimp = MembershipImporter('fnd-jid', person_imp, org_imp, dumb_imp)
    memimp.import_data([hari._related[0].as_dict(), picard._related[0].as_dict()])
    # each person must land on the org matching their primary_org_name
    assert Person.objects.get(name='Hari Seldon'
                              ).memberships.get().organization.name == 'Foundation'
    assert Person.objects.get(name='Jean Luc Picard'
                              ).memberships.get().organization.name == 'Federation'
@pytest.mark.django_db
def test_multiple_posts_class():
    """One person may hold several posts in the same organization; both
    memberships must import and attach to the right post."""
    create_jurisdiction()
    org = Organization.objects.create(id="fnd", name="Foundation", classification="foundation",
                                      jurisdiction_id="fnd-jid")
    hari = Person.objects.create(id="hs", name="Hari Seldon")
    founder = Post.objects.create(id='f', label="founder", role="Founder", organization=org)
    chair = Post.objects.create(id='c', label="chair", role="Chair", organization=org)
    # same person + org, differing only by post
    m1 = ScrapeMembership(person_id=hari.id, organization_id=org.id, post_id=founder.id)
    m2 = ScrapeMembership(person_id=hari.id, organization_id=org.id, post_id=chair.id)
    dumb_imp = DumbMockImporter()
    memimp = MembershipImporter('fnd-jid', dumb_imp, dumb_imp, dumb_imp)
    memimp.import_data([m1.as_dict(), m2.as_dict()])
    # ensure that the memberships attached in the right places
    assert org.memberships.count() == 2
    assert hari.memberships.count() == 2
    assert founder.memberships.count() == 1
    assert chair.memberships.count() == 1
@pytest.mark.django_db
def test_unmatched_person():
    """A membership whose person cannot be resolved keeps the raw name and
    a NULL person_id after import."""
    create_jurisdiction()
    org = Organization.objects.create(id="fnd", name="Foundation", classification="foundation",
                                      jurisdiction_id="fnd-jid")
    # not a real person, won't have a person_id after import
    membership = ScrapeMembership(person_name='Harry Seldom', organization_id=org.id,
                                  person_id=None)
    imp = DumbMockImporter()
    memimp = MembershipImporter('fnd-jid', imp, imp, imp)
    memimp.import_data([membership.as_dict()])
    # the membership lands on the org with only a person_name recorded
    assert org.memberships.count() == 1
    stored = org.memberships.get()
    assert stored.person_id is None
    assert stored.person_name == 'Harry Seldom'
| |
from django.db.models.signals import post_save, pre_save
from django.test import TestCase
from .models import Account, Employee, Person, Profile, ProxyEmployee
class UpdateOnlyFieldsTests(TestCase):
    """Tests for ``Model.save(update_fields=...)``: plain models, deferred
    fields, multi-table inheritance, proxy models, m2m rejection, signal
    payloads, and the exact number of queries issued."""
    def test_update_fields_basic(self):
        # Only the field named in update_fields is written; the changed
        # gender must be discarded on reload.
        s = Person.objects.create(name='Sara', gender='F')
        self.assertEqual(s.gender, 'F')
        s.gender = 'M'
        s.name = 'Ian'
        s.save(update_fields=['name'])
        s = Person.objects.get(pk=s.pk)
        self.assertEqual(s.gender, 'F')
        self.assertEqual(s.name, 'Ian')
    def test_update_fields_deferred(self):
        # Saving an instance with deferred fields must not touch (or fetch)
        # the deferred columns: exactly one UPDATE query.
        s = Person.objects.create(name='Sara', gender='F', pid=22)
        self.assertEqual(s.gender, 'F')
        s1 = Person.objects.defer("gender", "pid").get(pk=s.pk)
        s1.name = "Emily"
        s1.gender = "M"
        with self.assertNumQueries(1):
            s1.save()
        s2 = Person.objects.get(pk=s1.pk)
        self.assertEqual(s2.name, "Emily")
        self.assertEqual(s2.gender, "M")
    def test_update_fields_only_1(self):
        # only() behaves like defer() of everything else: a single UPDATE
        # still persists fields assigned after loading.
        s = Person.objects.create(name='Sara', gender='F')
        self.assertEqual(s.gender, 'F')
        s1 = Person.objects.only('name').get(pk=s.pk)
        s1.name = "Emily"
        s1.gender = "M"
        with self.assertNumQueries(1):
            s1.save()
        s2 = Person.objects.get(pk=s1.pk)
        self.assertEqual(s2.name, "Emily")
        self.assertEqual(s2.gender, "M")
    def test_update_fields_only_2(self):
        # Saving a deferred field by name forces a fetch of it first,
        # hence two queries; unrelated assignments are not persisted.
        s = Person.objects.create(name='Sara', gender='F', pid=22)
        self.assertEqual(s.gender, 'F')
        s1 = Person.objects.only('name').get(pk=s.pk)
        s1.name = "Emily"
        s1.gender = "M"
        with self.assertNumQueries(2):
            s1.save(update_fields=['pid'])
        s2 = Person.objects.get(pk=s1.pk)
        self.assertEqual(s2.name, "Sara")
        self.assertEqual(s2.gender, "F")
    def test_update_fields_only_repeated(self):
        s = Person.objects.create(name='Sara', gender='F')
        self.assertEqual(s.gender, 'F')
        s1 = Person.objects.only('name').get(pk=s.pk)
        s1.gender = 'M'
        with self.assertNumQueries(1):
            s1.save()
        # save() should not fetch deferred fields
        s1 = Person.objects.only('name').get(pk=s.pk)
        with self.assertNumQueries(1):
            s1.save()
    def test_update_fields_inheritance_defer(self):
        # Deferred save on a child model touches only the table that owns
        # the modified field.
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss)
        e1 = Employee.objects.only('name').get(pk=e1.pk)
        e1.name = 'Linda'
        with self.assertNumQueries(1):
            e1.save()
        self.assertEqual(Employee.objects.get(pk=e1.pk).name, 'Linda')
    def test_update_fields_fk_defer(self):
        # Both assigning the FK object and assigning the raw *_id attribute
        # must persist in a single query.
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
        e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss)
        e1 = Employee.objects.only('profile').get(pk=e1.pk)
        e1.profile = profile_receptionist
        with self.assertNumQueries(1):
            e1.save()
        self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_receptionist)
        e1.profile_id = profile_boss.pk
        with self.assertNumQueries(1):
            e1.save()
        self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_boss)
    def test_select_related_only_interaction(self):
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss)
        e1 = Employee.objects.only('profile__salary').select_related('profile').get(pk=e1.pk)
        profile_boss.name = 'Clerk'
        profile_boss.salary = 1000
        profile_boss.save()
        # The loaded salary of 3000 gets saved, the name of 'Clerk' isn't
        # overwritten.
        with self.assertNumQueries(1):
            e1.profile.save()
        reloaded_profile = Profile.objects.get(pk=profile_boss.pk)
        self.assertEqual(reloaded_profile.name, profile_boss.name)
        self.assertEqual(reloaded_profile.salary, 3000)
    def test_update_fields_m2m(self):
        # m2m fields cannot appear in update_fields; save() must reject it.
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss)
        a1 = Account.objects.create(num=1)
        a2 = Account.objects.create(num=2)
        e1.accounts.set([a1, a2])
        with self.assertRaises(ValueError):
            e1.save(update_fields=['accounts'])
    def test_update_fields_inheritance(self):
        # update_fields works across the parent/child tables of multi-table
        # inheritance, including saving via the raw FK attname.
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
        e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss)
        e1.name = 'Ian'
        e1.gender = 'M'
        e1.save(update_fields=['name'])
        e2 = Employee.objects.get(pk=e1.pk)
        self.assertEqual(e2.name, 'Ian')
        self.assertEqual(e2.gender, 'F')
        self.assertEqual(e2.profile, profile_boss)
        e2.profile = profile_receptionist
        e2.name = 'Sara'
        e2.save(update_fields=['profile'])
        e3 = Employee.objects.get(pk=e1.pk)
        self.assertEqual(e3.name, 'Ian')
        self.assertEqual(e3.profile, profile_receptionist)
        with self.assertNumQueries(1):
            e3.profile = profile_boss
            e3.save(update_fields=['profile_id'])
        e4 = Employee.objects.get(pk=e3.pk)
        self.assertEqual(e4.profile, profile_boss)
        self.assertEqual(e4.profile_id, profile_boss.pk)
    def test_update_fields_inheritance_with_proxy_model(self):
        # Same contract as above, exercised through a proxy model.
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
        e1 = ProxyEmployee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss)
        e1.name = 'Ian'
        e1.gender = 'M'
        e1.save(update_fields=['name'])
        e2 = ProxyEmployee.objects.get(pk=e1.pk)
        self.assertEqual(e2.name, 'Ian')
        self.assertEqual(e2.gender, 'F')
        self.assertEqual(e2.profile, profile_boss)
        e2.profile = profile_receptionist
        e2.name = 'Sara'
        e2.save(update_fields=['profile'])
        e3 = ProxyEmployee.objects.get(pk=e1.pk)
        self.assertEqual(e3.name, 'Ian')
        self.assertEqual(e3.profile, profile_receptionist)
    def test_update_fields_signals(self):
        # pre_save/post_save receivers must see the update_fields set.
        p = Person.objects.create(name='Sara', gender='F')
        pre_save_data = []
        def pre_save_receiver(**kwargs):
            pre_save_data.append(kwargs['update_fields'])
        pre_save.connect(pre_save_receiver)
        post_save_data = []
        def post_save_receiver(**kwargs):
            post_save_data.append(kwargs['update_fields'])
        post_save.connect(post_save_receiver)
        p.save(update_fields=['name'])
        self.assertEqual(len(pre_save_data), 1)
        self.assertEqual(len(pre_save_data[0]), 1)
        self.assertIn('name', pre_save_data[0])
        self.assertEqual(len(post_save_data), 1)
        self.assertEqual(len(post_save_data[0]), 1)
        self.assertIn('name', post_save_data[0])
        pre_save.disconnect(pre_save_receiver)
        post_save.disconnect(post_save_receiver)
    def test_update_fields_incorrect_params(self):
        # Unknown field names and non-iterable-of-names arguments are errors.
        s = Person.objects.create(name='Sara', gender='F')
        with self.assertRaises(ValueError):
            s.save(update_fields=['first_name'])
        with self.assertRaises(ValueError):
            s.save(update_fields="name")
    def test_empty_update_fields(self):
        s = Person.objects.create(name='Sara', gender='F')
        pre_save_data = []
        def pre_save_receiver(**kwargs):
            pre_save_data.append(kwargs['update_fields'])
        pre_save.connect(pre_save_receiver)
        post_save_data = []
        def post_save_receiver(**kwargs):
            post_save_data.append(kwargs['update_fields'])
        post_save.connect(post_save_receiver)
        # Save is skipped.
        with self.assertNumQueries(0):
            s.save(update_fields=[])
        # Signals were skipped, too...
        self.assertEqual(len(pre_save_data), 0)
        self.assertEqual(len(post_save_data), 0)
        pre_save.disconnect(pre_save_receiver)
        post_save.disconnect(post_save_receiver)
    def test_num_queries_inheritance(self):
        # With inheritance, updating fields from a single table must issue
        # one query; spanning both tables issues two.
        s = Employee.objects.create(name='Sara', gender='F')
        s.employee_num = 1
        s.name = 'Emily'
        with self.assertNumQueries(1):
            s.save(update_fields=['employee_num'])
        s = Employee.objects.get(pk=s.pk)
        self.assertEqual(s.employee_num, 1)
        self.assertEqual(s.name, 'Sara')
        s.employee_num = 2
        s.name = 'Emily'
        with self.assertNumQueries(1):
            s.save(update_fields=['name'])
        s = Employee.objects.get(pk=s.pk)
        self.assertEqual(s.name, 'Emily')
        self.assertEqual(s.employee_num, 1)
        # A little sanity check that we actually did updates...
        self.assertEqual(Employee.objects.count(), 1)
        self.assertEqual(Person.objects.count(), 1)
        with self.assertNumQueries(2):
            s.save(update_fields=['name', 'employee_num'])
| |
#!/usr/bin/python
""" A simple function plotter based on matplotlib, tkinter and numpy
See "Help" -> "Usage" for details on how to use different mathematical functions """
try:
import tkinter as tk
except ImportError:
import Tkinter as tk
from tkMessageBox import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from scipy.misc import factorial
from sympy.parsing.sympy_parser import parse_expr
from idlelib import ToolTip
import numpy as np
import matplotlib.pyplot as plt
import sympy as sp
import parser
import re
# Maps user-facing math tokens to their numpy (or helper) equivalents.
# Applied token-wise to the split formula in App.compute_formula().
REPLACE_DIC = {'sin' : 'np.sin',
               'arcsin' : 'np.arcsin',
               'sinh' : 'np.sinh',
               'arcsinh' : 'np.arcsinh',
               'cos' : 'np.cos',
               'arccos' : 'np.arccos',
               'cosh' : 'np.cosh',
               'arccosh' : 'np.arccosh',
               'tan' : 'np.tan',
               'arctan' : 'np.arctan',
               'tanh' : 'np.tanh',
               'arctanh' : 'np.arctanh',
               'ln' : 'np.log',
               'log' : 'np.log',
               'log10' : 'np.log10',
               'log2' : 'np.log2',
               'exp' : 'np.exp',
               '^' : '**',
               'fac' : 'factorial',
               'sqrt' : 'np.sqrt',
               'pi' : 'np.pi',
               'PI' : 'np.pi',
               'sinc' : 'np.sinc'
               }
class App:
    """Tkinter front end for the formula plotter.

    Translates the formula typed by the user into a numpy expression and
    renders it with matplotlib; offers extrema, turning-point and tangent
    helpers on the last computed curve.
    """
    def __init__(self, master):
        self.master = master
        self.initUI()
        # Curve state, populated by compute_formula()/differentiate().
        self.x = 0
        self.y = 0
        self.legend = 0
        self.formula_finish = 0
        self.slope = 0
    def initUI(self):
        """ Initialize the GUI-Elements """
        self.master.title("Formula Plotter")
        self.menu = tk.Menu(self.master)
        self.master.config(menu=self.menu)
        self.helpmenu = tk.Menu(self.menu)
        self.menu.add_cascade(label='Help', menu=self.helpmenu)
        self.helpmenu.add_command(label='Usage', command=self.instructions)
        self.scale_x_min = tk.Scale(self.master, from_=-500, to=0,
                                    tickinterval=100, length=600,
                                    orient='horizontal', command=self.set_x_min)
        self.scale_x_min.grid(row=4, column=1)
        self.x_min = tk.IntVar()
        self.scale_x_max = tk.Scale(self.master, from_=0, to=500,
                                    tickinterval=100, length=600,
                                    orient='horizontal', command=self.set_x_max)
        self.scale_x_max.grid(row=5, column=1)
        self.scale_x_max.set(10)
        self.x_max = tk.IntVar()
        self.replot_button = tk.Button(self.master, text='New plot',
                                       command=self.replot)
        self.replot_button.grid(row=0, column=2)
        ToolTip.ToolTip(self.replot_button,
                        'Clear current plot and draw new function')
        self.updateplot_button = tk.Button(self.master, text='Add to plot',
                                           command=self.update)
        self.updateplot_button.grid(row=0, column=3)
        ToolTip.ToolTip(self.updateplot_button,
                        'Draw new plot on existing')
        self.minima_button = tk.Button(self.master, text='Local Minima',
                                       command=self.minima)
        self.minima_button.grid(row=4, column=2)
        ToolTip.ToolTip(self.minima_button, 'Show local Minima')
        self.maxima_button = tk.Button(self.master, text='Local Maxima',
                                       command=self.maxima)
        self.maxima_button.grid(row=5, column=2)
        ToolTip.ToolTip(self.maxima_button, 'Show local Maxima')
        self.turning_button = tk.Button(self.master, text='Turning point',
                                        command=self.turning_point)
        self.turning_button.grid(row=6, column=2)
        ToolTip.ToolTip(self.turning_button, 'Show turning points')
        self.tangent_button = tk.Button(self.master, text='Tangent',
                                        command=self.tangent)
        self.tangent_button.grid(row=6, column=3)
        ToolTip.ToolTip(self.tangent_button, 'Show tangent at entered value')
        tk.Label(self.master, text='f (x) =').grid(row=0, column=0)
        tk.Label(self.master, text='x minimum').grid(row=4, column=0)
        tk.Label(self.master, text='x maximum').grid(row=5, column=0)
        tk.Label(self.master, text='Enter tangent value').grid(row=4, column=3)
        self.formula = tk.Entry(self.master, width=80)
        self.formula.grid(row=0, column=1)
        self.formula.insert(0, 'sin(x)')
        self.tangent_val = tk.Entry(self.master, width=10)
        self.tangent_val.grid(row=5, column=3)
        self.tangent_val.insert(0, 0)
        fig = plt.figure()
        canvas = FigureCanvasTkAgg(fig, master=self.master)
        # NOTE(review): NavigationToolbar2TkAgg manages its own geometry in
        # some matplotlib versions; mixing it with grid() on the same master
        # may raise a TclError — confirm against the pinned matplotlib.
        toolbar = NavigationToolbar2TkAgg(canvas, self.master)
        canvas.get_tk_widget().grid(row=3, column=1)
        toolbar.grid(row=6, column=1)
    def compute_formula(self, accuracy, x_min, x_max):
        """ Compute the formula, based on re, compile and eval """
        self.x = np.arange(float(x_min),
                           float(x_max), accuracy)
        x = self.x
        formula_raw = self.formula.get().replace('e^x', 'exp(x)')
        formula_raw_exp = formula_raw.replace('e^', 'exp')
        # Split on non-word characters so each token can be looked up.
        formula_list = re.split('(\W)', formula_raw_exp)
        formula_replace = [REPLACE_DIC.get(item, item) for item in formula_list]
        self.formula_finish = ''.join(formula_replace)
        # NOTE(review): eval of user input is unsafe for untrusted users; the
        # `parser` module is also removed in Python 3.10+ — consider
        # compile()/numexpr if this ever runs outside a local desktop app.
        form = parser.expr(self.formula_finish).compile()
        try:
            self.y = eval(form)
            self.legend = self.formula.get()
        except NameError:
            # Unknown token in the formula: fall back to sin(x).
            self.y = np.sin(self.x)
            self.legend = 'sin(x)'
        return (self.x, self.y, self.legend)
    def replot(self):
        """ Clear old plot and draw new one """
        self.compute_formula(0.01, self.get_x_min(), self.get_x_max())
        plt.clf()
        plt.plot(self.x, self.y, label=self.legend)
        plt.grid('on')
        legend = plt.legend()
        legend.draggable(state=True)
        plt.gcf().canvas.draw()
    def update(self):
        """ Add new plot to the old one(s) """
        self.compute_formula(0.01, self.get_x_min(), self.get_x_max())
        plt.plot(self.x, self.y, label=self.legend)
        legend = plt.legend()
        legend.draggable(state=True)
        plt.gcf().canvas.draw()
    def minima(self):
        """ Calculate the local minimas from the last function """
        self.compute_formula(0.01, self.get_x_min(), self.get_x_max())
        # Indices where the discrete slope changes from negative to positive.
        local_min = (np.diff(np.sign(np.diff(self.y))) > 0).nonzero()[0] + 1
        for i in self.x[local_min]:
            for j in self.y[local_min]:
                plt.text(i, j, [float(np.round(i, decimals=3)),
                                float(np.round(j, decimals=3))])
        plt.plot(self.x[local_min], self.y[local_min], "o")
        plt.gcf().canvas.draw()
    def maxima(self):
        """ Calculate the local maximas from the last function """
        self.compute_formula(0.01, self.get_x_min(), self.get_x_max())
        # Indices where the discrete slope changes from positive to negative.
        local_max = (np.diff(np.sign(np.diff(self.y))) < 0).nonzero()[0] + 1
        for i in self.x[local_max]:
            for j in self.y[local_max]:
                plt.text(i, j, [float(np.round(i, decimals=3)),
                                float(np.round(j, decimals=3))])
        plt.plot(self.x[local_max], self.y[local_max], "o")
        plt.gcf().canvas.draw()
    def turning_point(self):
        """ Calculate the turning points from the last function """
        self.compute_formula(0.0001, self.get_x_min(), self.get_x_max())
        # BUG FIX: xrange is Python-2 only and crashes under the Python 3
        # (tkinter) import path this file explicitly supports; range works
        # on both.
        for i in range(1, len(self.y)):
            # Mark sign changes of y by the midpoint of the crossing segment.
            if self.y[i] < 0 and self.y[i-1] > 0:
                average_y = (self.y[i] + self.y[i-1]) / 2
                average_x = (self.x[i] + self.x[i-1]) / 2
                plt.plot(average_x, average_y, 'o')
                plt.text(average_x, average_y, [float(np.round(average_x,
                                                               decimals=3)),
                                                float(np.round(average_y,
                                                               decimals=3))])
                plt.gcf().canvas.draw()
            if self.y[i] > 0 and self.y[i-1] < 0:
                average_y = (self.y[i] + self.y[i-1]) / 2
                average_x = (self.x[i] + self.x[i-1]) / 2
                plt.plot(average_x, average_y, 'o')
                np.set_printoptions(precision=3)
                plt.text(average_x, average_y, [float(np.round(average_x,
                                                               decimals=3)),
                                                float(np.round(average_y,
                                                               decimals=3))])
                plt.gcf().canvas.draw()
    def tangent(self):
        """ Plots the tangent of the last function at an entered point"""
        # Sample a tiny interval around the requested point.
        self.compute_formula(0.0005, float(self.tangent_val.get()) - 0.0001,
                             float(self.tangent_val.get()) + 0.0001)
        plt.plot(self.x, self.y, 'o')
        np.set_printoptions(precision=3)
        plt.text(self.x, self.y, [float(np.round(self.x, decimals=3)),
                                  float(np.round(self.y, decimals=3))])
        self.differentiate(self.tangent_val.get())
        plt.plot([self.x + 1, self.x - 1], [self.y + self.slope, self.y - self.slope])
        plt.gcf().canvas.draw()
    def differentiate(self, val):
        """ Calculates the differential for plotting the tangent """
        x = sp.Symbol('x')
        # Strip the numpy prefixes so sympy can parse the expression.
        # (Removed an unused local that shadowed self.formula_finish.)
        form = self.formula_finish.replace('np.', '')
        sympy_exp = parse_expr(form)
        df = sympy_exp.diff(x)
        self.slope = df.evalf(subs={x: val})
    def set_x_min(self, val):
        """ Set x-min value with the slider """
        value = int(float(val))
        self.x_min.set(value)
    def get_x_min(self):
        """ Return x-min value """
        return self.x_min.get()
    def set_x_max(self, val):
        """ Set x-max value with the slider """
        value = int(float(val))
        self.x_max.set(value)
    def get_x_max(self):
        """ Return x-max value """
        return self.x_max.get()
    def instructions(self):
        """ Opens a info-window and shows the content of usage.txt """
        instruction = open('usage.txt').read()
        showinfo(title='Usage', message=instruction)
def main():
    """Create the root window, attach the App and enter the Tk event loop."""
    root = tk.Tk()
    App(root)
    root.mainloop()
# Start the plotter only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import hashlib
import json
import re
from github import MainClass
from oslo_config import cfg
from oslo_log import log as logging
import six
from stackalytics.processor import normalizer
from stackalytics.processor import rcs
from stackalytics.processor import user_processor
from stackalytics.processor import utils
LOG = logging.getLogger(__name__)  # module-level logger
# Matches project-source URIs that use the custom "github://" scheme.
GITHUB_URI_PREFIX = r'^github:\/\/'
def _check_default_data_change(runtime_storage_inst, default_data):
    """Return True if default_data differs from the previously stored digest.

    Side effect: when a change is detected, the new sha1 digest is written
    back to runtime storage under 'default_data_digest'.
    """
    h = hashlib.new('sha1')
    # BUG FIX: hashlib requires bytes; json.dumps returns text on Python 3,
    # so hashing it raised TypeError there.  Encoding is a no-op for the
    # ASCII output json produces by default, so Python 2 behavior is kept.
    h.update(json.dumps(default_data).encode('utf-8'))
    digest = h.hexdigest()
    p_digest = runtime_storage_inst.get_by_key('default_data_digest')
    if digest == p_digest:
        LOG.debug('No changes in default data, sha1: %s', digest)
        return False
    LOG.debug('Default data has changes, sha1: %s', digest)
    runtime_storage_inst.set_by_key('default_data_digest', digest)
    return True
def _retrieve_project_list_from_sources(project_sources):
    """Yield repo descriptors from every configured source, honoring each
    source's exclusion list."""
    for source in project_sources:
        uri = source.get('uri') or cfg.CONF.review_uri
        # Pick the retriever by URI scheme; unknown schemes yield nothing.
        if re.search(rcs.GERRIT_URI_PREFIX, uri):
            found = _retrieve_project_list_from_gerrit(source)
        elif re.search(GITHUB_URI_PREFIX, uri):
            found = _retrieve_project_list_from_github(source)
        else:
            found = []
        excluded = set(source.get('exclude', []))
        for repo in found:
            if repo['module'] not in excluded:
                yield repo
def _retrieve_project_list_from_gerrit(project_source):
    """Yield repo descriptors for an organization hosted on Gerrit.

    Generator: any failure while talking to Gerrit is logged and the
    generator returns early, yielding nothing, so the caller's configured
    repo list stays unmodified.
    """
    LOG.info('Retrieving project list from Gerrit')
    try:
        uri = project_source.get('uri') or cfg.CONF.review_uri
        gerrit_inst = rcs.Gerrit(uri)
        key_filename = (project_source.get('ssh_key_filename') or
                        cfg.CONF.ssh_key_filename)
        username = project_source.get('ssh_username') or cfg.CONF.ssh_username
        gerrit_inst.setup(key_filename=key_filename, username=username)
        project_list = gerrit_inst.get_project_list()
        gerrit_inst.close()
    except Exception as e:
        LOG.exception(e)
        LOG.warn('Fail to retrieve list of projects. Keep it unmodified')
        # Bare return inside a generator ends iteration with no items.
        return
    organization = project_source['organization']
    LOG.debug('Get list of projects for organization %s', organization)
    # Keep only projects belonging to the configured organization.
    git_repos = [f for f in project_list if f.startswith(organization + "/")]
    git_base_uri = project_source.get('git_base_uri') or cfg.CONF.git_base_uri
    for repo in git_repos:
        (org, name) = repo.split('/')
        repo_uri = '%(git_base_uri)s/%(repo)s.git' % dict(
            git_base_uri=git_base_uri, repo=repo)
        yield {
            'branches': ['master'],
            'module': name,
            'organization': org,
            'uri': repo_uri,
            'releases': []
        }
def _retrieve_project_list_from_github(project_source):
    """Yield repo descriptors for an organization hosted on GitHub.

    Generator: if the GitHub API call fails it logs and returns early,
    yielding nothing, leaving the configured repo list unmodified.
    """
    LOG.info('Retrieving project list from GitHub')
    github = MainClass.Github(timeout=60)
    organization = project_source['organization']
    LOG.debug('Get list of projects for organization %s', organization)
    try:
        github_repos = github.get_organization(organization).get_repos()
    except Exception as e:
        LOG.exception(e)
        LOG.warn('Fail to retrieve list of projects. Keep it unmodified')
        return
    for repo in github_repos:
        yield {
            'branches': [project_source.get('default_branch', 'master')],
            'module': repo.name.lower(),
            'organization': organization,
            'uri': repo.git_url,
            'releases': []
        }
def _create_module_groups_for_project_sources(project_sources, repos):
    """Build one 'organization' module group per organization seen in repos,
    using the source's module_group_name as display name when provided."""
    org_modules = collections.defaultdict(list)
    for repo in repos:
        org_modules[repo['organization']].append(repo['module'])
    # Display name per organization, falling back to the organization itself.
    display_names = {}
    for ps in project_sources:
        display_names[ps.get('organization')] = (ps.get('module_group_name') or
                                                 ps.get('organization'))
    groups = []
    for org, modules in six.iteritems(org_modules):
        groups.append(utils.make_module_group(
            org, name=display_names.get(org, org), modules=modules,
            tag='organization'))
    return groups
def _update_project_list(default_data):
    """Extend default_data['repos'] with repos discovered from its project
    sources (skipping URIs already configured) and add the corresponding
    organization module groups."""
    configured_repos = set(r['uri'] for r in default_data['repos'])
    # BUG FIX: the old code kept the generator and wrote `if repos:`, which
    # is always True for a generator object — a dead guard.  Materialize the
    # result so the emptiness check is meaningful.
    discovered = list(_retrieve_project_list_from_sources(
        default_data['project_sources']))
    if discovered:
        default_data['repos'] += [r for r in discovered
                                  if r['uri'] not in configured_repos]
    # Module groups are built from the full (now extended) repo list.
    default_data['module_groups'] += _create_module_groups_for_project_sources(
        default_data['project_sources'], default_data['repos'])
def _update_with_driverlog_data(default_data, driverlog_data_uri):
    """Merge DriverLog CI information into default_data.

    For every driver declaring a CI account this registers a synthetic
    static user for the CI's gerrit id (once per id) and attaches the
    driver records to the matching repos under a 'ci' key.
    """
    LOG.info('Reading DriverLog data from uri: %s', driverlog_data_uri)
    driverlog_data = utils.read_json_from_uri(driverlog_data_uri)
    module_ci_ids = {}  # module name -> {ci id -> driver record}
    ci_ids = set()  # ci ids already registered as users
    for driver in driverlog_data['drivers']:
        if 'ci' in driver:
            # project_id looks like '<namespace>/<module>'; keep the module.
            module = driver['project_id'].split('/')[1]
            if module not in module_ci_ids:
                module_ci_ids[module] = {}
            ci_id = driver['ci']['id']
            module_ci_ids[module][ci_id] = driver
            if ci_id not in ci_ids:
                ci_ids.add(ci_id)
                default_data['users'].append({
                    'user_id': user_processor.make_user_id(gerrit_id=ci_id),
                    'gerrit_id': ci_id,
                    'user_name': ci_id,
                    'static': True,
                    'companies': [
                        {'company_name': driver['vendor'], 'end_date': None}],
                })
    # Attach the collected CI records to the repos they belong to.
    for repo in default_data['repos']:
        if repo['module'] in module_ci_ids:
            repo['ci'] = module_ci_ids[repo['module']]
def _store_users(runtime_storage_inst, users):
    """Persist default-data users, merging each into any stored copy and
    marking it as statically configured."""
    for user in users:
        existing = user_processor.load_user(runtime_storage_inst,
                                            user_id=user['user_id'])
        if existing:
            # Stored record wins as the base; default data overrides fields.
            existing.update(user)
            user = existing
        user['static'] = True
        user_processor.store_user(runtime_storage_inst, user)
def _store_companies(runtime_storage_inst, companies):
    """Store a lookup from domains, normalized aliases and normalized
    company names to the canonical company name."""
    index = {}
    for company in companies:
        canonical = company['company_name']
        for domain in company['domains']:
            index[domain] = canonical
        for alias in company.get('aliases', []):
            index[utils.normalize_company_name(alias)] = canonical
        index[utils.normalize_company_name(canonical)] = canonical
    runtime_storage_inst.set_by_key('companies', index)
def _store_module_groups(runtime_storage_inst, module_groups):
    """Merge configured module groups into the stored mapping, keyed by the
    group's id (falling back to its name)."""
    stored = runtime_storage_inst.get_by_key('module_groups') or {}
    for group in module_groups:
        group_name = group['module_group_name']
        group_id = group.get('id') or group_name
        stored[group_id] = utils.make_module_group(
            group_id, name=group_name, modules=group['modules'],
            tag=group.get('tag', 'group'))
    runtime_storage_inst.set_by_key('module_groups', stored)
# Dispatch table: default-data sections with dedicated store logic.  Any
# section not listed here is written to runtime storage verbatim (see
# _store_default_data).
STORE_FUNCS = {
    'users': _store_users,
    'companies': _store_companies,
    'module_groups': _store_module_groups,
}
def _store_default_data(runtime_storage_inst, default_data):
    """Normalize default data, then write every section to runtime storage,
    using the dedicated handler from STORE_FUNCS when one exists."""
    normalizer.normalize_default_data(default_data)
    LOG.debug('Update runtime storage with default data')
    for key, value in six.iteritems(default_data):
        handler = STORE_FUNCS.get(key)
        if handler is not None:
            handler(runtime_storage_inst, value)
        else:
            runtime_storage_inst.set_by_key(key, value)
def process(runtime_storage_inst, default_data, driverlog_data_uri):
    """Entry point: enrich default data from its project sources and
    DriverLog, then persist everything to runtime storage."""
    LOG.debug('Process default data')
    has_sources = 'project_sources' in default_data
    if has_sources:
        _update_project_list(default_data)
    _update_with_driverlog_data(default_data, driverlog_data_uri)
    _store_default_data(runtime_storage_inst, default_data)
| |
# -*- coding: UTF-8 -*-
"""Easy to use object-oriented thread pool framework.
A thread pool is an object that maintains a pool of worker threads to perform
time consuming operations in parallel. It assigns jobs to the threads
by putting them in a work request queue, where they are picked up by the
next available thread. This then performs the requested operation in the
background and puts the results in another queue.
The thread pool object can then collect the results from all threads from
this queue as soon as they become available or after all threads have
finished their work. It's also possible, to define callbacks to handle
each result as it comes in.
The basic concept and some code was taken from the book "Python in a Nutshell,
2nd edition" by Alex Martelli, O'Reilly 2006, ISBN 0-596-10046-9, from section
14.5 "Threaded Program Architecture". I wrapped the main program logic in the
ThreadPool class, added the WorkRequest class and the callback system and
tweaked the code here and there. Kudos also to Florent Aide for the exception
handling mechanism.
Basic usage::
>>> pool = ThreadPool(poolsize)
>>> requests = makeRequests(some_callable, list_of_args, callback)
>>> [pool.putRequest(req) for req in requests]
>>> pool.wait()
See the end of the module code for a brief, annotated usage example.
Website : http://chrisarndt.de/projects/threadpool/
"""
__docformat__ = "restructuredtext en"
__all__ = [
'makeRequests',
'NoResultsPending',
'NoWorkersAvailable',
'ThreadPool',
'WorkRequest',
'WorkerThread'
]
__author__ = "Christopher Arndt"
__version__ = '1.2.7'
__revision__ = "$Revision: 416 $"
__date__ = "$Date: 2009-10-07 05:41:27 +0200 (Wed, 07 Oct 2009) $"
__license__ = "MIT license"
# standard library modules
import sys
import threading
import Queue
import traceback
# exceptions
class NoResultsPending(Exception):
    """Raised when every submitted work request has already been processed."""
class NoWorkersAvailable(Exception):
    """Raised when no worker threads remain to process pending requests."""
# internal module helper functions
def _handle_thread_exception(request, exc_info):
"""Default exception handler callback function.
This just prints the exception info via ``traceback.print_exception``.
"""
traceback.print_exception(*exc_info)
# utility functions
def makeRequests(callable_, args_list, callback=None,
        exc_callback=_handle_thread_exception):
    """Build one ``WorkRequest`` per entry of *args_list* for *callable_*.

    Each entry of *args_list* is either a 2-tuple ``(args, kwds)`` holding
    a list of positional arguments and a dict of keyword arguments, or a
    single non-tuple value used as the sole positional argument.

    *callback* and *exc_callback* are forwarded unchanged to every created
    request; see the ``WorkRequest`` docstring for their meaning.
    """
    work_requests = []
    for entry in args_list:
        if isinstance(entry, tuple):
            pos_args, kw_args = entry[0], entry[1]
        else:
            pos_args, kw_args = [entry], None
        work_requests.append(
            WorkRequest(callable_, pos_args, kw_args,
                        callback=callback, exc_callback=exc_callback)
        )
    return work_requests
# classes
class WorkerThread(threading.Thread):
    """Background thread connected to the requests/results queues.
    A worker thread sits in the background and picks up work requests from
    one queue and puts the results in another until it is dismissed.
    """
    def __init__(self, requests_queue, results_queue, poll_timeout=5, **kwds):
        """Set up thread in daemonic mode and start it immediately.
        ``requests_queue`` and ``results_queue`` are instances of
        ``Queue.Queue`` passed by the ``ThreadPool`` class when it creates a new
        worker thread.
        ``poll_timeout`` is the number of seconds a single blocking get on the
        request queue may last before the dismissed flag is re-checked.
        """
        threading.Thread.__init__(self, **kwds)
        # Daemon thread: must not keep the interpreter alive on shutdown.
        # NOTE(review): setDaemon()/isSet() are legacy Python 2-era spellings.
        self.setDaemon(1)
        self._requests_queue = requests_queue
        self._results_queue = results_queue
        self._poll_timeout = poll_timeout
        # Event used as a "please exit" flag; set by dismiss().
        self._dismissed = threading.Event()
        self.start()
    def run(self):
        """Repeatedly process the job queue until told to exit."""
        while True:
            if self._dismissed.isSet():
                # we are dismissed, break out of loop
                break
            # get next work request. If we don't get a new request from the
            # queue after self._poll_timeout seconds, we jump to the start of
            # the while loop again, to give the thread a chance to exit.
            try:
                request = self._requests_queue.get(True, self._poll_timeout)
            except Queue.Empty:
                continue
            else:
                if self._dismissed.isSet():
                    # we are dismissed, put back request in queue and exit loop
                    self._requests_queue.put(request)
                    break
                try:
                    result = request.callable(*request.args, **request.kwds)
                    self._results_queue.put((request, result))
                except:
                    # Bare except is deliberate: any exception raised by the
                    # job is captured and shipped to the results queue (as an
                    # exc_info triple) instead of killing the worker thread.
                    request.exception = True
                    self._results_queue.put((request, sys.exc_info()))
    def dismiss(self):
        """Sets a flag to tell the thread to exit when done with current job."""
        self._dismissed.set()
class WorkRequest:
    """A unit of work to be placed on a ``ThreadPool`` request queue.

    Bundles a callable with its positional and keyword arguments plus
    optional result/exception callbacks.  See the module function
    ``makeRequests`` for a convenient way to build several requests for
    the same callable with different arguments.
    """
    def __init__(self, callable_, args=None, kwds=None, requestID=None,
            callback=None, exc_callback=_handle_thread_exception):
        """Create a work request for a callable and attach callbacks.

        *callback*, if given, is invoked as ``callback(request, result)``
        when the result is picked up from the results queue; attach any
        extra data the callback needs directly to the request object.

        *exc_callback* is invoked as ``exc_callback(request, exc_info)``
        when the callable raised, where *exc_info* is the tuple returned
        by ``sys.exc_info()``.  Pass ``None`` to disable exception
        reporting; the default prints the traceback.

        *requestID* must be hashable (its hash is stored); it defaults to
        ``id(self)`` and is used by ``ThreadPool`` to index pending
        requests in a dictionary.
        """
        if requestID is None:
            self.requestID = id(self)
        else:
            try:
                self.requestID = hash(requestID)
            except TypeError:
                raise TypeError("requestID must be hashable.")
        # Set by a worker thread when the callable raised.
        self.exception = False
        self.callable = callable_
        self.args = args or []
        self.kwds = kwds or {}
        self.callback = callback
        self.exc_callback = exc_callback
    def __str__(self):
        return "<WorkRequest id=%s args=%r kwargs=%r exception=%s>" % (
            self.requestID, self.args, self.kwds, self.exception)
class ThreadPool:
    """A thread pool, distributing work requests and collecting results.
    See the module docstring for more information.
    """
    def __init__(self, num_workers, q_size=0, resq_size=0, poll_timeout=5):
        """Set up the thread pool and start num_workers worker threads.
        ``num_workers`` is the number of worker threads to start initially.
        If ``q_size > 0`` the size of the work *request queue* is limited and
        the thread pool blocks when the queue is full and it tries to put
        more work requests in it (see ``putRequest`` method), unless you also
        use a positive ``timeout`` value for ``putRequest``.
        If ``resq_size > 0`` the size of the *results queue* is limited and the
        worker threads will block when the queue is full and they try to put
        new results in it.
        .. warning:
            If you set both ``q_size`` and ``resq_size`` to ``!= 0`` there is
            the possibility of a deadlock, when the results queue is not pulled
            regularly and too many jobs are put in the work requests queue.
            To prevent this, always set ``timeout > 0`` when calling
            ``ThreadPool.putRequest()`` and catch ``Queue.Full`` exceptions.
        """
        # A size of 0 means "unbounded" (Queue.Queue semantics).
        self._requests_queue = Queue.Queue(q_size)
        self._results_queue = Queue.Queue(resq_size)
        self.workers = []
        self.dismissedWorkers = []
        # requestID -> WorkRequest for every request not yet fully processed.
        self.workRequests = {}
        self.createWorkers(num_workers, poll_timeout)
    def createWorkers(self, num_workers, poll_timeout=5):
        """Add num_workers worker threads to the pool.
        ``poll_timeout`` sets the interval in seconds (int or float) for how
        often threads should check whether they are dismissed, while waiting
        for requests.
        """
        for i in range(num_workers):
            self.workers.append(WorkerThread(self._requests_queue, self._results_queue, poll_timeout=poll_timeout))
    def dismissWorkers(self, num_workers, do_join=False):
        """Tell num_workers worker threads to quit after their current task."""
        dismiss_list = []
        for i in range(min(num_workers, len(self.workers))):
            worker = self.workers.pop()
            worker.dismiss()
            dismiss_list.append(worker)
        if do_join:
            for worker in dismiss_list:
                worker.join()
        else:
            # Keep handles so joinAllDismissedWorkers() can join them later.
            self.dismissedWorkers.extend(dismiss_list)
    def joinAllDismissedWorkers(self):
        """Perform Thread.join() on all worker threads that have been dismissed.
        """
        for worker in self.dismissedWorkers:
            worker.join()
        self.dismissedWorkers = []
    def putRequest(self, request, block=True, timeout=None):
        """Put work request into work queue and save its id for later."""
        assert isinstance(request, WorkRequest)
        # don't reuse old work requests
        assert not getattr(request, 'exception', None)
        self._requests_queue.put(request, block, timeout)
        self.workRequests[request.requestID] = request
    def poll(self, block=False):
        """Process any new results in the queue.

        Raises ``NoResultsPending`` when nothing is outstanding, and
        ``NoWorkersAvailable`` when blocking with no workers left.
        """
        while True:
            # still results pending?
            if not self.workRequests:
                raise NoResultsPending
            # are there still workers to process remaining requests?
            elif block and not self.workers:
                raise NoWorkersAvailable
            try:
                # get back next results
                request, result = self._results_queue.get(block=block)
                # has an exception occured?
                if request.exception and request.exc_callback:
                    request.exc_callback(request, result)
                # hand results to callback, if any
                # exc_callback (when set) takes precedence over callback.
                if request.callback and not (request.exception and request.exc_callback):
                    request.callback(request, result)
                del self.workRequests[request.requestID]
            except Queue.Empty:
                break
    def wait(self):
        """Wait for results, blocking until all have arrived."""
        while 1:
            try:
                self.poll(True)
            except NoResultsPending:
                break
################
# USAGE EXAMPLE
################
# NOTE(review): the example below uses Python 2 print statements, matching
# the era of this module (it imports the Python 2 `Queue` module above).
if __name__ == '__main__':
    import random
    import time
    # the work the threads will have to do (rather trivial in our example)
    def do_something(data):
        time.sleep(random.randint(1, 5))
        result = round(random.random() * data, 5)
        # just to show off, we throw an exception once in a while
        if result > 5:
            raise RuntimeError("Something extraordinary happened!")
        return result
    # this will be called each time a result is available
    def print_result(request, result):
        print "**** Result from request #%s: %r" % (request.requestID, result)
    # this will be called when an exception occurs within a thread
    # this example exception handler does little more than the default handler
    def handle_exception(request, exc_info):
        if not isinstance(exc_info, tuple):
            # Something is seriously wrong...
            print request
            print exc_info
            raise SystemExit
        print "**** Exception occured in request #%s: %s" % \
            (request.requestID, exc_info)
    # assemble the arguments for each job to a list...
    data = [random.randint(1, 10) for i in range(20)]
    # ... and build a WorkRequest object for each item in data
    requests = makeRequests(do_something, data, print_result, handle_exception)
    # to use the default exception handler, uncomment next line and comment out
    # the preceding one.
    #requests = makeRequests(do_something, data, print_result)
    # or the other form of args_lists accepted by makeRequests: ((,), {})
    data = [((random.randint(1, 10),), {}) for i in range(20)]
    requests.extend(
        makeRequests(do_something, data, print_result, handle_exception)
        #makeRequests(do_something, data, print_result)
        # to use the default exception handler, uncomment next line and comment
        # out the preceding one.
    )
    # we create a pool of 3 worker threads
    print "Creating thread pool with 3 worker threads."
    main = ThreadPool(3)
    # then we put the work requests in the queue...
    for req in requests:
        main.putRequest(req)
        print "Work request #%s added." % req.requestID
    # or shorter:
    # [main.putRequest(req) for req in requests]
    # ...and wait for the results to arrive in the result queue
    # by using ThreadPool.wait(). This would block until results for
    # all work requests have arrived:
    # main.wait()
    # instead we can poll for results while doing something else:
    i = 0
    while True:
        try:
            time.sleep(0.5)
            main.poll()
            print "Main thread working...",
            print "(active worker threads: %i)" % (threading.activeCount() - 1, )
            if i == 10:
                print "**** Adding 3 more worker threads..."
                main.createWorkers(3)
            if i == 20:
                print "**** Dismissing 2 worker threads..."
                main.dismissWorkers(2)
            i += 1
        except KeyboardInterrupt:
            print "**** Interrupted!"
            break
        except NoResultsPending:
            print "**** No pending results."
            break
    if main.dismissedWorkers:
        print "Joining all dismissed worker threads..."
        main.joinAllDismissedWorkers()
| |
"""Support for the IBM Watson IoT Platform."""
import logging
import queue
import threading
import time
from ibmiotf import MissingMessageEncoderException
from ibmiotf.gateway import Client
import voluptuous as vol
from homeassistant.const import (
CONF_DOMAINS,
CONF_ENTITIES,
CONF_EXCLUDE,
CONF_ID,
CONF_INCLUDE,
CONF_TOKEN,
CONF_TYPE,
EVENT_HOMEASSISTANT_STOP,
EVENT_STATE_CHANGED,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.helpers import state as state_helper
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_ORG = "organization"
DOMAIN = "watson_iot"
# Publish retry policy: up to MAX_TRIES + 1 attempts, RETRY_DELAY seconds apart
# (see WatsonIOTThread.write_to_watson).
MAX_TRIES = 3
RETRY_DELAY = 20
# Voluptuous schema for configuration.yaml: organization/type/id/token are
# required; include/exclude each optionally hold entity-id and domain lists.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            vol.Schema(
                {
                    vol.Required(CONF_ORG): cv.string,
                    vol.Required(CONF_TYPE): cv.string,
                    vol.Required(CONF_ID): cv.string,
                    vol.Required(CONF_TOKEN): cv.string,
                    vol.Optional(CONF_EXCLUDE, default={}): vol.Schema(
                        {
                            vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
                            vol.Optional(CONF_DOMAINS, default=[]): vol.All(
                                cv.ensure_list, [cv.string]
                            ),
                        }
                    ),
                    vol.Optional(CONF_INCLUDE, default={}): vol.Schema(
                        {
                            vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
                            vol.Optional(CONF_DOMAINS, default=[]): vol.All(
                                cv.ensure_list, [cv.string]
                            ),
                        }
                    ),
                }
            )
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
    """Set up the Watson IoT Platform component.

    Builds the gateway client from the validated config, starts the
    background writer thread and registers a shutdown hook.  Returns True
    on success (Home Assistant setup convention).
    """
    conf = config[DOMAIN]
    include = conf[CONF_INCLUDE]
    exclude = conf[CONF_EXCLUDE]
    # Sets for O(1) membership tests in the per-event filter below.
    include_e = set(include[CONF_ENTITIES])
    include_d = set(include[CONF_DOMAINS])
    exclude_e = set(exclude[CONF_ENTITIES])
    exclude_d = set(exclude[CONF_DOMAINS])
    client_args = {
        "org": conf[CONF_ORG],
        "type": conf[CONF_TYPE],
        "id": conf[CONF_ID],
        "auth-method": "token",
        "auth-token": conf[CONF_TOKEN],
    }
    watson_gateway = Client(client_args)
    def event_to_json(event):
        """Convert a state-changed event to the outgoing JSON dict.

        Returns None when the event is filtered out (unknown/unavailable
        state, excluded, or not matching the include lists).  Exclusion
        is checked first, so exclude wins over include.
        """
        state = event.data.get("new_state")
        if (
            state is None
            or state.state in (STATE_UNKNOWN, "", STATE_UNAVAILABLE)
            or state.entity_id in exclude_e
            or state.domain in exclude_d
        ):
            return
        if (include_e and state.entity_id not in include_e) or (
            include_d and state.domain not in include_d
        ):
            return
        # Try to coerce the state to a number: first the raw string, then
        # Home Assistant's state_as_number helper (handles on/off etc.).
        try:
            _state_as_value = float(state.state)
        except ValueError:
            _state_as_value = None
        if _state_as_value is None:
            try:
                _state_as_value = float(state_helper.state_as_number(state))
            except ValueError:
                _state_as_value = None
        out_event = {
            "tags": {"domain": state.domain, "entity_id": state.object_id},
            "time": event.time_fired.isoformat(),
            "fields": {"state": state.state},
        }
        if _state_as_value is not None:
            out_event["fields"]["state_value"] = _state_as_value
        for key, value in state.attributes.items():
            if key != "unit_of_measurement":
                # If the key is already in fields, suffix it to avoid
                # clobbering the reserved "state"/"state_value" entries.
                if key in out_event["fields"]:
                    key = f"{key}_"
                # For each value we try to cast it as float
                # But if we can not do it we store the value
                # as string
                try:
                    out_event["fields"][key] = float(value)
                except (ValueError, TypeError):
                    out_event["fields"][key] = str(value)
        return out_event
    instance = hass.data[DOMAIN] = WatsonIOTThread(hass, watson_gateway, event_to_json)
    instance.start()
    def shutdown(event):
        """Shut down the thread."""
        # None is the writer thread's shutdown sentinel.
        instance.queue.put(None)
        instance.join()
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
    return True
class WatsonIOTThread(threading.Thread):
    """A threaded event handler class.

    Consumes state-changed events from an internal queue and publishes
    them to the Watson IoT gateway.  A ``None`` item on the queue is the
    shutdown sentinel (see ``setup``'s shutdown hook).
    """

    def __init__(self, hass, gateway, event_to_json):
        """Initialize the listener and connect the gateway."""
        threading.Thread.__init__(self, name="WatsonIOT")
        self.queue = queue.Queue()
        self.gateway = gateway
        self.gateway.connect()
        # Callable converting a HA event to the outgoing dict (or None).
        self.event_to_json = event_to_json
        self.write_errors = 0
        self.shutdown = False
        hass.bus.listen(EVENT_STATE_CHANGED, self._event_listener)

    @callback
    def _event_listener(self, event):
        """Listen for new messages on the bus and queue them for Watson IoT."""
        item = (time.monotonic(), event)
        self.queue.put(item)

    def get_events_json(self):
        """Return a list with the next queued event formatted for writing.

        Blocks until an item is available.  Returns an empty list when the
        item was the ``None`` shutdown sentinel (setting ``self.shutdown``)
        or was filtered out by ``event_to_json``.
        """
        events = []
        if (item := self.queue.get()) is None:
            self.shutdown = True
        else:
            event_json = self.event_to_json(item[1])
            if event_json:
                events.append(event_json)
        return events

    def write_to_watson(self, events):
        """Write preprocessed events to watson, retrying on failure."""
        for event in events:
            for retry in range(MAX_TRIES + 1):
                try:
                    for field in event["fields"]:
                        value = event["fields"][field]
                        device_success = self.gateway.publishDeviceEvent(
                            event["tags"]["domain"],
                            event["tags"]["entity_id"],
                            field,
                            "json",
                            value,
                        )
                    if not device_success:
                        _LOGGER.error("Failed to publish message to Watson IoT")
                        continue
                    break
                except (MissingMessageEncoderException, OSError):
                    if retry < MAX_TRIES:
                        time.sleep(RETRY_DELAY)
                    else:
                        _LOGGER.exception("Failed to publish message to Watson IoT")

    def run(self):
        """Process incoming events until the shutdown sentinel arrives."""
        while not self.shutdown:
            event = self.get_events_json()
            if event:
                self.write_to_watson(event)
            # BUG FIX: task_done() must be called once per queue.get(), even
            # when the item was filtered out or was the shutdown sentinel.
            # Previously it was only called for published events, so
            # block_till_done()/queue.join() could wait forever.
            self.queue.task_done()

    def block_till_done(self):
        """Block till all events processed."""
        self.queue.join()
| |
from django.apps import apps
from django.contrib import messages
from django.contrib.auth.mixins import (LoginRequiredMixin,
PermissionRequiredMixin,
UserPassesTestMixin)
from django.core.urlresolvers import reverse, reverse_lazy
from django.db import IntegrityError
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.views import generic
from django.views.generic import TemplateView
from .forms import GroupForm # , CodeForm
from .mixins import FormMessageMixin
from .models import Group, GroupMember # , ActivationCode
from .tokens import email_unsubscribe_token
class IndexView(TemplateView):
    """Render the static groups landing page."""
    template_name = 'groups/index.html'
class GroupCreateView(LoginRequiredMixin, FormMessageMixin, generic.CreateView):
    """Create a new group; the creator becomes its first moderator."""
    template_name = 'groups/group_create_form.html'
    form_class = GroupForm
    model = Group

    def form_valid(self, form):
        """Persist the group, then record the creator as a moderator."""
        new_group = form.save()
        GroupMember.objects.create(
            user=self.request.user, group=new_group, is_moderator=True)
        return super(GroupCreateView, self).form_valid(form)
class GroupDetailView(generic.DetailView):
    """Show a single group.

    Private groups are visible only to staff and current members; other
    visitors are redirected to the group list with an error message.
    """
    model = Group

    def get(self, request, *args, **kwargs):
        # Fetch the group once; the original called self.get_object() three
        # times in the condition, issuing a redundant DB query per call.
        group = self.get_object()
        if (self.request.user.is_staff
                or not group.is_private
                or self.request.user in group.members.all()):
            return super(GroupDetailView, self).get(request, *args, **kwargs)
        messages.error(self.request, 'This is a private group, you need to have a invitation link.')
        return redirect('groups:group_list')
class GroupListView(generic.ListView):
    """List every public (non-private) group."""
    model = Group

    def get_queryset(self, *args, **kwargs):
        """Restrict the listing to groups that are not private."""
        public_groups = Group.objects.filter(is_private=False)
        return public_groups
class GroupJoinView(LoginRequiredMixin, generic.RedirectView):
    """Add the current user to a group, then bounce to its detail page."""

    def get_redirect_url(self, *args, **kwargs):
        slug = self.kwargs.get('slug')
        return reverse('groups:group_detail', kwargs={'slug': slug})

    def get(self, request, *args, **kwargs):
        group = get_object_or_404(Group, slug=self.kwargs.get('slug'))
        # Guard clause: only staff may join private groups directly.
        if group.is_private and not self.request.user.is_staff:
            messages.error(self.request, 'This is a private group, you need to have a invitation link.')
            return redirect('groups:group_list')
        try:
            GroupMember.objects.create(user=self.request.user, group=group)
        except IntegrityError:
            # Unique constraint hit: the user is already a member.
            messages.warning(self.request, 'Warning, already a member of {}.'.format(group.name))
        else:
            messages.success(self.request, 'You are now a member of the {} group.'.format(group.name))
        return super(GroupJoinView, self).get(request, *args, **kwargs)
class GroupLeaveView(LoginRequiredMixin, generic.RedirectView):
    """Remove the current user's membership, then return to the group list."""

    def get_redirect_url(self, *args, **kwargs):
        return reverse('groups:group_list')

    def get(self, request, *args, **kwargs):
        slug = self.kwargs.get('slug')
        try:
            membership = GroupMember.objects.get(
                user=self.request.user, group__slug=slug)
        except GroupMember.DoesNotExist:
            messages.warning(self.request, "You can't leave this group because you aren't in it.")
        else:
            membership.delete()
            messages.success(self.request, 'You have successfully left this group.')
        return super(GroupLeaveView, self).get(request, *args, **kwargs)
class GroupUpdateView(LoginRequiredMixin, UserPassesTestMixin, FormMessageMixin, generic.UpdateView):
    """Edit a group's details; restricted to staff and group moderators."""
    template_name = 'groups/group_update_form.html'
    form_class = GroupForm
    model = Group
    login_url = reverse_lazy('accounts:login')

    def get_group(self, queryset=None):
        """Look up the group addressed by the URL slug."""
        return Group.objects.get(slug=self.kwargs.get('slug'))

    def test_func(self):
        """Allow staff unconditionally, otherwise only group moderators."""
        if self.request.user.is_staff:
            return True
        try:
            membership = self.get_group().memberships.get(user=self.request.user)
        except GroupMember.DoesNotExist:
            return False
        return membership.is_moderator
class GroupDeleteView(LoginRequiredMixin, UserPassesTestMixin, generic.DeleteView):
    """Delete a group; restricted to staff and group moderators."""
    success_url = reverse_lazy('groups:group_list')
    login_url = reverse_lazy('accounts:login')
    model = Group

    def get_group(self, queryset=None):
        """Look up the group addressed by the URL slug."""
        return Group.objects.get(slug=self.kwargs.get('slug'))

    def test_func(self):
        """Allow staff unconditionally, otherwise only group moderators."""
        if self.request.user.is_staff:
            return True
        try:
            membership = self.get_group().memberships.get(user=self.request.user)
        except GroupMember.DoesNotExist:
            return False
        return membership.is_moderator
class EmailUnsubscribeView(LoginRequiredMixin, UserPassesTestMixin, generic.DetailView):
    """Confirm-by-e-mail flow for unsubscribing from a group's mail."""
    model = Group
    template_name = 'groups/unsubscribe.html'
    login_url = reverse_lazy('accounts:login')

    def get(self, request, *args, **kwargs):
        membership = GroupMember.objects.get(
            group__slug=self.kwargs.get('slug'), user=self.request.user)
        if membership.subscribed:
            return super(EmailUnsubscribeView, self).get(request, *args, **kwargs)
        messages.warning(self.request, "You aren't subscribed to this group.")
        return redirect('groups:group_detail', slug=membership.group.slug)

    def post(self, request, *args, **kwargs):
        membership = GroupMember.objects.get(
            group__slug=self.kwargs.get('slug'), user=self.request.user)
        if not membership.subscribed:
            messages.warning(self.request, "You aren't subscribed to this group.")
            return redirect('groups:group_detail', slug=membership.group.slug)
        # Build and send the confirmation e-mail with a one-time token link.
        site = get_current_site(self.request)
        subject = 'Unsubscribe from {} in SCI#Organizer'.format(membership.group)
        body = render_to_string('groups/unsubscribe_email.html', {
            'group': membership.group,
            'domain': site,
            'gmid': urlsafe_base64_encode(force_bytes(membership.pk)),
            'token': email_unsubscribe_token.make_token(membership),
        })
        membership.user.email_user(subject, body)
        messages.success(self.request, 'E-mail has been sent! Please check your inbox for the confirmation link.')
        return redirect('groups:group_detail', slug=membership.group.slug)

    def test_func(self):
        """Only staff or members of this group may use the view."""
        group = self.get_object()
        return self.request.user.is_staff or self.request.user in group.members.all()
class EmailSubscribeView(LoginRequiredMixin, UserPassesTestMixin, generic.DetailView):
    """Confirm-by-e-mail flow for subscribing to a group's mail."""
    model = Group
    login_url = reverse_lazy('accounts:login')

    def get_redirect_url(self, *args, **kwargs):
        slug = self.kwargs.get('slug')
        return reverse('groups:group_detail', kwargs={'slug': slug})

    def get(self, request, *args, **kwargs):
        membership = GroupMember.objects.get(
            group__slug=self.kwargs.get('slug'), user=self.request.user)
        if membership.subscribed:
            messages.warning(self.request, 'You are already subscribed to this group.')
            return redirect(self.get_redirect_url())
        # Build and send the confirmation e-mail with a one-time token link.
        site = get_current_site(self.request)
        subject = 'Subscribe to {} in SCI#Organizer'.format(membership.group)
        body = render_to_string('groups/subscribe_email.html', {
            'group': membership.group,
            'domain': site,
            'gmid': urlsafe_base64_encode(force_bytes(membership.pk)),
            'token': email_unsubscribe_token.make_token(membership),
        })
        membership.user.email_user(subject, body)
        messages.success(self.request, 'E-mail has been sent! Please check your inbox for the confirmation link.')
        return super(EmailSubscribeView, self).get(request, *args, **kwargs)

    def test_func(self):
        """Only staff or members of this group may use the view."""
        group = self.get_object()
        return self.request.user.is_staff or self.request.user in group.members.all()
def subscribe(request, slug, gmidb64, token):
    """Confirm an e-mailed subscribe link for a group membership.

    *gmidb64* is the base64-encoded GroupMember pk and *token* the one-time
    token from ``email_unsubscribe_token``.  A valid token marks the
    membership subscribed; any failure shows an error.  Both paths redirect
    to the group detail page.
    """
    try:
        gmid = force_text(urlsafe_base64_decode(gmidb64))
        group_member = GroupMember.objects.get(pk=gmid)
    except (TypeError, ValueError, OverflowError, GroupMember.DoesNotExist):
        group_member = None
    if group_member is not None and email_unsubscribe_token.check_token(group_member, token):
        group_member.subscribed = True
        group_member.save()
        messages.success(request, 'Your have successfully subscribed to this group.')
        return redirect('groups:group_detail', slug=group_member.group.slug)
    # BUG FIX: on an invalid link group_member may be None, so the original
    # `group_member.group.slug` raised AttributeError here.  Use the slug
    # already present in the URL instead.
    messages.error(request, 'The link was invalid, possibly because it has already been used.')
    return redirect('groups:group_detail', slug=slug)
def unsubscribe(request, slug, gmidb64, token):
    """Confirm an e-mailed unsubscribe link for a group membership.

    Mirror of ``subscribe``: a valid token clears the membership's
    subscribed flag; any failure shows an error.  Both paths redirect to
    the group detail page.
    """
    try:
        gmid = force_text(urlsafe_base64_decode(gmidb64))
        group_member = GroupMember.objects.get(pk=gmid)
    except (TypeError, ValueError, OverflowError, GroupMember.DoesNotExist):
        group_member = None
    if group_member is not None and email_unsubscribe_token.check_token(group_member, token):
        group_member.subscribed = False
        group_member.save()
        messages.success(request, 'Your subscription for this group has been successfully cancelled.')
        return redirect('groups:group_detail', slug=group_member.group.slug)
    # BUG FIX: on an invalid link group_member may be None, so the original
    # `group_member.group.slug` raised AttributeError here.  Use the slug
    # already present in the URL instead.
    messages.error(request, 'The link was invalid, possibly because it has already been used.')
    return redirect('groups:group_detail', slug=slug)
# class GroupInviteView(LoginRequiredMixin, generic.RedirectView):
# def get(self, request, *args, **kwargs):
# code = ActivationCode.objects.get(code=self.kwargs.get('code'))
# if code.active or code is not None:
# try:
# GroupMember.objects.create(user=self.request.user, group=code.group)
# code.deactivate()
# messages.success(self.request, 'You are now a member of the {} group.'.format(code.group.name))
# return redirect('groups:group_detail', slug=code.group.slug)
# except IntegrityError:
# messages.warning(self.request, 'Warning, already a member of {}.'.format(code.group.name))
# return redirect('groups:group_detail', slug=code.group.slug)
# messages.error(self.request, 'Given link was invalid, possibly because it has already been used.')
# return redirect('groups:group_list')
# class CodeCreateView(LoginRequiredMixin, UserPassesTestMixin, generic.CreateView):
# model = ActivationCode
# form_class = CodeForm
# template_name = 'groups/code_create_form.html'
# def form_valid(self, form):
# group = self.get_group()
# for i in range(int(form.cleaned_data['amount'])):
# code = ActivationCode.objects.create(group=group).save()
# messages.success(self.request, 'Successfully generated {} code-s'.format(form.cleaned_data['amount']))
# return redirect('groups:code_list', slug=group.slug)
# def get_success_url(self):
# return reverse('groups:group_detail', kwargs={'slug': self.kwargs.get('slug')})
# def get_group(self, queryset=None):
# group_slug = self.kwargs.get('slug')
# return Group.objects.get(slug=group_slug)
# def test_func(self):
# try:
# return self.request.user.is_staff or self.get_group().memberships.get(user=self.request.user).is_moderator
# except GroupMember.DoesNotExist:
# return False
# class CodeListView(LoginRequiredMixin, UserPassesTestMixin, generic.ListView):
# model = ActivationCode
# template_name = 'groups/code_list.html'
# def get_queryset(self):
# return ActivationCode.objects.filter(active=True, group=self.get_group())
# def get_group(self, queryset=None):
# group_slug = self.kwargs.get('slug')
# return Group.objects.get(slug=group_slug)
# def test_func(self):
# try:
# return self.request.user.is_staff or self.get_group().memberships.get(user=self.request.user).is_moderator
# except GroupMember.DoesNotExist:
# return False
# class CodeDeleteView(LoginRequiredMixin, UserPassesTestMixin, generic.DeleteView):
# model = ActivationCode
# template_name = 'groups/code_confirm_delete.html'
# def get_success_url(self):
# return reverse('groups:code_list', kwargs={'slug': self.get_group().slug})
# def get_object(self, queryset=None):
# code = self.kwargs.get('code')
# return ActivationCode.objects.get(group=self.get_group(), code=code)
# def get_group(self, queryset=None):
# group_slug = self.kwargs.get('slug')
# return Group.objects.get(slug=group_slug)
# def test_func(self):
# try:
# return self.request.user.is_staff or self.get_group().memberships.get(user=self.request.user).is_moderator
# except GroupMember.DoesNotExist:
# return False
| |
##
# Copyright (c) 2015, Tyler Finethy, David Jurgens
#
# All rights reserved. See LICENSE file for details
##
"""
Geocoder and Reverse-Geocoder to be used by the Geolocation Project
Allows for multiple dataset inputs
"""
import os, os.path
import csv
import re
import logging
import sys
import gzip
import json
LOGGER = logging.getLogger(os.path.basename(__file__))
class Geocoder(object):
    """
    Geocoder to be used on the Geolocation Inference Project.

    Builds an in-memory mapping from lower-cased place names to (lat, lon)
    tuples and resolves free-text location strings against it.
    """
    def __init__(self,dataset="geonames"):
        """
        Initializes the "geocoder" dictionary from geonames.
        Raises NotImplementedError for any other ``dataset`` value.
        """
        # Lower-cased US state abbreviation -> full state name.
        self.abbv_to_state = state_abbv_data()
        # Regex matching any known state abbreviation as a whole word.
        self.state_abbv_regex = re.compile(r'(\b' + (r'\b|\b'.join(self.abbv_to_state.keys())) + r'\b)')
        # Lower-cased place name -> (lat, lon).
        self.lc_name_to_location = {}
        LOGGER.debug("Geocoder loading city-location mapping from %s" % (dataset))
        if dataset == "geonames":
            data = geonames_data()
            line_no = 0
            # data[0] is assumed to be a header row and is skipped.
            for line in data[1:]:
                #TODO this city name should be formatted the same as incoming tweets
                city_name = line[0].lower()
                if not city_name:
                    continue
                line_no += 1
                if line_no % 1000000 == 0:
                    LOGGER.debug("currently read %d locations from %s" %
                                 (line_no, dataset))
                lat = float(line[1])
                lon = float(line[2])
                # Duplicate names: later rows silently overwrite earlier ones.
                self.lc_name_to_location[city_name] = (lat, lon)
        else:
            raise NotImplementedError(dataset)
        LOGGER.debug("Geocoder loaded %d locations from %s" %
                     (len(self.lc_name_to_location), dataset))
    def geocode_noisy(self, location_name):
        """
        Returns the latitude and longitude (tuple) of a noisy location name
        (e.g., the location field of a social media user's profile). If your
        input isn't cleaned, you probably want this method instead of geocode().
        Returns None when no match is found.
        """
        usaRegex = re.compile("\\busa\\b")
        usRegex = re.compile("\\bus\\b")
        ukRegex = re.compile("\\buk\\b")
        name = location_name.lower()
        name = name.strip()
        # Correct for a few common noisy prefixes
        if name.startswith("the city of "):
            name = name[12:] #.substring("the city of ".length())
        if name.startswith("downtown "):
            name = name[9:] #.substring("downtown ".length())
        # Swap out the three common country abbreviations ("usa" must be
        # replaced before "us" so the shorter pattern can't clobber it).
        name = re.sub(usaRegex, "united states", name)
        name = re.sub(usRegex, "united states", name)
        name = re.sub(ukRegex, "united kingdom", name)
        # Substitute out state names from the US (first match only)
        matches = re.search(self.state_abbv_regex, name)
        if not matches is None:
            abbv = matches.group(0)
            expanded = name[:matches.start(0)] + self.abbv_to_state[abbv] + name[matches.end(0):]
            #print "%s:: %s -> %s" % (abbv, name, expanded)
            name = expanded
        # Once we've matched abbreviations, lower case for all further
        # comparisons
        name = name.lower()
        # Hard-coded answer for the many spellings of the US capital.
        if name == "washington, d.c." or name == "washington dc" or name == "washington, dc":
            return (38.904722, -77.016389)
        # Strip off all the cruft on either side
        # NOTE(review): the class [\W+] strips '+' in addition to non-word
        # characters, and substitutes a space which strip() then removes.
        name = re.sub(r'^[\W+]+', " ", name)
        name = re.sub(r'[\W+]+$', " ", name)
        name = name.strip()
        # Rename the dict for brevity since we're going to reference it a lot
        # in the next section
        locs = self.lc_name_to_location
        # Last ditch effort: just try matching the whole name and hope it's
        # a single unambiguous city match
        if name in locs:
            return locs[name]
        # If the name looks like raw coordinates ("lat, lon", optionally
        # prefixed by "label: "), parse and return them directly.
        try:
            if name.find(':') > 0:
                idx = name.find(':')
                coords = name[idx+1:].replace(' ','').split(',')
                return (float(coords[0]), float(coords[1]))
            else:
                coords = name.replace(' ','').split(',')
                return (float(coords[0]), float(coords[1]))
        except:
            # Not coordinates; fall through to name matching.
            pass
        # print "SEACHING %s..." % (name)
        # Look for some name delimiters in the name to try matching on
        # city/state, etc.  Dictionary keys for multi-part names are
        # tab-joined (e.g. "city\tstate").
        if name.find(',') >= 0 or name.find('-') >= 0 or name.find('|') >= 0:
            parts = re.split(r'[,\-|]+', name)
            if len(parts) == 2:
                p1 = parts[0].strip()
                p2 = parts[1].strip()
                # print "CASE1: (%s) (%s)" % (p1, p2)
                # Try "p1<TAB>p2", the reversed order, then p1 alone.
                if p1 + '\t' + p2 in locs:
                    return locs[p1 + '\t' + p2]
                elif p2 + '\t' + p1 in locs:
                    return locs[p2 + '\t' + p1]
                elif p1 in locs:
                    return locs[p1]
                # Retry with "st." <-> "saint" swapped in the first part.
                # NOTE(review): "st." is an unescaped regex, so '.' matches
                # any character (e.g. "sta") -- confirm this is intended.
                if p1.find("st.") >= 0:
                    p1 = re.sub("st.", "saint", p1)
                    if p1 + '\t' + p2 in locs:
                        return locs[p1 + '\t' + p2]
                    elif p2 + '\t' + p1 in locs:
                        return locs[p2 + '\t' + p1]
                    elif p1 in locs:
                        return locs[p1]
                elif p1.find("saint") >= 0:
                    p1 = re.sub("saint", "st.", p1)
                    if p1 + '\t' + p2 in locs:
                        return locs[p1 + '\t' + p2]
                    elif p2 + '\t' + p1 in locs:
                        return locs[p2 + '\t' + p1]
                    elif p1 in locs:
                        return locs[p1]
            elif len(parts) == 3:
                p1 = parts[0].strip()
                p2 = parts[1].strip()
                p3 = parts[2].strip()
                # print "CASE2: (%s) (%s) (%s)" % (p1, p2, p3)
                # Try all three parts, then p1+p2, p1+p3, then p1 alone.
                if p1 + '\t' + p2 + '\t' + p3 in locs:
                    return locs[p1 + '\t' + p2 + '\t' + p3]
                elif p1 + '\t' + p2 in locs:
                    return locs[p1 + '\t' + p2]
                elif p1 + '\t' + p3 in locs:
                    return locs[p1 + '\t' + p3]
                elif p1 in locs:
                    return locs[p1]
                # Same lookups with "st." <-> "saint" swapped in p1.
                if p1.find("st.") >= 0:
                    p1 = re.sub("st.", "saint", p1)
                    if p1 + '\t' + p2 + '\t' + p3 in locs:
                        return locs[p1 + '\t' + p2 + '\t' + p3]
                    elif p1 + '\t' + p2 in locs:
                        return locs[p1 + '\t' + p2]
                    elif p1 + '\t' + p3 in locs:
                        return locs[p1 + '\t' + p3]
                    elif p1 in locs:
                        return locs[p1]
                if p1.find("saint") >= 0:
                    p1 = re.sub("saint", "st.", p1)
                    if p1 + '\t' + p2 + '\t' + p3 in locs:
                        return locs[p1 + '\t' + p2 + '\t' + p3]
                    elif p1 + '\t' + p2 in locs:
                        return locs[p1 + '\t' + p2]
                    elif p1 + '\t' + p3 in locs:
                        return locs[p1 + '\t' + p3]
                    elif p1 in locs:
                        return locs[p1]
            else:
                pass #print "CASE5: %s" % (parts)
        # Otherwise no delimiters so we're left to guess at where the name
        # breaks
        else:
            parts = re.split(r'[ \t\n\r]+', name)
            if len(parts) == 2:
                p1 = parts[0]
                p2 = parts[1]
                #print "CASE3: (%s) (%s)" % (p1, p2)
                if p1 + '\t' + p2 in locs:
                    return locs[p1 + '\t' + p2]
                elif p2 + '\t' + p1 in locs:
                    return locs[p2 + '\t' + p1]
                elif p1 in locs:
                    return locs[p1]
                if p1.find("st.") >= 0:
                    p1 = re.sub("st.", "saint", p1)
                    if p1 + '\t' + p2 in locs:
                        return locs[p1 + '\t' + p2]
                    elif p2 + '\t' + p1 in locs:
                        return locs[p2 + '\t' + p1]
                    elif p1 in locs:
                        return locs[p1]
                elif p1.find("saint") >= 0:
                    p1 = re.sub("saint", "st.", p1)
                    if p1 + '\t' + p2 in locs:
                        return locs[p1 + '\t' + p2]
                    elif p2 + '\t' + p1 in locs:
                        return locs[p2 + '\t' + p1]
                    elif p1 in locs:
                        return locs[p1]
            elif len(parts) > 2:
                # Guess that the last name is a country/state and try
                # city/<whatever>
                #print "CASE4: %s" % (parts)
                last = parts[-1]
                city = ' '.join(parts[:-1])
                if city + '\t' + last in locs:
                    return locs[city + '\t' + last]
            else:
                pass #print "CASE6: %s" % (parts)
        #print "FOUND? %s ('%s') -> %s" % (location_name, name, lat_lon)
        # Nothing matched.
        return None
def geonames_data():
    """
    Return the rows of the geonames countries TSV dataset as a list of
    row lists.
    """
    file_name = "resources/geonames_countries.tsv"
    with open(file_name, 'r') as fin:
        # materialize the whole file; each row is a list of column values
        return list(csv.reader(fin, delimiter="\t"))
def state_abbv_data():
    """
    Return a dict mapping lowercase state abbreviations to lowercase
    state names, read from the bundled state table CSV.
    """
    file_name = "resources/state_table.csv"
    abbv_to_state = {}
    with open(file_name, 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=',', quotechar='"')
        # skip the header row
        next(reader, None)
        for row in reader:
            state_name = row[1].lower()
            abbv = row[2].lower()
            # some rows have no abbreviation: skip those
            if abbv:
                abbv_to_state[abbv] = state_name
    return abbv_to_state
def extract_user_id(post):
    """Return the author's string id from a decoded tweet dict."""
    user = post["user"]
    return user["id_str"]
def extract_user_location(post):
    """Return the free-text, self-reported location field from a decoded tweet dict."""
    user = post["user"]
    return user["location"]
def main():
    """
    Geocode Twitter users' self-reported location fields.

    Reads gzipped JSON tweets, keeps the first location field seen per
    user id, geocodes each non-empty location with Geocoder, and writes
    uid/lat/lon rows to a TSV file.
    """
    # Generate geocoder from preprocessed CSV
    print("Starting...")
    gc = Geocoder()
    print("Geocoder created with {0} places.".format(len(gc.lc_name_to_location)))
    location_field_data_fn = "../sample_tweets.json.gz"
    output_fn = "../sample_dataset/users.home-locations.geo-median.tsv"
    uidToLocations = {}
    fin = gzip.open(location_field_data_fn)
    for line in fin:
        try:
            # un-escape doubled backslashes before parsing the JSON
            line = line.strip().replace(r'\\"', r'\"')
            post = json.loads(line)
            uid = extract_user_id(post)
            # keep only the first location field seen for each user
            if uid not in uidToLocations:
                uidToLocations[uid] = extract_user_location(post)
        except Exception:
            # best-effort: skip malformed JSON or records missing fields
            continue
    fin.close()
    print(len(uidToLocations))
    geocoded_count = 0
    with open(output_fn, "w") as fout:
        csvwriter = csv.writer(fout, delimiter='\t')
        for uid, location in uidToLocations.items():
            # empty/None location fields cannot be geocoded
            pt = gc.geocode_noisy(location) if location else None
            if pt is None:
                continue
            csvwriter.writerow([uid, pt[0], pt[1]])
            geocoded_count += 1
    print(geocoded_count)
# Script entry point.
if __name__ == "__main__":
    main()
# ---- boundary artifact: the following content is a separate concatenated source file ----
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
from collections import deque
import logging
import os
import re
import nltk
# True when running under NLTK 3.x: the parse-tree node label API changed
# between NLTK 2 (`.node` attribute) and NLTK 3 (`.label()` method); see
# CopyrightDetector.detect below.
nltk3 = nltk.__version__.startswith('3')
import commoncode
from textcode import analysis
from cluecode import copyrights_hint
logger = logging.getLogger(__name__)
# enable debug logging to stdout when the SC_COPYRIGHT_DEBUG environment
# variable is set
if os.environ.get('SC_COPYRIGHT_DEBUG'):
    import sys
    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
    logger.setLevel(logging.DEBUG)
"""
Detect and collect copyright statements.
The process consists in:
- prepare and cleanup text
- identify regions of text that may contain copyright (using hints)
- tag the text for parts-of-speech (POS) to identify various copyright
statements parts such as dates, names ("named entities"), etc. This is done
using NLTK POS tagging
- feed the tagged text to a parsing grammar describing actual copyright
statements
- yield copyright statements, years, holder and authors with start and end line
from the parse tree, eventually performing some minor cleanups.
"""
def detect_copyrights(location):
    """
    Yield tuples of:
    (copyrights list, authors list, years list, holders list, start line, end line)
    detected in file at location. Regions with no detections are skipped.
    """
    detector = CopyrightDetector()
    text = analysis.text_lines(location)
    for region in candidate_lines(text):
        result = detector.detect(region)
        copyrights, authors, years, holders = result[:4]
        # only yield regions where something was actually found
        if copyrights or authors or years or holders:
            yield result
def detect(location):
    """
    Return lists of detected copyrights, authors, years and holders
    in file at location.
    Deprecated legacy entry point.
    """
    copyrights = []
    authors = []
    years = []
    holders = []
    # flatten the per-region results, dropping the line positions
    for detected in detect_copyrights(location):
        cp, auth, yr, hold = detected[:4]
        copyrights += cp
        authors += auth
        years += yr
        holders += hold
    return copyrights, authors, years, holders
# FIXME: multi-tokens patterns are likely not behaving as expected
# FIXME: patterns could be greatly simplified
# Ordered (regex, tag) pairs fed to nltk.RegexpTagger (see
# CopyrightDetector.__init__); each token gets the tag of a matching
# pattern, with the catch-all NN pattern last.
patterns = [
    # TODO: this needs to be simplified:
    # TODO: in NLTK 3.0 this will fail because of this bug:
    # https://github.com/nltk/nltk/issues/1025
    # JUNK are things to ignore
    # All Rights Reserved. should be a terminator/delimiter.
    (r'^([Aa]ll [Rr]ights? [Rr]eserved|ALL RIGHTS? RESERVED|[Aa]ll|ALL)$', 'JUNK'),
    (r'^([Rr]eserved|RESERVED)[,]?$', 'JUNK'),
    # found in crypto certificates and LDAP
    (r'^(O=|OU=|OU|XML)$', 'JUNK'),
    (r'^(Parser|Dual|Crypto|NO|PART|[Oo]riginall?y?|[Rr]epresentations?\.?)$', 'JUNK'),
    (r'^(Refer|Apt|Agreement|Usage|Please|Based|Upstream|Files?|Filename:?|Description:?|Holder?s|HOLDER?S|[Pp]rocedures?|You|Everyone)$', 'JUNK'),
    (r'^(Rights?|Unless|rant|Subject|Acknowledgements?|Special)$', 'JUNK'),
    (r'^(Derivative|Work|[Ll]icensable|[Ss]ince|[Ll]icen[cs]e[\.d]?|[Ll]icen[cs]ors?|under|COPYING)$', 'JUNK'),
    (r'^(TCK|Use|[Rr]estrictions?|[Ii]ntroduction)$', 'JUNK'),
    (r'^([Ii]ncludes?|[Vv]oluntary|[Cc]ontributions?|[Mm]odifications?)$', 'JUNK'),
    (r'^(CONTRIBUTORS?|OTHERS?|Contributors?\:)$', 'JUNK'),
    (r'^(Company:|For|File|Last|[Rr]elease|[Cc]opyrighting)$', 'JUNK'),
    (r'^Authori.*$', 'JUNK'),
    (r'^[Bb]uild$', 'JUNK'),
    #
    (r'^Copyleft|LegalCopyright|AssemblyCopyright|Distributed$', 'JUNK'),
    # Bare C char is COPYRIGHT SIGN
    # (r'^C$', 'COPY'),
    # exceptions to composed proper nouns, mostly debian copyright-related
    # FIXME: may be lowercase instead?
    (r'^(Title:?|Debianized-By:?|Upstream-Maintainer:?|Content-MD5)$', 'JUNK'),
    (r'^(Upstream-Author:?|Packaged-By:?)$', 'JUNK'),
    # NOT a copyright symbol (ie. "copyrighted."): treat as NN
    (r'^[Cc](opyright(s|ed)?|OPYRIGHT(S|ED))\.$', 'NN'),
    # copyright word or symbol
    # note the leading @ .... this may be a source of problems
    (r'.?(@?([Cc]opyright)s?:?|[(][Cc][)]|(COPYRIGHT)S?:?)', 'COPY'),
    # copyright in markup, until we strip markup: apache'>Copyright
    (r'[A-Za-z0-9]+[\'">]+[Cc]opyright', 'COPY'),
    # company suffix
    (r'^([Ii]nc[.]?|[I]ncorporated|[Cc]ompany|Limited|LIMITED).?$', 'COMP'),
    # company suffix
    (r'^(INC(ORPORATED|[.])?|CORP(ORATION|[.])?|FOUNDATION|GROUP|COMPANY|[(]tm[)]).?$|[Ff]orum.?', 'COMP'),
    # company suffix
    (r'^([cC]orp(oration|[.])?|[fF]oundation|[Aa]lliance|Working|[Gg]roup|[Tt]echnolog(y|ies)|[Cc]ommunit(y|ies)|[Mm]icrosystems.?|[Pp]roject|[Tt]eams?|[Tt]ech).?$', 'COMP'),
    # company suffix : LLC, LTD, LLP followed by one extra char
    (r'^([Ll][Ll][CcPp]|[Ll][Tt][Dd])\.,$', 'COMP'),
    (r'^([Ll][Ll][CcPp]|[Ll][Tt][Dd])\.?,?$', 'COMP'),
    (r'^([Ll][Ll][CcPp]|[Ll][Tt][Dd])\.$', 'COMP'),
    # company suffix : SA, SAS, AG, AB, AS, CO, labs followed by a dot
    (r'^(S\.?A\.?S?|Sas|sas|A[GBS]|Labs?|[Cc][Oo]\.|Research|INRIA).?$', 'COMP'),
    # (german) company suffix
    (r'^[Gg][Mm][Bb][Hh].?$', 'COMP'),
    # university
    (r'^[Uu]niv([.]|ersit(y|e|at?|ad?))$', 'UNI'),
    # institutes
    (r'^[Ii]nstitut(s|o|os|e|es|et|a|at|as|u|i)?$', 'NNP'),
    # "holders" is considered as a common noun
    (r'^([Hh]olders?|HOLDERS?|[Rr]espective)$', 'NN'),
    # (r'^[Cc]ontributors?\.?', 'NN'),
    # "authors" or "contributors" is interesting, and so a tag of its own
    (r'^[Aa]uthors?$', 'AUTH'),
    (r'^[Aa]uthor\(s\)$', 'AUTH'),
    (r'^[Cc]ontribut(ors?|ing)\.?$', 'AUTH'),
    # commiters is interesting, and so a tag of its own
    (r'[Cc]ommitters?', 'COMMIT'),
    # same for maintainer, developed, etc...
    (r'^(([Rr]e)?[Cc]oded|[Mm]odified|[Mm]ai?nt[ea]ine(d|r)|[Ww]ritten|[Dd]eveloped)$', 'AUTH2'),
    # author
    (r'@author', 'AUTH'),
    # of
    (r'^[Oo][Ff]|[Dd][Ee]$', 'OF'),
    # in
    (r'^in$', 'IN'),
    # by
    (r'^by$', 'BY'),
    # conjunction: and
    (r'^([Aa]nd|&)$', 'CC'),
    # conjunction: or. Even though or is not conjunctive ....
    # (r'^or$', 'CC'),
    # conjunction: or. Even though or is not conjunctive ....
    # (r'^,$', 'CC'),
    # ie. in things like "Copyright (c) 2012 John Li and others"
    (r'^others$', 'OTH'),
    # in year ranges: dash, or 'to': "1990-1995", "1990/1995" or "1990 to 1995"
    (r'^([-/]|to)$', 'DASH'),
    # explicitly ignoring these words: FIXME: WHY?
    (r'^([Tt]his|THIS|[Pp]ermissions?|PERMISSIONS?|All)$', 'NN'),
    # in dutch/german names, like Marco van Basten, or Klemens von Metternich
    # and Spanish/French Da Siva and De Gaulle
    (r'^(([Vv][ao]n)|[Dd][aeu])$', 'VAN'),
    # year
    (r'^[(]?(19|20)[0-9]{2}((\s)*([,-]|to)(\s)*(19|20)?[0-9]{2})*[)]?', 'YR'),
    # cardinal numbers
    (r'^-?[0-9]+(.[0-9]+)?.?$', 'CD'),
    # exceptions to proper nouns
    (r'^(The|Commons|AUTHOR|software)$', 'NN'),
    # composed proper nouns, ie. Jean-Claude or ST-Microelectronics
    # FIXME: what about a variant with spaces around the dash?
    (r'^[A-Z][a-zA-Z]*[-][A-Z]?[a-zA-Z]+.?$', 'NNP'),
    # proper nouns with digits
    (r'^[A-Z][a-z0-9]+.?$', 'NNP'),
    # saxon genitive, ie. Philippe's
    (r"^[A-Z][a-z]+[']s$", 'NNP'),
    # dotted name, ie. P.
    (r"^([A-Z][.]?|[A-Z]+[\.])$", 'PN'),
    # proper noun with some separator and trailing comma
    (r"^[A-Z]+[.][A-Z][a-z]+[,]?$", 'NNP'),
    # proper noun with apostrophe ': D'Orleans, D'Arcy, T'so, Ts'o
    (r"^[A-Z][[a-z]?['][A-Z]?[a-z]+[,.]?$", 'NNP'),
    # proper noun with apostrophe ': d'Itri
    (r"^[a-z]['][A-Z]?[a-z]+[,\.]?$", 'NNP'),
    # all CAPS word, at least 1 char long such as MIT, including an optional trailing comma or dot
    (r'^[A-Z0-9]+[,]?$', 'CAPS'),
    # all caps word 3 chars and more, enclosed in parens
    (r'^\([A-Z0-9]{2,}\)$', 'CAPS'),
    # proper noun:first CAP, including optional trailing comma
    (r'^[A-Z][a-zA-Z0-9]+[,]?$', 'NNP'),
    # email
    (r'[a-zA-Z0-9\+_\-\.\%]+@[a-zA-Z0-9][a-zA-Z0-9\+_\-\.\%]*\.[a-zA-Z]{2,5}?', 'EMAIL'),
    # email eventually in parens or brackets. The closing > or ) is optional
    (r'[\<\(][a-zA-Z0-9\+_\-\.\%]+@[a-zA-Z0-9][a-zA-Z0-9\+_\-\.\%]*\.[a-zA-Z]{2,5}?[\>\)]?', 'EMAIL'),
    # URLS such as ibm.com
    # TODO: add more extensions?
    (r'<?a?.(href)?.[a-z0-9A-Z\-\.\_]+\.(com|net|info|org|us|io|edu|co\.[a-z][a-z]|eu|biz)', 'URL'),
    # derived from regex in cluecode.finder
    (r'<?a?.(href)?.('
     r'(?:http|ftp|sftp)s?://[^\s<>\[\]"]+'
     r'|(?:www|ftp)\.[^\s<>\[\]"]+'
     r')', 'URL'),
    # AT&T (the company), needed special handling
    (r'^AT&T$', 'ATT'),
    # comma as a conjunction
    (r'^,$', 'CC'),
    # .\ is not a noun
    (r'^\.\\$', 'JUNK'),
    # nouns (default)
    (r'.+', 'NN'),
]
# Comments in the Grammar are lines that start with #
grammar = """
COPY: {<COPY>}
YR-RANGE: {<YR>+ <CC> <YR>}
YR-RANGE: {<YR> <DASH>* <YR|CD>+}
YR-RANGE: {<CD>? <YR>+}
YR-RANGE: {<YR>+ }
NAME: {<NNP> <VAN|OF> <NN*> <NNP>}
NAME: {<NNP> <PN> <VAN> <NNP>}
# the Regents of the University of California
COMPANY: {<BY>? <NN> <NNP> <OF> <NN> <UNI> <OF> <COMPANY|NAME|NAME2|NAME3><COMP>?}
# "And" some name
ANDCO: {<CC>+ <NN> <NNP>+<UNI|COMP>?}
ANDCO: {<CC>+ <NNP> <NNP>+<UNI|COMP>?}
ANDCO: {<CC>+ <COMPANY|NAME|NAME2|NAME3>+<UNI|COMP>?}
COMPANY: {<COMPANY|NAME|NAME2|NAME3> <ANDCO>+}
# rare "Software in the public interest, Inc."
COMPANY: {<COMP> <CD> <COMP>}
COMPANY: {<NNP> <IN><NN> <NNP> <NNP>+<COMP>?}
COMPANY: {<NNP> <CC> <NNP> <COMP>}
COMPANY: {<NNP|CAPS> <NNP|CAPS>? <NNP|CAPS>? <NNP|CAPS>? <NNP|CAPS>? <NNP|CAPS>? <COMP> <COMP>?}
COMPANY: {<UNI|NNP> <VAN|OF> <NNP>+ <UNI>?}
COMPANY: {<NNP>+ <UNI>}
COMPANY: {<COMPANY> <CC> <COMPANY>}
COMPANY: {<ATT> <COMP>?}
COMPANY: {<COMPANY> <CC> <NNP>}
# Group 42, Inc
# Typical names
NAME: {<NNP|PN>+ <NNP>+}
NAME: {<NNP> <PN>? <NNP>+}
NAME: {<NNP> <NNP>}
NAME: {<NNP> <NN> <EMAIL>}
NAME: {<NNP> <PN|VAN>? <PN|VAN>? <NNP>}
NAME: {<NNP> <NN> <NNP>}
NAME: {<NNP> <COMMIT>}
NAME: {<NN> <NNP> <ANDCO>}
NAME: {<NN>? <NNP> <CC> <NAME>}
NAME: {<NN>? <NNP> <OF> <NN>? <NNP> <NNP>?}
NAME: {<NAME> <CC> <NAME>}
COMPANY: {<NNP> <IN> <NN>? <COMPANY>}
NAME2: {<NAME> <EMAIL>}
NAME3: {<YR-RANGE> <NAME2|COMPANY>+}
NAME: {<NAME|NAME2>+ <OF> <NNP> <OF> <NN>? <COMPANY>}
NAME: {<NAME|NAME2>+ <CC|OF>? <NAME|NAME2|COMPANY>}
NAME3: {<YR-RANGE> <NAME>+}
NAME: {<NNP> <OF> <NNP>}
NAME: {<NAME> <NNP>}
NAME: {<NN|NNP|CAPS>+ <CC> <OTH>}
NAME: {<NNP> <CAPS>}
NAME: {<CAPS> <DASH>? <NNP|NAME>}
NAME: {<NNP> <CD> <NNP>}
NAME: {<COMP> <NAME>+}
NAME: {<NNP|CAPS>+ <AUTH>}
# Companies
COMPANY: {<NAME|NAME2|NAME3|NNP>+ <OF> <NN>? <COMPANY|COMP>}
COMPANY: {<NNP> <COMP> <COMP>}
COMPANY: {<NN>? <COMPANY|NAME|NAME2> <CC> <COMPANY|NAME|NAME2>}
COMPANY: {<COMP|NNP> <NN> <COMPANY> <NNP>+}
COMPANY: {<COMPANY> <CC> <AUTH>}
COMPANY: {<NN> <COMP>+}
COMPANY: {<URL>}
# Trailing Authors
COMPANY: {<NAME|NAME2|NAME3|NNP>+ <AUTH>}
# "And" some name
ANDCO: {<CC> <NNP> <NNP>+}
ANDCO: {<CC> <COMPANY|NAME|NAME2|NAME3>+}
COMPANY: {<COMPANY|NAME|NAME2|NAME3> <ANDCO>+}
NAME: {<BY> <NN> <AUTH>}
# Various forms of copyright statements
COPYRIGHT: {<COPY> <NAME> <COPY> <YR-RANGE>}
COPYRIGHT: {<COPY> <COPY> <BY>? <COMPANY|NAME*|YR-RANGE>* <BY>? <EMAIL>+}
COPYRIGHT: {<COPY> <BY>? <COMPANY|NAME*|YR-RANGE>* <BY>? <EMAIL>+}
COPYRIGHT: {<COPY> <COPY> <NAME|NAME2|NAME3> <CAPS> <YR-RANGE>}
COPYRIGHT: {<COPY> <NAME|NAME2|NAME3> <CAPS> <YR-RANGE>}
COPYRIGHT: {<COPY> <COPY> <NAME|NAME2|NAME3>+ <YR-RANGE>*}
COPYRIGHT: {<COPY> <NAME|NAME2|NAME3>+ <YR-RANGE>*}
COPYRIGHT: {<COPY> <COPY> <CAPS|NNP>+ <CC> <NN> <COPY> <YR-RANGE>?}
COPYRIGHT: {<COPY> <CAPS|NNP>+ <CC> <NN> <COPY> <YR-RANGE>?}
COPYRIGHT: {<COPY> <COPY> <BY>? <COMPANY|NAME*>+ <YR-RANGE>*}
COPYRIGHT: {<COPY> <BY>? <COMPANY|NAME*>+ <YR-RANGE>*}
COPYRIGHT: {<NNP>? <COPY> <COPY> (<YR-RANGE>+ <BY>? <NN>? <COMPANY|NAME|NAME2>+ <EMAIL>?)+}
COPYRIGHT: {<NNP>? <COPY> (<YR-RANGE>+ <BY>? <NN>? <COMPANY|NAME|NAME2>+ <EMAIL>?)+}
COPYRIGHT: {<COPY> <COPY> <NN> <NAME> <YR-RANGE>}
COPYRIGHT: {<COPY> <NN> <NAME> <YR-RANGE>}
COPYRIGHT: {<COPY> <COPY> <COMP>+}
COPYRIGHT: {<COPY> <COPY> <NN>+ <COMPANY|NAME|NAME2>+}
COPYRIGHT: {<COPY> <COPY> <NN> <NN>? <COMP> <YR-RANGE>?}
COPYRIGHT: {<COPY> <NN> <NN>? <COMP> <YR-RANGE>?}
COPYRIGHT: {<COPY> <COPY> <NN> <NN>? <COMP> <YR-RANGE>?}
COPYRIGHT: {<COPY> <NN> <NN>? <COMPANY> <YR-RANGE>?}
COPYRIGHT: {<COPY> <COPY> <YR-RANGE|NNP> <CAPS|BY>? <NNP|YR-RANGE|NAME>+}
COPYRIGHT: {<COPY> <YR-RANGE|NNP> <CAPS|BY>? <NNP|YR-RANGE|NAME>+}
COPYRIGHT: {<COPY> <COPY> <NNP>+}
# Copyright (c) 1995, 1996 The President and Fellows of Harvard University
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <NN> <NNP> <ANDCO>}
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <NN> <AUTH>}
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <BY> <NN> <NN> <NAME>}
COPYRIGHT2: {<COPY> <YR-RANGE> <BY> <NN> <NN> <NAME>}
COPYRIGHT2: {<COPY> <COPY><NN>? <COPY> <YR-RANGE> <BY> <NN>}
COPYRIGHT2: {<COPY> <NN>? <COPY> <YR-RANGE> <BY> <NN>}
COPYRIGHT2: {<COPY> <COPY><NN> <YR-RANGE> <BY> <NAME>}
COPYRIGHT2: {<COPY> <NN> <YR-RANGE> <BY> <NAME>}
COPYRIGHT2: {<COPY> <COPY><YR-RANGE> <DASH> <NAME2|NAME>}
COPYRIGHT2: {<COPY> <YR-RANGE> <DASH> <NAME2|NAME>}
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <NNP> <NAME>}
COPYRIGHT2: {<COPY> <YR-RANGE> <NNP> <NAME>}
COPYRIGHT2: {<NAME> <COPY> <YR-RANGE>}
COPYRIGHT2: {<COPY> <COPY> <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>*}
COPYRIGHT2: {<COPY> <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>*}
COPYRIGHT2: {<COPY> <COPY> <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <COMPANY>}
COPYRIGHT2: {<COPY> <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <COMPANY>}
COPYRIGHT2: {<COPY> <COPY> <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <DASH> <COMPANY>}
COPYRIGHT2: {<COPY> <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <DASH> <COMPANY>}
COPYRIGHT2: {<NNP|NAME|COMPANY> <COPYRIGHT2>}
COPYRIGHT: {<COPYRIGHT> <NN> <COMPANY>}
COPYRIGHT: {<COPY> <COPY> <BY>? <NN> <COMPANY>}
COPYRIGHT: {<COPY> <BY>? <NN> <COMPANY>}
COPYRIGHT: {<COMPANY> <NN> <NAME> <COPYRIGHT2>}
COPYRIGHT: {<COPYRIGHT2> <COMP> <COMPANY>}
COPYRIGHT: {<COMPANY> <NN> <COPYRIGHT2>}
COPYRIGHT: {<COPYRIGHT2> <NNP> <CC> <COMPANY>}
# copyrights in the style of Scilab/INRIA
COPYRIGHT: {<NNP> <NN> <COPY> <NNP>}
COPYRIGHT: {<NNP> <COPY> <NNP>}
# Authors
AUTH: {<AUTH2>+ <BY>}
AUTHOR: {<AUTH>+ <NN>? <COMPANY|NAME|YR-RANGE>* <BY>? <EMAIL>+}
AUTHOR: {<AUTH>+ <NN>? <COMPANY|NAME|NAME2>+ <YR-RANGE>*}
AUTHOR: {<AUTH>+ <YR-RANGE>+ <BY>? <COMPANY|NAME|NAME2>+}
AUTHOR: {<AUTH>+ <YR-RANGE|NNP> <NNP|YR-RANGE>+}
AUTHOR: {<AUTH>+ <NN|CAPS>? <YR-RANGE>+}
AUTHOR: {<COMPANY|NAME|NAME2>+ <AUTH>+ <YR-RANGE>+}
AUTHOR: {<YR-RANGE> <NAME|NAME2>+}
AUTHOR: {<NAME2>+}
AUTHOR: {<AUTHOR> <CC> <NN>? <AUTH>}
AUTHOR: {<BY> <EMAIL>}
ANDAUTH: {<CC> <AUTH|NAME>+}
AUTHOR: {<AUTHOR> <ANDAUTH>+}
# Compounded statements usings authors
# found in some rare cases with a long list of authors.
COPYRIGHT: {<COPY> <BY> <AUTHOR>+ <YR-RANGE>*}
COPYRIGHT: {<AUTHOR> <COPYRIGHT2>}
COPYRIGHT: {<AUTHOR> <YR-RANGE>}
"""
def strip_numbers(s):
    """
    Return `s` with all purely-numeric space-separated words removed.
    Falsy values (None, empty string) are returned unchanged.
    """
    if not s:
        return s
    kept = (word for word in s.split(' ') if not word.isdigit())
    return u' '.join(kept)
def strip_some_punct(s):
    """
    Return `s` stripped of some punctuation: commas, quotes, braces and
    semicolons on both ends, then a leading closing paren, then trailing
    ampersands, opening parens, dashes and underscores.
    """
    if not s:
        return s
    s = s.strip(",'\"};")
    s = s.lstrip(')')
    return s.rstrip('&(-_')
def fix_trailing_space_dot(s):
    """
    Return `s` with a trailing " ." collapsed to "."; any other value is
    returned unchanged.
    """
    if not (s and s.endswith(' .')):
        return s
    return s[:-2] + '.'
def strip_unbalanced_parens(s, parens='()'):
    """
    Return a string where unbalanced parenthesis characters are each
    replaced with a single space. `parens` is a two-character string
    giving the opening and closing character, such as (), <>, [] or {}.

    >>> strip_unbalanced_parens('This is a super string', '()')
    'This is a super string'
    >>> strip_unbalanced_parens('This is a super(c) string', '()')
    'This is a super(c) string'
    >>> strip_unbalanced_parens('(', '()')
    ' '
    """
    opening, closing = parens
    if opening not in s and closing not in s:
        return s
    # positions of openers still waiting for a match
    pending_open = []
    # positions of characters with no possible match
    bad_positions = set()
    for pos, char in enumerate(s):
        if char == opening:
            pending_open.append(pos)
        elif char == closing:
            if pending_open:
                pending_open.pop()
            else:
                bad_positions.add(pos)
    # any opener left on the stack is unbalanced too
    bad_positions.update(pending_open)
    cleaned = [' ' if pos in bad_positions else char
               for pos, char in enumerate(s)]
    # preserve the input string type (str or unicode)
    return type(s)('').join(cleaned)
def refine_copyright(c):
    """
    Refine a detected copyright string: strip stray punctuation, balance
    parens/brackets, collapse doubled "Copyright" markup artifacts and
    drop trailing garbage words captured by the grammar.
    FIXME: the grammar should not allow this to happen.
    """
    c = strip_some_punct(c)
    c = fix_trailing_space_dot(c)
    c = strip_unbalanced_parens(c, '()')
    c = strip_unbalanced_parens(c, '<>')
    c = strip_unbalanced_parens(c, '[]')
    c = strip_unbalanced_parens(c, '{}')
    # FIXME: this should be in the grammar, but is hard to get there right
    # these are often artifacts of markup
    c = c.replace('Copyright Copyright', 'Copyright')
    c = c.replace('Copyright copyright', 'Copyright')
    c = c.replace('copyright copyright', 'Copyright')
    c = c.replace('copyright Copyright', 'Copyright')
    c = c.replace('copyright\'Copyright', 'Copyright')
    c = c.replace('copyright"Copyright', 'Copyright')
    c = c.replace('copyright\' Copyright', 'Copyright')
    c = c.replace('copyright" Copyright', 'Copyright')
    s = c.split()
    # fix trailing garbage captured by the grammar; guard against an empty
    # token list so whitespace-only input does not raise IndexError
    if s and s[-1] in ('Parts', 'Any',):
        s = s[:-1]
    # this is hard to catch otherwise, unless we split the author
    # vs copyright grammar in two. Note that AUTHOR and Authors should be kept
    if s and s[-1] == 'Author':
        s = s[:-1]
    s = u' '.join(s)
    return s.strip()
def refine_author(c):
    """
    Refine a detected author: strip stray punctuation and numbers,
    balance parens/brackets and drop a leading "author" word.
    FIXME: the grammar should not allow this to happen.
    """
    c = strip_some_punct(c)
    c = strip_numbers(c)
    c = strip_unbalanced_parens(c, '()')
    c = strip_unbalanced_parens(c, '<>')
    c = strip_unbalanced_parens(c, '[]')
    c = strip_unbalanced_parens(c, '{}')
    c = c.split()
    # this is hard to catch otherwise, unless we split the author vs
    # copyright grammar in two; guard against an empty token list so
    # whitespace-only input does not raise IndexError
    if c and c[0].lower() == 'author':
        c = c[1:]
    c = u' '.join(c)
    return c.strip()
def refine_date(c):
    """
    Refine a detected date or date range by stripping stray punctuation.
    FIXME: the grammar should not allow this to happen.
    """
    return strip_some_punct(c)
# Known junk "copyright" statements that the grammar wrongly detects and
# that cannot be resolved otherwise by the parsing; built once at import
# time instead of on every is_junk() call.
_JUNK_COPYRIGHTS = frozenset([
    'copyrighted by their authors',
    'copyrighted by their authors.',
    'copyright holder or other authorized',
    'copyright holder who authorizes',
    'copyright holder has authorized',
    'copyright holder nor the author',
    'copyright holder(s) or the author(s)',
    'copyright owner or entity authorized',
    'copyright owner or contributors',
    'copyright for a new language file should be exclusivly the authors',
    'copyright holder or said author',
    'copyright holder, or any author',
    'copyrighted material, only this license, or another one contracted with the authors',
    'copyright notices, authorship',
    'copyright holder means the original author(s)',
    "copyright notice. timevar.def's author",
    "copyright holder or simply that it is author-maintained'.",
    "copyright holder or simply that is author-maintained'.",
    '(c) if you bring a patent claim against any contributor',
    'copyright-check writable-files m4-check author_mark_check',
    # 'copyrighting it yourself or claiming authorship'
])


def is_junk(c):
    """
    Return True if string `c` is a junk copyright that cannot be resolved
    otherwise by the parsing.
    It would be best not to have to resort to this, but this is practical.
    """
    return c.lower() in _JUNK_COPYRIGHTS
class CopyrightDetector(object):
    """
    Class to detect copyrights and authorship in tagged, chunked text.
    """
    def __init__(self):
        # POS-tag tokens with the module-level `patterns`, then chunk the
        # tagged tokens into a parse tree with the module-level `grammar`
        self.tagger = nltk.RegexpTagger(patterns)
        self.chunker = nltk.RegexpParser(grammar)

    @staticmethod
    def as_str(node):
        """
        Return a parse tree node as a space-normalized string.
        """
        node_string = ' '.join(k for k, _ in node.leaves())
        return u' '.join(node_string.split())

    def detect(self, numbered_lines):
        """
        Return a tuple of (copyrights, authors, years, holders, start
        line, end line) lists detected in a sequence of (line number,
        line text) tuples.
        """
        numbered_lines = list(numbered_lines)
        numbers = [n for n, _l in numbered_lines]
        start_line = min(numbers)
        end_line = max(numbers)
        logger.debug('CopyrightDetector:detect:lines numbers: %(start_line)d->%(end_line)d' % locals())
        tokens = self.get_tokens(numbered_lines)
        # we accumulate detected items in these synchronized lists
        # this could be a single list of namedtuples
        # or a list of dicts instead
        copyrights, authors, years, holders = [], [], [], []
        if not tokens:
            # no detectable text: no start/end positions either
            return copyrights, authors, years, holders, None, None
        # first, POS tag each token using token regexes
        tagged_text = self.tagger.tag(tokens)
        logger.debug('CopyrightDetector:tagged_text: ' + str(tagged_text))
        # then build a parse tree based on tagged tokens
        tree = self.chunker.parse(tagged_text)
        logger.debug('CopyrightDetector:parse tree: ' + str(tree))

        def collect_year_and_holder(detected_copyright):
            """
            Walk the a parse sub-tree starting with the `detected_copyright`
            node collecting all years and holders.
            """
            for copyr in detected_copyright:
                if isinstance(copyr, nltk.tree.Tree):
                    logger.debug('n: ' + str(copyr))
                    node_text = CopyrightDetector.as_str(copyr)
                    # NLTK 3 replaced the `.node` attribute with `.label()`
                    if 'YR-RANGE' in (copyr.label() if nltk3 else copyr.node):
                        years.append(refine_date(node_text))
                    elif ('NAME' == (copyr.label() if nltk3 else copyr.node)
                          or 'COMPANY' in (copyr.label() if nltk3 else copyr.node)):
                        # FIXME : this would wreck things like 23andme
                        # where a company name contains numbers
                        holders.append(refine_author(node_text))
                        logger.debug('CopyrightDetector: node_text: ' + node_text)
                    # recurse into every subtree to reach nested chunks
                    collect_year_and_holder(copyr)

        # then walk the parse tree, collecting copyrights, years and authors
        for tree_node in tree:
            if isinstance(tree_node, nltk.tree.Tree):
                node_text = CopyrightDetector.as_str(tree_node)
                if 'COPYRIGHT' in (tree_node.label() if nltk3 else tree_node.node):
                    if node_text and node_text.strip():
                        refined = refine_copyright(node_text)
                        if not is_junk(refined):
                            copyrights.append(refined)
                            collect_year_and_holder(tree_node)
                elif (tree_node.label() if nltk3 else tree_node.node) == 'AUTHOR':
                    authors.append(refine_author(node_text))
        return copyrights, authors, years, holders, start_line, end_line

    def get_tokens(self, numbered_lines):
        """
        Return an iterable of cleaned tokens from (number, line) tuples.
        """
        tokens = []
        # simple tokenization: spaces and some punctuation
        splitter = re.compile('[\\t =;]+')
        for _line_number, line in numbered_lines:
            line = line.strip()
            if line:
                line = prepare_text_line(line)
            if line:
                line = strip_markup(line)
                if line and line.strip():
                    for tok in splitter.split(line):
                        # strip trailing quotes and ignore empties
                        tok = tok.strip("' ")
                        if not tok:
                            continue
                        # strip trailing colons: why?
                        tok = tok.rstrip(':').strip()
                        # strip leading @: : why?
                        tok = tok.lstrip('@').strip()
                        if tok and tok not in (':',):
                            tokens.append(tok)
        logger.debug('CopyrightDetector:tokens: ' + repr(list(tokens)))
        return tokens
def is_candidate(line):
    """
    Return a truthy value if `line` is a candidate line for copyright
    detection: it has word-character content and contains at least one
    known copyright statement marker.
    """
    lowered = prepare_text_line(line.lower())
    return (has_content(lowered)
            and any(marker in lowered
                    for marker in copyrights_hint.statement_markers))
def has_content(line):
    """
    Return a copy of `line` keeping only word characters; used as a truth
    value that is falsy (empty) when the line has no word characters.
    Note: digits and underscores are word characters and are kept.
    """
    return ''.join(re.findall(r'\w+', line))
def is_all_rights_reserved(line):
    """
    Return True if `line` ends with an "all rights reserved"-like phrase
    once prepared and stripped of all non-word characters.
    """
    cleaned = prepare_text_line(line)
    # remove any non-word character, then normalize case
    cleaned = re.sub(r'\W+', '', cleaned).strip().lower()
    return cleaned.endswith(('rightreserved', 'rightsreserved'))
def candidate_lines(lines):
    """
    Yield lists of candidate lines where each list element is a tuple of
    (line number, line text).
    A candidate line is a line of text that may contain copyright statements.
    A few lines before and after a candidate line are also included.
    """
    candidates = deque()
    # the last non-candidate line seen, kept as leading context for the
    # next candidate region
    previous = None
    # used as a state and line counter
    in_copyright = 0
    for line_number, line in enumerate(lines):
        # the first line number is ONE, not zero
        numbered_line = (line_number + 1, line)
        if is_candidate(line):
            # the state is now "in copyright"
            in_copyright = 2
            # we keep one line before a candidate line if any
            if previous:
                candidates.append(previous)
                previous = None
            # we keep the candidate line and yield if we reached the end
            # of a statement
            candidates.append(numbered_line)
            if is_all_rights_reserved(line):
                yield list(candidates)
                candidates.clear()
                in_copyright = 0
        else:
            if in_copyright:
                # if the previous line was a candidate
                # then we keep one line after that candidate line
                if has_content(line):
                    candidates.append(numbered_line)
                    # and decrement our state
                    in_copyright -= 1
                else:
                    # a blank-ish line ends the current region
                    if candidates:
                        yield list(candidates)
                        candidates.clear()
                    in_copyright = 0
            else:
                # if are neither a candidate line nor the line just after
                # then we yield the accumulated lines if any
                if candidates:
                    yield list(candidates)
                    candidates.clear()
                # and we keep track of this line as "previous"
                if has_content(line):
                    previous = numbered_line
                else:
                    previous = None
    # finally, flush any accumulated candidates
    if candidates:
        yield list(candidates)
# match simple HTML/XML-like tags; compiled once at import time rather
# than on every strip_markup() call
_HTML_TAG_RE = re.compile(
    r'<'
    r'[(--)\?\!\%\/]?'
    r'[a-zA-Z0-9#\"\=\s\.\;\:\%\&?!,\+\*\-_\/]+'
    r'\/?>',
    re.MULTILINE | re.UNICODE
)


def strip_markup(text):
    """
    Return `text` with HTML/XML-like markup tags each replaced by a space.
    Falsy values (None, empty string) are returned unchanged.
    """
    if text:
        text = _HTML_TAG_RE.sub(' ', text)
    return text
# Capitalized words that are NOT likely names: lowercase_well_known_word()
# lowers them so the POS tagger does not mistake them for proper nouns.
COMMON_WORDS = set([
    'Unicode',
    'Modified',
    'NULL',
    'FALSE', 'False',
    'TRUE', 'True',
    'Last',
    'Predefined',
    'If',
    'Standard',
    'Version', 'Versions',
    'Package', 'PACKAGE',
    'Powered',
    # a missing comma here previously fused 'License.' and 'Licensee' into
    # the single bogus entry 'License.Licensee' via implicit concatenation
    'Licensed', 'License', 'License.', 'Licensee', 'License:', 'License-Alias:',
    'Legal',
    'Entity',
    'Indemnification.',
    'AS', 'IS',
    'See',
    'This',
    'Java',
    'DoubleClick',
    'DOM', 'SAX', 'URL',
    'Operating System',
    'Original Software',
    'Berkeley Software Distribution',
    'Software Release', 'Release',
    'IEEE Std',
    'BSD',
    'POSIX',
    'Derivative Works',
    'Intellij IDEA',
    'README', 'NEWS',
    # NOTE(review): 'CHANGElogger' looks mangled (perhaps 'CHANGELOG');
    # kept as-is -- confirm against upstream data
    'ChangeLog', 'CHANGElogger', 'Changelog',
    'Redistribution',
])
def lowercase_well_known_word(text):
    """
    Return text with certain well-known words lowercased.
    Rationale: some common words can start with a capital letter and be
    mistaken for a named entity because capitalized words are often
    company names.
    """
    processed = []
    for line in text.splitlines(True):
        # re-join with single spaces; original spacing is not preserved
        normalized = ' '.join(
            word.lower() if word in COMMON_WORDS else word
            for word in line.split())
        processed.append(normalized)
    return '\n'.join(processed)
# Addressing the FIXME below: each regex is now compiled exactly once at
# import time; the zero-argument accessor functions are kept so callers
# are unchanged.
_IGNORED_PUNCTUATION = re.compile(r'[*#"%\[\]\{\}`]+', re.I | re.M | re.U)
_ASCII_LINE_DECO = re.compile(r'[-_=!\\*]{2,}')
_ASCII_LINE_DECO2 = re.compile(r'/{3,}')
_WHITESPACE = re.compile(r' +')
_MULTIQUOTES = re.compile(r"\'{2,}")
# TODO: add debian <s> </s> POS name taggings
_DEBIAN_COPYRIGHT_TAGS = re.compile(r"(\<s\>|\<s\\/>)")


def IGNORED_PUNCTUATION_RE():
    # punctuation characters removed outright from prepared lines
    return _IGNORED_PUNCTUATION


def ASCII_LINE_DECO_RE():
    # runs of two or more line-decoration characters such as ---- or ====
    return _ASCII_LINE_DECO


def ASCII_LINE_DECO2_RE():
    # runs of three or more forward slashes
    return _ASCII_LINE_DECO2


def WHITESPACE_RE():
    # runs of spaces
    return _WHITESPACE


def MULTIQUOTES_RE():
    # runs of two or more single quotes
    return _MULTIQUOTES


def DEBIAN_COPYRIGHT_TAGS_RE():
    # debian copyright <s> tags, removed (not replaced by a space)
    return _DEBIAN_COPYRIGHT_TAGS
def prepare_text_line(line):
    """
    Return a line of text normalized and cleaned up for copyright detection.

    Strips comment markers, normalizes copyright signs, quotes, punctuation
    and whitespace, converts to ASCII and lowercases well-known words.
    """
    # FIXME: maintain the original character positions

    # strip whitespace
    line = line.strip()

    # strip comment markers
    # common comment characters
    line = line.strip('\\/*#%;')

    # FIX: patterns below are now raw strings; the originals relied on
    # invalid string escapes (e.g. '\@', '\.') which raise
    # DeprecationWarning/SyntaxWarning on modern Python. The resulting
    # regexes are unchanged.
    # uncommon comment line prefix in dos
    line = re.sub(r'^rem ', ' ', line)
    line = re.sub(r'^\@rem ', ' ', line)
    # uncommon comment line prefix in autotools am/in
    line = re.sub(r'^dnl ', ' ', line)
    # uncommon comment line prefix in man pages
    line = re.sub(r'^\.\"', ' ', line)

    # normalize copyright signs and spacing around them
    line = line.replace('(C)', ' (c) ')
    line = line.replace('(c)', ' (c) ')
    # the case of \251 is tested by 'weirdencoding.h'
    # NOTE(review): the next several replacements appear to target the same
    # copyright-sign codepoint (U+00A9) possibly via different mojibake
    # spellings in the original encoding; redundant but harmless, kept as-is.
    line = line.replace(u'\251', u' (c) ')
    line = line.replace('©', ' (c) ')
    line = line.replace('©', ' (c) ')
    line = line.replace('©', ' (c) ')
    line = line.replace(u'\xa9', ' (c) ')
    # FIXME: what is \xc2???
    line = line.replace(u'\xc2', '')

    # TODO: add more HTML entities replacements
    # see http://www.htmlhelp.com/reference/html40/entities/special.html
    # convert html entities CR LF to space
    line = line.replace(u' ', ' ')
    line = line.replace(u' ', ' ')
    line = line.replace(u' ', ' ')

    # normalize (possibly repeated) quotes to unique single quote '
    # backticks ` and "
    line = line.replace(u'`', "'")
    line = line.replace(u'"', "'")
    line = re.sub(MULTIQUOTES_RE(), "'", line)
    # quotes to space? but t'so will be wrecked
    # line = line.replace(u"'", ' ')

    # some trailing garbage ')
    line = line.replace("')", ' ')

    # note that we do not replace the debian tag by a space: we remove it
    line = re.sub(DEBIAN_COPYRIGHT_TAGS_RE(), '', line)

    line = re.sub(IGNORED_PUNCTUATION_RE(), ' ', line)

    # tabs to spaces
    line = line.replace('\t', ' ')

    # normalize spaces around commas
    line = line.replace(' , ', ', ')

    # remove ASCII "line decorations"
    # such as in --- or === or !!! or *****
    line = re.sub(ASCII_LINE_DECO_RE(), ' ', line)
    line = re.sub(ASCII_LINE_DECO2_RE(), ' ', line)

    # Replace escaped literal \0 \n \r \t that may exist as-is by a space
    # such as in code literals: a="\\n some text"
    line = line.replace('\\r', ' ')
    line = line.replace('\\n', ' ')
    line = line.replace('\\t', ' ')
    line = line.replace('\\0', ' ')

    # TODO: Why?
    # replace contiguous spaces with only one occurrence
    # line = re.sub(WHITESPACE_RE(), ' ', text)

    # normalize to ascii text
    line = commoncode.text.toascii(line)
    # logger.debug("ascii_only_text: " + text)

    # strip verbatim back slash and comment signs again at both ends of a line
    # FIXME: this is done at the start of this function already
    line = line.strip('\\/*#%;')

    # normalize to use only LF as line endings so we can split correctly
    # and keep line endings
    line = commoncode.text.unixlinesep(line)
    # why?
    line = lowercase_well_known_word(line)

    return line
| |
import os
import sys
import shutil
import os.path
import uuid
from future.utils import iteritems
from pandaharvester.harvestercore import core_utils
from .base_stager import BaseStager
from pandaharvester.harvestermover import mover_utils
from rucio.client import Client as RucioClient
from rucio.common.exception import RuleNotFound
# module-level logger shared by all RucioStager instances
baseLogger = core_utils.setup_logger('rucio_stager')
# plugin for stage-out with Rucio
# plugin for stage-out with Rucio
class RucioStager(BaseStager):
    """Stage-out plugin that registers output files in Rucio datasets and
    transfers them with replication rules."""

    # constructor
    def __init__(self, **kwarg):
        BaseStager.__init__(self, **kwarg)
        # default scope used for temporary datasets and zipped files
        if not hasattr(self, 'scopeForTmp'):
            self.scopeForTmp = 'panda'

    # check status
    def check_stage_out_status(self, jobspec):
        """Check the replication-rule state of all output files of a job.

        Returns (True, '') when every file could be checked, otherwise
        (False, <first error message>).
        """
        # make logger
        tmpLog = self.make_logger(baseLogger, 'PandaID={0}'.format(jobspec.PandaID),
                                  method_name='check_stage_out_status')
        tmpLog.debug('start')
        # loop over all files
        allChecked = True
        oneErrMsg = None
        transferStatus = dict()
        for fileSpec in jobspec.outFiles:
            # skip files that are already done
            if fileSpec.status in ['finished', 'failed']:
                continue
            # get transfer ID set by trigger_stage_out
            transferID = fileSpec.fileAttributes['transferID']
            if transferID not in transferStatus:
                # look up the rule state only once per transfer ID
                try:
                    rucioAPI = RucioClient()
                    ruleInfo = rucioAPI.get_replication_rule(transferID)
                    tmpTransferStatus = ruleInfo['state']
                    tmpLog.debug('got state={0} for rule={1}'.format(tmpTransferStatus, transferID))
                except RuleNotFound:
                    tmpLog.error('rule {0} not found'.format(transferID))
                    tmpTransferStatus = 'FAILED'
                # FIX: was a bare "except:"; narrowed so SystemExit and
                # KeyboardInterrupt still propagate
                except Exception:
                    err_type, err_value = sys.exc_info()[:2]
                    errMsg = "{0} {1}".format(err_type.__name__, err_value)
                    tmpLog.error('failed to get status for rule={0} with {1}'.format(transferID, errMsg))
                    # set dummy not to lookup again
                    tmpTransferStatus = None
                    allChecked = False
                    # keep one message
                    if oneErrMsg is None:
                        oneErrMsg = errMsg
                # NOTE(review): this unconditionally overrides the state
                # obtained above, so every rule is treated as 'OK' and the
                # FAILED/error paths are effectively dead. Looks like a debug
                # leftover -- confirm intent before removing.
                tmpTransferStatus = 'OK'
                transferStatus[transferID] = tmpTransferStatus
            # map rule state to file status
            if transferStatus[transferID] == 'OK':
                fileSpec.status = 'finished'
            elif transferStatus[transferID] in ['FAILED', 'CANCELED']:
                fileSpec.status = 'failed'
        if allChecked:
            return True, ''
        else:
            return False, oneErrMsg

    # trigger stage out
    def trigger_stage_out(self, jobspec):
        """Copy output files to the transfer area, register them in Rucio
        datasets, and create replication rules to the destination RSEs.

        Returns (True, '') on success or (False, <error message>).
        """
        # make logger
        tmpLog = self.make_logger(baseLogger, 'PandaID={0}'.format(jobspec.PandaID),
                                  method_name='trigger_stage_out')
        tmpLog.debug('start')
        # loop over all files
        files = dict()
        transferIDs = dict()
        transferDatasets = dict()
        fileAttrs = jobspec.get_output_file_attributes()
        for fileSpec in jobspec.outFiles:
            # skip zipped files
            if fileSpec.zipFileID is not None:
                continue
            # skip if already processed
            if 'transferDataset' in fileSpec.fileAttributes:
                if fileSpec.fileType not in transferDatasets:
                    transferDatasets[fileSpec.fileType] = fileSpec.fileAttributes['transferDataset']
                if fileSpec.fileType not in transferIDs:
                    transferIDs[fileSpec.fileType] = fileSpec.fileAttributes['transferID']
                continue
            # set OS ID
            # FIX: was "fileType == ['es_output', 'zip_output']", comparing a
            # string to a list, which is always False; a membership test was
            # clearly intended (same idiom is used further below).
            if fileSpec.fileType in ['es_output', 'zip_output']:
                fileSpec.objstoreID = self.objStoreID_ES
            # make path where file is copied for transfer
            if fileSpec.fileType != 'zip_output':
                scope = fileAttrs[fileSpec.lfn]['scope']
                # NOTE: datasetName is currently unused in both branches
                datasetName = fileAttrs[fileSpec.lfn]['dataset']
            else:
                # use panda scope for zipped files
                scope = self.scopeForTmp
                datasetName = 'dummy'
            srcPath = fileSpec.path
            dstPath = mover_utils.construct_file_path(self.srcBasePath, scope, fileSpec.lfn)
            # remove any stale copy first
            if os.path.exists(dstPath):
                os.remove(dstPath)
            # copy
            tmpLog.debug('copy src={srcPath} dst={dstPath}'.format(srcPath=srcPath, dstPath=dstPath))
            dstDir = os.path.dirname(dstPath)
            if not os.path.exists(dstDir):
                os.makedirs(dstDir)
            shutil.copyfile(srcPath, dstPath)
            # collect files per type
            tmpFile = dict()
            tmpFile['scope'] = scope
            tmpFile['name'] = fileSpec.lfn
            tmpFile['bytes'] = fileSpec.fsize
            if fileSpec.fileType not in files:
                files[fileSpec.fileType] = []
            files[fileSpec.fileType].append(tmpFile)
        # loop over all file types to be registered to rucio
        rucioAPI = RucioClient()
        for fileType, fileList in iteritems(files):
            # set destination RSE
            if fileType in ['es_output', 'zip_output']:
                dstRSE = self.dstRSE_ES
            elif fileType == 'output':
                dstRSE = self.dstRSE_Out
            elif fileType == 'log':
                dstRSE = self.dstRSE_Log
            else:
                errMsg = 'unsupported file type {0}'.format(fileType)
                tmpLog.error(errMsg)
                return (False, errMsg)
            # skip if destination is None
            if dstRSE is None:
                continue
            # make datasets if missing
            if fileType not in transferDatasets:
                try:
                    tmpScope = self.scopeForTmp
                    tmpDS = 'panda.harvester_stage_out.{0}'.format(str(uuid.uuid4()))
                    # hidden 30-day dataset holding this batch of files
                    rucioAPI.add_dataset(tmpScope, tmpDS,
                                         meta={'hidden': True},
                                         lifetime=30 * 24 * 60 * 60,
                                         files=fileList,
                                         rse=self.srcRSE
                                         )
                    transferDatasets[fileType] = tmpDS
                    # add rule
                    tmpDID = dict()
                    tmpDID['scope'] = tmpScope
                    tmpDID['name'] = tmpDS
                    tmpRet = rucioAPI.add_replication_rule([tmpDID], 1, dstRSE,
                                                           lifetime=30 * 24 * 60 * 60
                                                           )
                    tmpTransferIDs = tmpRet[0]
                    transferIDs[fileType] = tmpTransferIDs
                    tmpLog.debug('register dataset {0} with rule {1}'.format(tmpDS, str(tmpTransferIDs)))
                except Exception:
                    # FIX: was a bare "except:"
                    errMsg = core_utils.dump_error_message(tmpLog)
                    return (False, errMsg)
            else:
                # add files to existing dataset
                try:
                    tmpScope = self.scopeForTmp
                    tmpDS = transferDatasets[fileType]
                    rucioAPI.add_files_to_dataset(tmpScope, tmpDS, fileList, self.srcRSE)
                    tmpLog.debug('added files to {0}'.format(tmpDS))
                except Exception:
                    # FIX: was a bare "except:"
                    errMsg = core_utils.dump_error_message(tmpLog)
                    return (False, errMsg)
        # set transfer datasets and rules
        for fileSpec in jobspec.outFiles:
            # skip zipped files
            if fileSpec.zipFileID is not None:
                continue
            # skip already done
            if fileSpec.status in ['finished', 'failed']:
                continue
            # skip if already processed
            if 'transferDataset' in fileSpec.fileAttributes:
                continue
            # no destination
            if fileSpec.fileType not in transferDatasets:
                fileSpec.status = 'finished'
                continue
            # set dataset
            fileSpec.fileAttributes['transferDataset'] = transferDatasets[fileSpec.fileType]
            # set rule
            fileSpec.fileAttributes['transferID'] = transferIDs[fileSpec.fileType]
            # force update
            fileSpec.force_update('fileAttributes')
        # return
        tmpLog.debug('done')
        return (True, '')

    # zip output files
    def zip_output(self, jobspec):
        """Zip output files using the base-class helper."""
        # make logger
        tmpLog = self.make_logger(baseLogger, 'PandaID={0}'.format(jobspec.PandaID),
                                  method_name='zip_output')
        return self.simple_zip_output(jobspec, tmpLog)
| |
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import operator
import os
import signal
import sys
import traceback
import click
import daiquiri
import fixtures
import pbr.version
import pkg_resources
import psutil
from pifpaf import util
LOG = daiquiri.getLogger("pifpaf")
def _format_multiple_exceptions(e, debug=False):
    # Log the exceptions wrapped in a fixtures.MultipleExceptions, flattening
    # nested MultipleExceptions and dropping SetupError entries.
    # NOTE(sileht): Why do I not use this ? :
    # excs = list(e.args)
    # Because it raises SystemExit(2) on python3 !!?!?
    excs = []
    for i in range(len(e.args)):
        excs.append(e.args[i])

    valid_excs = []
    while excs:
        etype, value, tb = excs.pop(0)
        if etype == fixtures.MultipleExceptions:
            excs.extend(value.args)
        elif etype == fixtures.SetupError:
            continue
        else:
            valid_excs.append((etype, value, tb))

    if len(valid_excs) == 1:
        etype, value, tb = valid_excs[0]
        if debug:
            LOG.error("".join(traceback.format_exception(etype, value, tb)))
        else:
            LOG.error(value)
        return

    LOG.error("MultipleExceptions raised:")
    for n, (etype, value, tb) in enumerate(valid_excs):
        if debug:
            LOG.error("- exception %d:", n)
            LOG.error("".join(
                traceback.format_exception(etype, value, tb)))
        else:
            LOG.error(value)
# Names of all daemons advertised through the "pifpaf.daemons" entry points.
DAEMONS = [ep.name for ep in pkg_resources.iter_entry_points("pifpaf.daemons")]
@click.group()
@click.option('--verbose/--quiet', help="Print mode details.")
@click.option('--debug', help="Show tracebacks on errors.", is_flag=True)
@click.option('--log-file', help="Specify a file to log output.",
              type=click.Path(dir_okay=False))
@click.option("--env-prefix", "-e",
              help="Prefix to use for environment variables (default: PIFPAF)")
@click.option("--global-urls-variable", "-g",
              help="global variable name to use to append connection URL "
              "when chaining multiple pifpaf instances (default: PIFPAF_URLS)")
@click.version_option(pbr.version.VersionInfo('pifpaf').version_string())
@click.pass_context
def main(ctx, verbose=False, debug=False, log_file=None,
         env_prefix=None, global_urls_variable=None):
    # No docstring on purpose: click would surface it as the group help text.
    # Colored stderr logging, optionally duplicated to a file.
    formatter = daiquiri.formatter.ColorFormatter(
        fmt="%(color)s%(levelname)s "
        "[%(name)s] %(message)s%(color_stop)s")
    outputs = [daiquiri.output.Stream(sys.stderr, formatter=formatter)]
    if log_file:
        outputs.append(daiquiri.output.File(log_file, formatter=formatter))
    # Stash global options for subcommands; only record the prefixes when
    # explicitly given so the "run" subcommand defaults can apply.
    ctx.obj = {"debug": debug}
    if env_prefix is not None:
        ctx.obj['env_prefix'] = env_prefix
    if global_urls_variable is not None:
        ctx.obj['global_urls_variable'] = global_urls_variable
    level = logging.DEBUG if debug else logging.INFO if verbose else logging.WARNING
    daiquiri.setup(outputs=outputs, level=level)
@main.command(name="list")
def drivers_list():
    # Print one available daemon driver name per line.
    for daemon_name in DAEMONS:
        click.echo(daemon_name)
class RunGroup(click.MultiCommand):
    """Click multi-command that lazily builds one subcommand per daemon
    entry point, so listing commands does not import every plugin."""
    @staticmethod
    def list_commands(ctx):
        # Daemon names come from the "pifpaf.daemons" entry points.
        return DAEMONS
    def get_command(self, ctx, name):
        """Build the subcommand for daemon `name` on demand; its options are
        provided by the plugin itself via get_options()."""
        params = [click.Argument(["command"], nargs=-1)]
        plugin = pkg_resources.load_entry_point(
            "pifpaf", "pifpaf.daemons", name)
        params.extend(map(lambda kw: click.Option(**kw), plugin.get_options()))
        def _run_cb(*args, **kwargs):
            return self._run(name, plugin, ctx, *args, **kwargs)
        return click.Command(name=name, callback=_run_cb, params=params)
    def format_commands(self, ctx, formatter):
        # Same as click.MultiCommand.format_commands except it does not use
        # get_command so we don't have to load commands on listing.
        rows = []
        for subcommand in self.list_commands(ctx):
            rows.append((subcommand, 'Run ' + subcommand))
        if rows:
            with formatter.section('Commands'):
                formatter.write_dl(rows)
    def _run(self, daemon, plugin, ctx, command, **kwargs):
        """Start the daemon fixture, then either wrap `command` with it
        (foreground mode) or daemonize and print shell exports for eval."""
        debug = ctx.obj['debug']
        env_prefix = ctx.obj['env_prefix']
        global_urls_variable = ctx.obj['global_urls_variable']
        driver = plugin(env_prefix=env_prefix,
                        debug=debug,
                        **kwargs)
        daemon = daemon  # NOTE(review): no-op self-assignment, left as-is
        def putenv(key, value):
            # Prefix every exported variable, e.g. "PIFPAF_<key>".
            return os.putenv(env_prefix + "_" + key, value)
        def expand_urls_var(url):
            # Append to the chained-URLs variable when it is already set.
            current_urls = os.getenv(global_urls_variable)
            if current_urls:
                return current_urls + ";" + url
            return url
        if command:
            # Foreground mode: run `command` with the daemon's environment.
            try:
                driver.setUp()
            except fixtures.MultipleExceptions as e:
                _format_multiple_exceptions(e, debug)
                sys.exit(1)
            except Exception:
                LOG.error("Unable to start %s, "
                          "use --debug for more information",
                          daemon, exc_info=True)
                sys.exit(1)
            putenv("PID", str(os.getpid()))
            putenv("DAEMON", daemon)
            url = os.getenv(driver.env_prefix + "_URL")
            putenv("%s_URL" % daemon.upper(), url)
            os.putenv(global_urls_variable,
                      expand_urls_var(url))
            try:
                # New session so signals sent to pifpaf do not hit the child.
                c = psutil.Popen(command, preexec_fn=os.setsid)
            except Exception:
                driver.cleanUp()
                raise RuntimeError("Unable to start command: %s"
                                   % " ".join(command))
            LOG.info(
                "Command `%s` (pid %s) is ready",
                " ".join(command), c.pid
            )
            def _cleanup(signum=None, frame=None, ret=0):
                # Ignore further signals while tearing down.
                signal.signal(signal.SIGTERM, signal.SIG_IGN)
                signal.signal(signal.SIGHUP, signal.SIG_IGN)
                signal.signal(signal.SIGINT, signal.SIG_IGN)
                try:
                    driver.cleanUp()
                except Exception:
                    LOG.error("Unexpected cleanUp error", exc_info=True)
                util.process_cleaner(c)
                sys.exit(1 if signum == signal.SIGINT else ret)
            signal.signal(signal.SIGTERM, _cleanup)
            signal.signal(signal.SIGHUP, _cleanup)
            signal.signal(signal.SIGINT, _cleanup)
            signal.signal(signal.SIGPIPE, signal.SIG_IGN)
            try:
                ret = c.wait()
            except KeyboardInterrupt:
                ret = 1
            _cleanup(ret=ret)
        else:
            # Daemon mode: fork a background keeper and print shell exports.
            try:
                driver.setUp()
            except fixtures.MultipleExceptions as e:
                _format_multiple_exceptions(e, debug)
                sys.exit(1)
            except Exception:
                LOG.error("Unable to start %s, "
                          "use --debug for more information",
                          daemon, exc_info=True)
                sys.exit(1)
            pid = os.fork()
            if pid == 0:
                # Child: detach from the terminal, then wait for a signal
                # that triggers the fixture cleanup.
                os.setsid()
                devnull = os.open(os.devnull, os.O_RDWR)
                os.dup2(devnull, 0)
                os.dup2(devnull, 1)
                os.dup2(devnull, 2)
                def _cleanup(signum, frame):
                    driver.cleanUp()
                    sys.exit(0)
                signal.signal(signal.SIGTERM, _cleanup)
                signal.signal(signal.SIGHUP, _cleanup)
                signal.signal(signal.SIGINT, _cleanup)
                signal.signal(signal.SIGPIPE, signal.SIG_IGN)
                signal.pause()
            else:
                # Parent: print `export` statements meant for
                # eval $(pifpaf run <daemon>).
                url = driver.env['%s_URL' % driver.env_prefix]
                driver.env.update({
                    "PIFPAF_PID": pid,
                    env_prefix + "_PID": pid,
                    env_prefix + "_DAEMON": daemon,
                    (env_prefix + "_" +
                     daemon.upper() + "_URL"): url,
                    global_urls_variable:
                    expand_urls_var(url),
                    "%s_OLD_PS1" % env_prefix:
                    os.getenv("PS1", ""),
                    "PS1":
                    "(pifpaf/" + daemon + ") " + os.getenv("PS1", ""),
                })
                for k, v in driver.env.items():
                    print("export %s=\"%s\";" % (k, v))
                # Also emit a <prefix>_stop shell function that kills the
                # keeper process and undoes the environment changes.
                print("%(prefix_lower)s_stop () { "
                      "if test -z \"$%(prefix)s_PID\"; then "
                      "echo 'No PID found in $%(prefix)s_PID'; return -1; "
                      "fi; "
                      "if kill $%(prefix)s_PID; then "
                      "_PS1=$%(prefix)s_OLD_PS1; "
                      "unset %(vars)s; "
                      "PS1=$_PS1; unset _PS1; "
                      "unset -f %(prefix_lower)s_stop; "
                      "unalias pifpaf_stop 2>/dev/null || true; "
                      "fi; } ; "
                      "alias pifpaf_stop=%(prefix_lower)s_stop ; "
                      % {"prefix": env_prefix,
                         "prefix_lower":
                         env_prefix.lower(),
                         "vars": " ".join(driver.env)})
@main.command(name="run", help="Run a daemon", cls=RunGroup)
@click.option("--env-prefix", "-e", default="PIFPAF",
              help="Prefix to use for environment variables (default: PIFPAF)")
@click.option("--global-urls-variable", "-g", default="PIFPAF_URLS",
              help="global variable name to use to append connection URL "
              "when chaining multiple pifpaf instances (default: PIFPAF_URLS)")
@click.pass_context
def run(ctx, env_prefix, global_urls_variable):
    # Record per-run defaults without clobbering values already set on the
    # main group (setdefault == assign-if-missing, same end state as the
    # original get-with-default assignment).
    ctx.obj.setdefault('env_prefix', env_prefix)
    ctx.obj.setdefault('global_urls_variable', global_urls_variable)
def run_main():
    # standalone_mode=False lets click exceptions propagate to the caller
    # instead of being converted into a SystemExit.
    return main.main(standalone_mode=False)
if __name__ == '__main__':
    sys.exit(run_main())
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return
# pylint: disable=unidiomatic-typecheck
"""
This file contains the set of passes for Relay, which exposes an interface for
configuring the passes and scripting them in Python.
"""
from ...ir import IRModule
from ...relay import transform, build_module
from ...runtime.ndarray import cpu
from . import _ffi_api
from .feature import Feature
def post_order_visit(expr, fvisit):
    """Recursively visit the ir in post DFS order node,
    apply fvisit. Each node is guaranteed to be visited
    only once.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression.

    fvisit : function
        The visitor function to be applied.
    """
    # thin wrapper: the traversal is implemented on the C++ side
    return _ffi_api.post_order_visit(expr, fvisit)
def well_formed(expr):
    """Check that each Var is only bound once (well formed).

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression

    Returns
    -------
    well_form : bool
        Whether the input expression is well formed
    """
    # thin wrapper: the check is implemented on the C++ side
    return _ffi_api.well_formed(expr)
def check_kind(t, mod=None):
    """Check that the type is well kinded and return the kind.

    For example, this means a type cannot have a tensor of tensor, or be a
    tuple type of 2 shapes.

    Parameters
    ----------
    t : tvm.relay.Type
        The type to check

    mod : Optional[tvm.IRModule]
        The global module.

    Returns
    -------
    kind : Kind
        the kind of t

    Examples
    --------
    .. code:: python

        assert check_kind(relay.TupleType([relay.TypeParam('tp1', relay.Kind.Shape)])) == Shape
        assert check_kind(relay.TupleType([relay.TypeParam('tp1', relay.Kind.Type)])) == Type
    """
    if mod is None:
        return _ffi_api.check_kind(t)
    return _ffi_api.check_kind(t, mod)
def check_constant(expr):
    """Check whether an expression is constant

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression

    Returns
    -------
    result : bool
        Whether the expression is constant.
    """
    # thin wrapper: the analysis is implemented on the C++ side
    return _ffi_api.check_constant(expr)
def check_basic_block_normal_form(expr):
    """Check whether an expression is in the basic block form

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression

    Returns
    -------
    result : bool
        Whether the expression is in the basic block form.
    """
    # thin wrapper: the analysis is implemented on the C++ side
    return _ffi_api.check_basic_block_normal_form(expr)
def free_vars(expr):
    """Get free Vars from expression expr in Post DFS order.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression

    Returns
    -------
    free : List[tvm.relay.Var]
        The list of free variables in post DFS order.

    Note
    ----
    The fact that Vars are post-DFS ordered is useful in
    neural networks: usually this means weights of previous
    layers are ordered first.
    """
    return _ffi_api.free_vars(expr)
def bound_vars(expr):
    """Get bound vars from expression expr in post-DFS order.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression

    Returns
    -------
    free : List[tvm.relay.Var]
        The list of bound variables in post-DFS order.
    """
    return _ffi_api.bound_vars(expr)
def all_vars(expr):
    """Get all vars from expression expr in post-DFS order.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression

    Returns
    -------
    free : List[tvm.relay.Var]
        The list of all variables in post-DFS order.
    """
    return _ffi_api.all_vars(expr)
def free_type_vars(expr, mod=None):
    """Get free type variables from expression/type e

    Parameters
    ----------
    expr : Union[tvm.relay.Expr,tvm.relay.Type]
        The input expression/type

    mod : Optional[tvm.IRModule]
        The global module

    Returns
    -------
    free : List[tvm.relay.TypeVar]
        The list of free type variables in post-DFS order
    """
    # fall back to an empty module when none is supplied
    if mod is None:
        mod = IRModule()
    return _ffi_api.free_type_vars(expr, mod)
def bound_type_vars(expr, mod=None):
    """Get bound type variables from expression/type e

    Parameters
    ----------
    expr : Union[tvm.relay.Expr,tvm.relay.Type]
        The input expression/type

    mod : Optional[tvm.IRModule]
        The global module

    Returns
    -------
    free : List[tvm.relay.TypeVar]
        The list of bound type variables in post-DFS order
    """
    # fall back to an empty module when none is supplied
    if mod is None:
        mod = IRModule()
    return _ffi_api.bound_type_vars(expr, mod)
def all_type_vars(expr, mod=None):
    """Get all type variables from expression/type e

    Parameters
    ----------
    expr : Union[tvm.relay.Expr,tvm.relay.Type]
        The input expression/type

    mod : Optional[tvm.IRModule]
        The global module

    Returns
    -------
    free : List[tvm.relay.TypeVar]
        The list of all type variables in post-DFS order
    """
    # fall back to an empty module when none is supplied
    if mod is None:
        mod = IRModule()
    return _ffi_api.all_type_vars(expr, mod)
def all_dtypes(expr):
    """Collect set of all data types used in `expr`.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression

    Returns
    -------
    ret : Set[String]
        Set of data types used in the expression (e.g., `{'int8', 'int32'}`)
    """
    # the FFI call returns a sequence; materialize it as a Python set
    return set(_ffi_api.all_dtypes(expr))
def get_total_mac_number(expr):
    """
    Count the number of MACs (multiply-accumulate) of a model

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression.

    Returns
    -------
    result : int64
        The number of MACs (multiply-accumulate) of a model
    """
    return _ffi_api.GetTotalMacNumber(expr)
def unmatched_cases(match, mod=None):
    """
    Finds cases that the match expression does not catch, if any.

    Parameters
    ----------
    match : tvm.relay.Match
        The match expression

    mod : Optional[tvm.IRModule]
        The module (defaults to an empty module)

    Returns
    -------
    missing_patterns : [tvm.relay.Pattern]
        Patterns that the match expression does not catch.
    """
    # mod may be None; the FFI side handles the default
    return _ffi_api.unmatched_cases(match, mod)
def detect_feature(a, b=None):
    """
    Detect the feature used in a relay program.

    Parameters
    ----------
    a : Union[tvm.relay.Expr, tvm.IRModule]
        The input expression or module.

    b : Optional[Union[tvm.relay.Expr, tvm.IRModule]]
        The input expression or module.
        The two arguments cannot both be expression or module.

    Returns
    -------
    features : Set[Feature]
        Features used in the program.
    """
    # the FFI entry expects (expr, module); swap when the module came first
    expr, mod = (b, a) if isinstance(a, IRModule) else (a, b)
    return {Feature(int(x)) for x in _ffi_api.detect_feature(expr, mod)}
def extract_fused_functions(mod):
    """Pass to extract IRModule of only fused primitive functions.

    The ExtractFusedFunctions pass invokes SimplifyInference, FuseOps(3),
    and ExtractFusedFunctions in that order

    Parameters
    ----------
    mod : tvm.IRModule

    Returns
    -------
    ret : Dict[int, tvm.relay.function.Function]
        A module containing only fused primitive functions
    """
    fused_mod = _ffi_api.ExtractFusedFunctions()(mod)
    return {func_hash: func for func_hash, func in fused_mod.functions.items()}
def list_op_freqs(mod):
    """Pass to extract unique operator names and how frequently they appear
    in an IRModule. Fused functions are traversed to count the operators
    that compose them.

    Parameters
    ----------
    mod : tvm.IRModule

    Returns
    -------
    ret : Dict[str, int]
        Dict of unique operator names to frequency
    """
    return _ffi_api.ExtractOperators(mod)
def list_fake_quantized_op_freqs(mod):
    """Pass to extract fake quantized op names and the frequency that they appear
    in fake quantized regions of an IRModule.

    Parameters
    ----------
    mod : tvm.IRModule

    Returns
    -------
    ret : Dict[str, int]
        Dict of fake quantized operator names to frequency
    """
    return _ffi_api.ExtractFakeQuantizedOps(mod)
def search_fc_transpose(expr):
    """Search fc weight name in the pattern: y = nn.dense(x, transpose(w, [1, 0]))

    This function is used in the data_dep_optimization.simplify_fc_transpose method

    Parameters
    ----------
    expr : tvm.relay.Expr

    Returns
    -------
    ret : Array[String]
        Array of weight variable name in pattern y = nn.dense(x, transpose(w, [1, 0]))
    """
    return _ffi_api.search_fc_transpose(expr)
def get_calibration_data(mod, data):
    """Get the calibration data of a given relay graph

    This pass uses the graph executor to get the calibration data of a module, which
    includes the input and output values of each function. The returned data uses
    the GlobalVar of each function as a key. Users can further access the inputs and
    outputs by using `inputs` or `outputs` as the key.

    Following are some limitations:
    1. The input module (graph) cannot have control flows.
    2. The input arguments of each function cannot be tuples (outputs can be tuples).
    3. We only handle top-level functions (i.e., nested function is not handled).
    4. We only handle functions with `Compiler` attribute being set.

    Parameters
    ----------
    mod : tvm.IRModule
        The input module for collecting the calibration data

    data : Dict[str, NDArray]
        The input data for running the module

    Returns
    -------
    data : Dict[tvm.relay.GlobalVar, Dict[str, NDArray]]
    """
    # output_map tells, per function, where its values live in the flat result
    output_map = _ffi_api.get_calibrate_output_map(mod)
    mod = _ffi_api.get_calibrate_module(mod)
    mod = transform.Inline()(mod)
    ref_res = build_module.create_executor("graph", mod=mod, device=cpu(0)).evaluate()(**data)
    collected = {}
    for gvar, indices in output_map.items():
        start = int(indices[0])
        num_inputs = int(indices[1])
        num_outputs = int(indices[2])
        collected[gvar] = {
            "inputs": ref_res[start : start + num_inputs],
            "outputs": ref_res[start + num_inputs : start + num_inputs + num_outputs],
        }
    return collected
| |
import demistomock as demisto # noqa: F401
import xmltodict
from CommonServerPython import * # noqa: F401
''' CONSTANTS '''
# strftime layout used to timestamp exported stats-dump archive names
DATE_FORMAT = '%Y-%m-%d-%H-%M-%S'
class PanOSXMLAPI(BaseClient):
    """Thin client for the PAN-OS firewall XML API: operational commands and
    the stats-dump export used for SLR generation."""
    def __init__(self, host, port, api_key, verify, timeout, proxy, verbose):
        # Map class parameters for the target firewall
        self.params = {
            'ngfw_host': 'https://' + host,
            'ngfw_port': port,
            'ngfw_tls_verify': verify,
            'ngfw_timeout': int(timeout),
            'ngfw_proxy': proxy,
            'ngfw_verbose': verbose
        }
        self.api_key = api_key
        # On the default port (443) the port is omitted from the base URL;
        # a custom port (e.g. when GlobalProtect Clientless and management
        # are enabled on the same interface) is appended explicitly.
        if self.params['ngfw_port'] == '443':
            base = self.params['ngfw_host'] + '/api/'
            super().__init__(base, self.params['ngfw_tls_verify'])
        else:
            base = self.params['ngfw_host'].rstrip('/:') + ':' + self.params['ngfw_port'] + '/api/'
            super().__init__(base, self.params['ngfw_tls_verify'])
        # Use the XSOAR system proxy to route requests
        if proxy is True:
            self.proxies = handle_proxy()
        else:
            self.proxies = {}
    def get_system_info(self):
        # 'show system info' op command; used for connectivity checks and to
        # name the exported stats archive.
        return self.xmlapi_request_op('<show><system><info></info></system></show>')
    def xmlapi_request_op(self, cmd, response_type='response', no_validate=False):
        """Execute an XML API operational command and return the raw response,
        optionally skipping status validation."""
        # Map and construct query
        params = {
            'type': 'op',
            'cmd': cmd,
            'key': self.api_key
        }
        # Execute query
        # NOTE(review): the base URL already ends in '/api/' and the suffix
        # 'api' is appended again by _http_request — confirm against
        # BaseClient's URL joining that this is intended.
        response = self._http_request(
            'POST',
            'api',
            params=params,
            resp_type=response_type,
            proxies=self.proxies,
            timeout=self.params['ngfw_timeout']
        )
        if no_validate is True:
            return response
        else:
            # Validate response from the API
            result = self.xmlapi_request_validate(response)
            if result is True:
                return response
            else:
                raise Exception(
                    'Could not validate API response! [panos_xmlapi_request_op() -> panos_xmlapi_request_validate()]')
    def xmlapi_request_validate(self, response):
        """Return True when the API response status is success; raise with the
        API's error message otherwise."""
        # Load result into an XML object
        result = xmltodict.parse(response.content)
        # Get API response status
        status = result['response']['@status']
        if self.params['ngfw_verbose'] is True:
            # NOTE(review): demisto.log is deprecated in XSOAR in favor of
            # demisto.debug — consider migrating.
            demisto.log('Got execution status back from API: ' + str(status))
            demisto.log(str(response.text))
        if "success" in status:
            return True
        else:
            message = result['response']['msg']['line']
            raise Exception('API call encountered an error, received "' + str(
                status) + ' " as status code with error message: ' + str(message))
    def get_stats_init_job_id(self):
        """Kick off an asynchronous stats-dump export and return its job ID."""
        params = {
            'type': 'export',
            'category': 'stats-dump',
            'key': self.api_key
        }
        response = self._http_request(
            'POST',
            'api',
            params=params,
            resp_type='response',
            proxies=self.proxies,
            timeout=self.params['ngfw_timeout']
        )
        result = self.xmlapi_request_validate(response)
        if result is True:
            result = xmltodict.parse(response.content)
            job = result['response']['result']['job']
            return job
        else:
            raise Exception('Could not validate API response! [get_stats_init_job_id() -> xmlapi_request_validate()]')
    def get_stats_job_id_status(self, job_id):
        """Return {'status', 'progress'} for a previously started export job."""
        params = {
            'type': 'export',
            'category': 'stats-dump',
            'action': 'status',
            'job-id': job_id,
            'key': self.api_key
        }
        response = self._http_request(
            'POST',
            'api',
            params=params,
            resp_type='response',
            proxies=self.proxies,
            timeout=self.params['ngfw_timeout']
        )
        result = self.xmlapi_request_validate(response)
        if result is True:
            resp = xmltodict.parse(response.content)
            status = resp['response']['result']['job']['status']
            progress = resp['response']['result']['job']['progress']
            result = {
                'status': status,
                'progress': progress
            }
            return result
        else:
            raise Exception('Could not validate API response! [get_stats_job_id_status() -> xmlapi_request_validate()]')
    def get_stats_archive(self, job_id):
        """Download the finished stats-dump archive; return its generated file
        name and raw contents."""
        # Get firewall system name, serial number
        system_info = self.get_system_info()
        resp = xmltodict.parse(system_info.content)
        system_name = resp['response']['result']['system']['devicename']
        system_serial = resp['response']['result']['system']['serial']
        # archive name: <devicename>-<serial>-<timestamp>-stats_dump.tar.gz
        time_stamp = time.strftime(DATE_FORMAT)
        output_file = str(system_name) + '-' + str(system_serial) + '-' + str(time_stamp) + '-stats_dump.tar.gz'
        if self.params['ngfw_verbose'] is True:
            demisto.log('Constructed archive name as: [`' + output_file + '`]')
        params = {
            'type': 'export',
            'category': 'stats-dump',
            'action': 'get',
            'job-id': job_id,
            'key': self.api_key
        }
        # resp_type='content' returns the raw archive bytes
        response = self._http_request(
            'GET',
            'api',
            params=params,
            resp_type='content',
            proxies=self.proxies,
            timeout=self.params['ngfw_timeout']
        )
        result = {
            'file_name': output_file,
            'file_contents': response
        }
        return result
    def dump_ngfw_params(self):
        # Expose the configured connection parameters (used for debugging).
        return self.params
class PanwCSP(BaseClient):
    """Client for the Palo Alto Networks Customer Support Portal (CSP) SLR API.

    Validates and stores the SLR report metadata at construction time and
    exposes a single upload operation plus parameter-introspection helpers.
    All mandatory metadata fields raise a descriptive ``Exception`` when
    missing so misconfiguration is reported immediately.
    """

    def __init__(self, host, csp_key, verify, timeout, proxy, verbose, account_name=None, deployment_location=None,
                 geographic_country=None, geographic_region=None, industry=None, language=None, prepared_by=None,
                 requested_by=None, send_to=None):
        self.params = {
            'csp_host': host,
            'csp_tls_verify': verify,
            'csp_timeout': int(timeout),
            'csp_proxy': proxy,
            'csp_verbose': verbose
        }
        self.slr_params = {}
        self.api_key = csp_key
        self.proxies = handle_proxy() if proxy is True else {}
        # Mandatory SLR metadata. Validated in the same order as before so the
        # first missing value raises the same message as the original code.
        required = {
            'slr_prepared_by': prepared_by,        # "Prepared By" name on the report front page
            'slr_requested_by': requested_by,      # requester email on the report front page
            'slr_send_to': send_to,                # address the completed report is emailed to
            'slr_account_name': account_name,      # overrides the SFDC account name on record
            'slr_industry': industry,              # overrides the SFDC industry on record
            'slr_country': geographic_country,     # overrides the SFDC country on record
        }
        for key, value in required.items():
            if value is None:
                raise Exception(key + ' cannot be None!')
            self.slr_params[key] = value
        # Map the configured region label onto the value the CSP API expects.
        # A None region previously crashed with a bare TypeError from the
        # `in` test; fail with the descriptive error instead.
        if geographic_region is None:
            raise Exception('Invalid parameter specified for slr_geographic_region!')
        if 'Americas' in geographic_region:
            self.slr_params['slr_geographic_region'] = 'North America, Latin America, Canada'
        elif 'APAC' in geographic_region:
            self.slr_params['slr_geographic_region'] = 'Asia Pacific'
        elif 'EMEA' in geographic_region:
            self.slr_params['slr_geographic_region'] = 'Europe'
        elif 'Japan' in geographic_region:
            self.slr_params['slr_geographic_region'] = 'Japan'
        else:
            raise Exception('Invalid parameter specified for slr_geographic_region!')
        # Remaining mandatory overrides, validated after the region as before.
        for key, value in (('slr_deployment_location', deployment_location), ('slr_language', language)):
            if value is None:
                raise Exception(key + ' cannot be None!')
            self.slr_params[key] = value
        headers = {
            'apikey': self.api_key
        }
        # Initiate the BaseClient
        super().__init__(base_url=self.params['csp_host'], verify=self.params['csp_tls_verify'], headers=headers)

    def upload_to_panw(self, file_data):
        """Upload a stats_dump archive to the CSP SLR creation endpoint.

        :param file_data: dict with 'file_actual_name' (path on disk) and
            'file_friendly_name' (name presented to the API)
        :return: parsed JSON response from the /API/v1/Create/ endpoint
        """
        payload = {
            "EmailIdList": self.slr_params['slr_send_to'],
            "RequestedBy": self.slr_params['slr_requested_by'],
            "PreparedBy": self.slr_params['slr_prepared_by'],
            "AccountName": self.slr_params['slr_account_name'],
            "Industry": self.slr_params['slr_industry'],
            "Country": self.slr_params['slr_country'],
            "GeographicRegion": self.slr_params['slr_geographic_region'],
            "DeploymentLocation": self.slr_params['slr_deployment_location'],
            "Language": self.slr_params['slr_language']
        }
        # Context manager guarantees the archive handle is closed even when
        # the upload raises; the original implementation leaked the handle.
        with open(file_data['file_actual_name'], 'rb') as file_handler:
            file = {"files": (file_data['file_friendly_name'], file_handler, 'application/gzip')}
            if self.params['csp_verbose'] is True:
                demisto.log('Upload -> Parameters -> [' + str(payload) + ']')
                demisto.log('Upload -> Files -> [' + str(file) + ']')
            demisto.log('Uploading ' + file_data['file_friendly_name'] + ' to Palo Alto Networks...')
            response = self._http_request(
                'POST',
                '/API/v1/Create/',
                data=payload,
                files=file,
                resp_type='json',
                proxies=self.proxies,
                timeout=self.params['csp_timeout']
            )
        return response

    def dump_csp_params(self, req_type='init'):
        """Return the client ('init') or report metadata ('slr') parameter dict.

        :param req_type: 'init' for connection params, 'slr' for SLR metadata
        :raises Exception: for any other req_type value
        """
        if 'init' in req_type:
            return self.params
        elif 'slr' in req_type:
            return self.slr_params
        else:
            raise Exception('Invalid type passed to function, valid types are: init, slr')
def test_module(xmlapi):
    """XSOAR connectivity check: confirm the firewall returns identity info."""
    # TODO: Rewrite test-module to be more relevant
    parsed = xmltodict.parse(xmlapi.get_system_info().content)
    system = parsed['response']['result']['system']
    if system['hostname'] is None or system['serial'] is None:
        raise Exception('test_module() failed!')
    return demisto.results('ok')
def ngfw_get_system_info(xmlapi):
    """Collect hostname, serial and software version from the firewall."""
    parsed = xmltodict.parse(xmlapi.get_system_info().content)
    system = parsed['response']['result']['system']
    info = {
        'hostname': system['hostname'],
        'serial': system['serial'],
        'software': system['sw-version']
    }
    return CommandResults(
        readable_output=tableToMarkdown('Firewall Information', info),
        outputs_prefix='AutoSLR.ngfw_system_info',
        outputs_key_field='ngfw_system_info',
        outputs=info
    )
def get_integration_params(csp, xmlapi):
    """Aggregate CSP, NGFW, SLR and system parameters into one context entry."""
    # Merge in the same precedence order as before: CSP init, NGFW, SLR,
    # then the system-level toggles.
    merged = dict(csp.dump_csp_params('init'))
    merged.update(xmlapi.dump_ngfw_params())
    merged.update(csp.dump_csp_params('slr'))
    merged['system_proxy'] = demisto.params().get('proxy')
    merged['system_verbose'] = demisto.params().get('system_debug')
    return CommandResults(
        readable_output=tableToMarkdown('Integration Parameters', merged),
        outputs_prefix='AutoSLR.params',
        outputs_key_field='params',
        outputs=merged
    )
def ngfw_generate_stats_dump(xmlapi):
    """Kick off a stats_dump generation job on the firewall."""
    job_id = xmlapi.get_stats_init_job_id()
    message = 'Successfully created stats-generate job! [ID: `' + job_id + '`]'
    return CommandResults(
        readable_output=message,
        outputs_prefix='AutoSLR.generate.job_id',
        outputs_key_field='job_id',
        outputs=job_id
    )
def ngfw_get_stats_dump_status(xmlapi, job_id):
    """Poll the firewall until the stats_dump job *job_id* completes.

    Blocks, polling once per second, until the job reports the FIN state.

    :param xmlapi: PanOSXMLAPI client instance
    :param job_id: identifier returned by the stats-dump generate call
    :return: CommandResults marking the job as finished
    :raises Exception: if the API reports a state other than ACT/FIN/PEND
    """
    while True:
        demisto.log('Checking status for job ID: `' + str(job_id) + '`')
        result = xmlapi.get_stats_job_id_status(job_id)
        if 'FIN' in result['status']:
            # Job finished -- report immediately instead of sleeping once more
            # (the original slept a final second after completion).
            break
        elif 'ACT' in result['status']:
            demisto.log(
                'Job `' + str(job_id) + '` is currently executing, current progress: `' + result['progress'] + '%`')
        elif 'PEND' in result['status']:
            demisto.log('Another job is currently executing, this job is currently in the queue')
        else:
            raise Exception('Unexpected value returned from API, expected [`ACT/FIN/PEND`] got: `' + str(result) + '`')
        time.sleep(1)
    # NOTE: the original `if state is True ... else raise` was unreachable dead
    # code -- the loop can only exit in the finished state.
    readable_output = 'Successfully finished executing stats_dump generation job for job ID: `' + str(job_id) + '`'
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='AutoSLR.generate.job_status',
        outputs_key_field='job_status',
        outputs=True
    )
def ngfw_download_stats_dump(xmlapi, job_id):
    """Fetch the finished stats_dump archive and return it as a war-room file entry."""
    archive = xmlapi.get_stats_archive(job_id)
    return fileResult(archive['file_name'], archive['file_contents'], entryTypes['entryInfoFile'])
def upload_stats_to_panw(csp, input_file):
    """Upload the stats_dump file entry *input_file* to the CSP SLR API."""
    get_path = demisto.getFilePath(input_file)
    file_data = {
        'file_friendly_name': get_path.get('name'),
        'file_actual_name': get_path.get('path')
    }
    demisto.log(
        'Got file name [' + file_data['file_friendly_name'] + '] as path [' + file_data['file_actual_name'] + ']')
    upload_result = csp.upload_to_panw(file_data)
    context = {
        'id': upload_result['Id'],
        'send_to': demisto.params().get('slr_send_to')
    }
    readable_output = ('Success! The SLR Report will be emailed to ' + str(context['send_to'])
                       + ' (SLR ID: `' + str(context['id']) + '`)')
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='AutoSLR.upload',
        outputs_key_field=['id', 'send_to'],
        outputs=context
    )
def main():
    """XSOAR entry point: parse parameters, build clients, route the command."""
    config = demisto.params()
    csp_host = 'https://riskreport.paloaltonetworks.com/'
    try:
        # Silence urllib3 warnings when certificate verification is disabled.
        if config.get('ngfw_tls_verify') is False or config.get('csp_tls_verify') is False:
            requests.packages.urllib3.disable_warnings()
        # Establish PANOS XMLAPI Class Connector
        xmlapi = PanOSXMLAPI(config.get('ngfw_fqdn_ip'), config.get('ngfw_port'), config.get('ngfw_api_key'),
                             config.get('ngfw_tls_verify'), config.get('ngfw_timeout'), config.get('proxy'),
                             config.get('system_debug'))
        # Establish Palo Alto Networks Customer Support Portal (CSP) Class Connector
        csp = PanwCSP(csp_host, config.get('csp_api_key'), config.get('csp_tls_verify'), config.get('csp_timeout'),
                      config.get('proxy'), config.get('system_debug'), config.get('slr_account_name'),
                      config.get('slr_deployment_location'), config.get('slr_geographic_country'),
                      config.get('slr_geographic_region'), config.get('slr_industry'), config.get('slr_language'),
                      config.get('slr_prepared_by'), config.get('slr_requested_by'), config.get('slr_send_to'))
        # Dispatch table mapping XSOAR commands to their handlers; lambdas keep
        # demisto.args() evaluation lazy until the command actually runs.
        handlers = {
            'test-module': lambda: test_module(xmlapi),
            'autoslr-ngfw-system-info': lambda: ngfw_get_system_info(xmlapi),
            'autoslr-dump-params': lambda: get_integration_params(csp, xmlapi),
            'autoslr-ngfw-generate': lambda: ngfw_generate_stats_dump(xmlapi),
            'autoslr-ngfw-check': lambda: ngfw_get_stats_dump_status(xmlapi, demisto.args().get('job_id')),
            'autoslr-ngfw-download': lambda: ngfw_download_stats_dump(xmlapi, demisto.args().get('job_id')),
            'autoslr-csp-upload': lambda: upload_stats_to_panw(csp, demisto.args().get('input_file')),
        }
        command = demisto.command()
        if command not in handlers:
            raise NotImplementedError('Command "' + str(command) + '" is not implemented.')
        return_results(handlers[command]())
    except Exception as e:
        return_error('Failed to execute: [' + str(demisto.command()) + '] Received Error: [' + str(
            e) + '] Traceback: [' + traceback.format_exc() + ']')
# Run when executed directly; the extra names cover the XSOAR container runtime.
if __name__ in ('__main__', 'builtin', 'builtins'):
    main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Generic return type threaded through the per-call `cls` callback.
T = TypeVar('T')
# Alias for JSON-serializable request bodies accepted by the build_* helpers.
JSONType = Any
# Optional callback invoked with (pipeline_response, deserialized, headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Shared msrest Serializer used by the build_* request helpers; client-side
# validation is disabled here because each helper passes its constraints
# (min/max length, pattern) explicitly to _SERIALIZER.url().
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_request(
    resource_group_name: str,
    account_name: str,
    subscription_id: str,
    queue_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates a storage queue."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-06-01"
    accept = "application/json"
    # Expand the ARM resource path, validating and escaping each component.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}')
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        accountName=_SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        queueName=_SERIALIZER.url("queue_name", queue_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z0-9]([a-z0-9]|(-(?!-))){1,61}[a-z0-9]$'),
    )
    # Query string: API version only.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: optional Content-Type plus the JSON Accept header.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_update_request(
    resource_group_name: str,
    account_name: str,
    subscription_id: str,
    queue_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PATCH request that updates a storage queue."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-06-01"
    accept = "application/json"
    # Expand the ARM resource path, validating and escaping each component.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}')
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        accountName=_SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        queueName=_SERIALIZER.url("queue_name", queue_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z0-9]([a-z0-9]|(-(?!-))){1,61}[a-z0-9]$'),
    )
    # Query string: API version only.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: optional Content-Type plus the JSON Accept header.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PATCH",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_get_request(
    resource_group_name: str,
    account_name: str,
    subscription_id: str,
    queue_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that retrieves a single storage queue."""
    api_version = "2021-06-01"
    accept = "application/json"
    # Expand the ARM resource path, validating and escaping each component.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}')
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        accountName=_SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        queueName=_SERIALIZER.url("queue_name", queue_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z0-9]([a-z0-9]|(-(?!-))){1,61}[a-z0-9]$'),
    )
    # Query string: API version only.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: JSON Accept header only.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_delete_request(
    resource_group_name: str,
    account_name: str,
    subscription_id: str,
    queue_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request that removes a storage queue."""
    api_version = "2021-06-01"
    accept = "application/json"
    # Expand the ARM resource path, validating and escaping each component.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}')
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        accountName=_SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        queueName=_SERIALIZER.url("queue_name", queue_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z0-9]([a-z0-9]|(-(?!-))){1,61}[a-z0-9]$'),
    )
    # Query string: API version only.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: JSON Accept header only.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="DELETE",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_request(
    resource_group_name: str,
    account_name: str,
    subscription_id: str,
    *,
    maxpagesize: Optional[str] = None,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists the queues of a storage account."""
    api_version = "2021-06-01"
    accept = "application/json"
    # Expand the ARM resource path, validating and escaping each component.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues')
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        accountName=_SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
    )
    # Query string: API version plus optional paging / name-prefix filter.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    if maxpagesize is not None:
        query_parameters['$maxpagesize'] = _SERIALIZER.query("maxpagesize", maxpagesize, 'str')
    if filter is not None:
        query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
    # Headers: JSON Accept header only.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class QueueOperations(object):
    """QueueOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.storage.v2021_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to send prepared requests to the service.
        self._client = client
        # Serializer for request bodies / deserializer for responses.
        self._serialize = serializer
        self._deserialize = deserializer
        # Client configuration; supplies subscription_id to the build_* helpers.
        self._config = config
    @distributed_trace
    def create(
        self,
        resource_group_name: str,
        account_name: str,
        queue_name: str,
        queue: "_models.StorageQueue",
        **kwargs: Any
    ) -> "_models.StorageQueue":
        """Creates a new queue with the specified queue name, under the specified account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param queue_name: A queue name must be unique within a storage account and must be between 3
         and 63 characters.The name must comprise of lowercase alphanumeric and dash(-) characters only,
         it should begin and end with an alphanumeric character and it cannot have two consecutive
         dash(-) characters.
        :type queue_name: str
        :param queue: Queue properties and metadata to be created with.
        :type queue: ~azure.mgmt.storage.v2021_06_01.models.StorageQueue
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: StorageQueue, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_06_01.models.StorageQueue
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageQueue"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the queue model into the JSON request body.
        _json = self._serialize.body(queue, 'StorageQueue')
        request = build_create_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            queue_name=queue_name,
            content_type=content_type,
            json=_json,
            template_url=self.create.metadata['url'],
        )
        # Adapt the request for the transport and resolve the full URL
        # against the client's base endpoint.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Map known status codes to typed exceptions, then raise with the
            # ARM error payload when deserializable.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('StorageQueue', pipeline_response)
        if cls:
            # Hand the raw response to the caller-supplied callback instead.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}'}  # type: ignore
    @distributed_trace
    def update(
        self,
        resource_group_name: str,
        account_name: str,
        queue_name: str,
        queue: "_models.StorageQueue",
        **kwargs: Any
    ) -> "_models.StorageQueue":
        """Creates a new queue with the specified queue name, under the specified account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param queue_name: A queue name must be unique within a storage account and must be between 3
         and 63 characters.The name must comprise of lowercase alphanumeric and dash(-) characters only,
         it should begin and end with an alphanumeric character and it cannot have two consecutive
         dash(-) characters.
        :type queue_name: str
        :param queue: Queue properties and metadata to be created with.
        :type queue: ~azure.mgmt.storage.v2021_06_01.models.StorageQueue
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: StorageQueue, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_06_01.models.StorageQueue
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageQueue"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the queue model into the JSON request body.
        _json = self._serialize.body(queue, 'StorageQueue')
        request = build_update_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            queue_name=queue_name,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        # Adapt the request for the transport and resolve the full URL.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Map known status codes to typed exceptions, then raise.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('StorageQueue', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}'}  # type: ignore
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        account_name: str,
        queue_name: str,
        **kwargs: Any
    ) -> "_models.StorageQueue":
        """Gets the queue with the specified queue name, under the specified account if it exists.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param queue_name: A queue name must be unique within a storage account and must be between 3
         and 63 characters.The name must comprise of lowercase alphanumeric and dash(-) characters only,
         it should begin and end with an alphanumeric character and it cannot have two consecutive
         dash(-) characters.
        :type queue_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: StorageQueue, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_06_01.models.StorageQueue
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageQueue"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            queue_name=queue_name,
            template_url=self.get.metadata['url'],
        )
        # Adapt the request for the transport and resolve the full URL.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Map known status codes to typed exceptions, then raise.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('StorageQueue', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}'}  # type: ignore
    @distributed_trace
    def delete(
        self,
        resource_group_name: str,
        account_name: str,
        queue_name: str,
        **kwargs: Any
    ) -> None:
        """Deletes the queue with the specified queue name, under the specified account if it exists.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param queue_name: A queue name must be unique within a storage account and must be between 3
         and 63 characters.The name must comprise of lowercase alphanumeric and dash(-) characters only,
         it should begin and end with an alphanumeric character and it cannot have two consecutive
         dash(-) characters.
        :type queue_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            queue_name=queue_name,
            template_url=self.delete.metadata['url'],
        )
        # Adapt the request for the transport and resolve the full URL.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Delete succeeds with 204 No Content only.
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}'}  # type: ignore
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        account_name: str,
        maxpagesize: Optional[str] = None,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> Iterable["_models.ListQueueResource"]:
        """Gets a list of all the queues under the specified storage account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param maxpagesize: Optional, a maximum number of queues that should be included in a list
         queue response.
        :type maxpagesize: str
        :param filter: Optional, When specified, only the queues with a name starting with the given
         filter will be listed.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListQueueResource or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_06_01.models.ListQueueResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListQueueResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the templated URL; continuation pages reuse the
            # service-provided next_link verbatim.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    subscription_id=self._config.subscription_id,
                    maxpagesize=maxpagesize,
                    filter=filter,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    subscription_id=self._config.subscription_id,
                    maxpagesize=maxpagesize,
                    filter=filter,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Continuation links are always fetched with GET.
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Pull the page's elements and the continuation token from the model.
            deserialized = self._deserialize("ListQueueResource", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        # ItemPaged drives get_next/extract_data lazily as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues'}  # type: ignore
| |
from contextlib import contextmanager
from datetime import date, datetime, time
from django.db.models import Q
from django.test import TestCase
from unittest.mock import call, patch
from casexml.apps.case.tests.util import create_case
from corehq.apps.app_manager.models import (
AdvancedForm,
AdvancedModule,
FormSchedule,
SchedulePhase,
SchedulePhaseForm,
ScheduleVisit,
)
from corehq.apps.data_interfaces.models import (
AutomaticUpdateRule,
CreateScheduleInstanceActionDefinition,
MatchPropertyDefinition,
VisitSchedulerIntegrationHelper,
)
from corehq.apps.data_interfaces.tests.util import create_empty_rule
from corehq.apps.domain.models import Domain
from corehq.apps.hqcase.utils import update_case
from corehq.apps.users.models import CommCareUser
from corehq.form_processor.models import CommCareCase
from corehq.messaging.scheduling.const import (
VISIT_WINDOW_DUE_DATE,
VISIT_WINDOW_END,
VISIT_WINDOW_START,
)
from corehq.messaging.scheduling.models import (
AlertSchedule,
CasePropertyTimedEvent,
SMSContent,
TimedEvent,
TimedSchedule,
)
from corehq.messaging.scheduling.scheduling_partitioned.dbaccessors import (
delete_case_schedule_instance,
get_case_alert_schedule_instances_for_schedule,
get_case_timed_schedule_instances_for_schedule,
)
from corehq.messaging.scheduling.scheduling_partitioned.models import (
CaseAlertScheduleInstance,
CaseTimedScheduleInstance,
)
from corehq.messaging.scheduling.tasks import (
handle_case_timed_schedule_instance,
)
from corehq.messaging.scheduling.tests.util import (
delete_alert_schedules,
delete_timed_schedules,
)
from corehq.messaging.tasks import (
run_messaging_rule,
run_messaging_rule_for_shard,
sync_case_for_messaging,
sync_case_for_messaging_rule,
)
from corehq.sql_db.util import paginate_query_across_partitioned_databases
def get_visit_scheduler_module_and_form_for_test():
    """Build an (AdvancedModule, AdvancedForm) pair for visit-scheduler tests.

    The form carries an enabled three-visit schedule; the module defines two
    phases ('edd' with no forms, 'add' referencing the form).
    """
    visits = [
        ScheduleVisit(due=1, starts=-1, expires=1, repeats=False, increment=None),
        ScheduleVisit(due=7, starts=-2, expires=3, repeats=False, increment=None),
        ScheduleVisit(due=None, starts=None, expires=None, repeats=True, increment=14),
    ]
    form_schedule = FormSchedule(
        unique_id='form-unique-id-1',
        schedule_form_id='form1',
        enabled=True,
        visits=visits,
    )
    form = AdvancedForm(schedule=form_schedule)

    phases = [
        SchedulePhase(anchor='edd', forms=[]),
        SchedulePhase(anchor='add', forms=[SchedulePhaseForm(form_id=form.unique_id)]),
    ]
    module = AdvancedModule(schedule_phases=phases, forms=[form])
    return module, form
class CaseRuleSchedulingIntegrationTest(TestCase):
domain = 'case-rule-scheduling-test'
@classmethod
def setUpClass(cls):
    """Create the shared test domain (America/New_York) and mobile worker."""
    super().setUpClass()
    domain_obj = Domain(name=cls.domain, default_timezone='America/New_York')
    domain_obj.save()
    cls.domain_obj = domain_obj
    cls.user = CommCareUser.create(cls.domain, 'test1', 'abc', None, None)
@classmethod
def tearDownClass(cls):
    """Delete the mobile worker and domain created in setUpClass."""
    cls.user.delete(cls.domain, deleted_by=None)
    cls.domain_obj.delete()
    super().tearDownClass()
def tearDown(self):
    """Remove all rules, schedule instances, and schedules left by a test."""
    for rule in AutomaticUpdateRule.objects.filter(domain=self.domain):
        rule.hard_delete()

    # Schedule instances are sharded; sweep both instance models across all
    # partitioned databases.
    for model in (CaseAlertScheduleInstance, CaseTimedScheduleInstance):
        for instance in paginate_query_across_partitioned_databases(
                model, Q(domain=self.domain)):
            delete_case_schedule_instance(instance)

    delete_alert_schedules(self.domain)
    delete_timed_schedules(self.domain)
@patch('corehq.messaging.scheduling.util.utcnow')
def test_timed_schedule_instance_creation(self, utcnow_patch):
    """A matching rule creates exactly one timed schedule instance per case
    (idempotent on repeated syncs); when the rule stops matching, the
    instance is deleted."""
    schedule = TimedSchedule.create_simple_daily_schedule(
        self.domain,
        TimedEvent(time=time(9, 0)),
        SMSContent(message={'en': 'Hello'})
    )

    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rule.add_criteria(
        MatchPropertyDefinition,
        property_name='start_sending',
        property_value='Y',
        match_type=MatchPropertyDefinition.MATCH_EQUAL,
    )
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        timed_schedule_id=schedule.schedule_id,
        recipients=(('CommCareUser', self.user.get_id),)
    )
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)

    utcnow_patch.return_value = datetime(2017, 5, 1, 7, 0)
    with create_case(self.domain, 'person') as case:
        # Rule does not match, no instances created
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)

        # Make the rule match. On the first iteration, the instance is created. On the second,
        # no new instance is created since it already exists.
        for minute in [1, 2]:
            utcnow_patch.return_value = datetime(2017, 5, 1, 7, minute)
            update_case(self.domain, case.case_id, case_properties={'start_sending': 'Y'})

            instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
            self.assertEqual(instances.count(), 1)
            self.assertEqual(instances[0].case_id, case.case_id)
            self.assertEqual(instances[0].rule_id, rule.pk)
            self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
            self.assertEqual(instances[0].start_date, date(2017, 5, 1))
            self.assertEqual(instances[0].domain, self.domain)
            self.assertEqual(instances[0].recipient_type, 'CommCareUser')
            self.assertEqual(instances[0].recipient_id, self.user.get_id)
            self.assertEqual(instances[0].current_event_num, 0)
            self.assertEqual(instances[0].schedule_iteration_num, 1)
            # 9:00 in the domain's America/New_York timezone == 13:00 UTC on
            # this date; next_event_due is stored in UTC.
            self.assertEqual(instances[0].next_event_due, datetime(2017, 5, 1, 13, 0))
            self.assertTrue(instances[0].active)

        # Make the rule not match. Instance should no longer exist.
        utcnow_patch.return_value = datetime(2017, 5, 1, 7, 3)
        update_case(self.domain, case.case_id, case_properties={'start_sending': 'N'})
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)
@patch('corehq.messaging.scheduling.util.utcnow')
def test_alert_schedule_instance_creation(self, utcnow_patch):
    """A matching rule creates exactly one alert schedule instance per case
    (idempotent on repeated syncs); when the rule stops matching, the
    instance is deleted."""
    schedule = AlertSchedule.create_simple_alert(
        self.domain,
        SMSContent(message={'en': 'Hello'})
    )

    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rule.add_criteria(
        MatchPropertyDefinition,
        property_name='start_sending',
        property_value='Y',
        match_type=MatchPropertyDefinition.MATCH_EQUAL,
    )
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        alert_schedule_id=schedule.schedule_id,
        recipients=(('CommCareUser', self.user.get_id),)
    )
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)

    utcnow_patch.return_value = datetime(2017, 5, 1, 7, 0)
    with create_case(self.domain, 'person') as case:
        # Rule does not match, no instances created
        instances = get_case_alert_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)

        # Make the rule match. On the first iteration, the instance is created. On the second,
        # no new instance is created since it already exists.
        for minute in range(1, 3):
            utcnow_patch.return_value = datetime(2017, 5, 1, 7, minute)
            update_case(self.domain, case.case_id, case_properties={'start_sending': 'Y'})

            instances = get_case_alert_schedule_instances_for_schedule(case.case_id, schedule)
            self.assertEqual(instances.count(), 1)
            self.assertEqual(instances[0].case_id, case.case_id)
            self.assertEqual(instances[0].rule_id, rule.pk)
            self.assertEqual(instances[0].alert_schedule_id, schedule.schedule_id)
            self.assertEqual(instances[0].domain, self.domain)
            self.assertEqual(instances[0].recipient_type, 'CommCareUser')
            self.assertEqual(instances[0].recipient_id, self.user.get_id)
            self.assertEqual(instances[0].current_event_num, 0)
            self.assertEqual(instances[0].schedule_iteration_num, 1)
            # An alert is due immediately: next_event_due is the (patched)
            # time of creation, and stays at 7:01 on the second pass.
            self.assertEqual(instances[0].next_event_due, datetime(2017, 5, 1, 7, 1))
            self.assertTrue(instances[0].active)

        # Make the rule not match. Instance should no longer exist.
        utcnow_patch.return_value = datetime(2017, 5, 1, 7, 3)
        update_case(self.domain, case.case_id, case_properties={'start_sending': 'N'})
        instances = get_case_alert_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)
@patch('corehq.messaging.scheduling.util.utcnow')
def test_alert_schedule_reset(self, utcnow_patch):
    """When the rule action names a reset_case_property, changing that
    property's value resets the alert instance (new next_event_due at the
    time of the change); re-saving the same value changes nothing."""
    schedule = AlertSchedule.create_simple_alert(
        self.domain,
        SMSContent(message={'en': 'Hello'})
    )

    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rule.add_criteria(
        MatchPropertyDefinition,
        property_name='start_sending',
        property_value='Y',
        match_type=MatchPropertyDefinition.MATCH_EQUAL,
    )
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        alert_schedule_id=schedule.schedule_id,
        recipients=(('CommCareUser', self.user.get_id),),
        reset_case_property_name='reset_property',
    )
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)

    utcnow_patch.return_value = datetime(2017, 5, 1, 7, 0)
    with create_case(self.domain, 'person') as case:
        # Rule does not match, no instances created
        instances = get_case_alert_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)

        # Make the rule match. On the first iteration, the instance is created. On the second,
        # nothing is changed.
        for minute in (1, 2):
            utcnow_patch.return_value = datetime(2017, 5, 1, 7, minute)
            update_case(self.domain, case.case_id,
                case_properties={'start_sending': 'Y', 'reset_property': 'a'})

            instances = get_case_alert_schedule_instances_for_schedule(case.case_id, schedule)
            self.assertEqual(instances.count(), 1)
            self.assertEqual(instances[0].case_id, case.case_id)
            self.assertEqual(instances[0].rule_id, rule.pk)
            self.assertEqual(instances[0].alert_schedule_id, schedule.schedule_id)
            self.assertEqual(instances[0].domain, self.domain)
            self.assertEqual(instances[0].recipient_type, 'CommCareUser')
            self.assertEqual(instances[0].recipient_id, self.user.get_id)
            self.assertEqual(instances[0].current_event_num, 0)
            self.assertEqual(instances[0].schedule_iteration_num, 1)
            self.assertEqual(instances[0].next_event_due, datetime(2017, 5, 1, 7, 1))
            self.assertEqual(instances[0].last_reset_case_property_value, 'a')
            self.assertTrue(instances[0].active)

        # Update the reset property, and the instance is reset.
        utcnow_patch.return_value = datetime(2017, 6, 1, 7, 0)
        update_case(self.domain, case.case_id, case_properties={'reset_property': 'b'})

        instances = get_case_alert_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].alert_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        self.assertEqual(instances[0].schedule_iteration_num, 1)
        # The reset re-anchors next_event_due at the time of the change.
        self.assertEqual(instances[0].next_event_due, datetime(2017, 6, 1, 7, 0))
        self.assertEqual(instances[0].last_reset_case_property_value, 'b')
        self.assertTrue(instances[0].active)
@patch('corehq.messaging.scheduling.util.utcnow')
def test_timed_schedule_reset(self, utcnow_patch):
    """Changing the reset case property moves a timed schedule instance's
    start date forward to the day of the change; re-saving the same value
    leaves the instance untouched."""
    schedule = TimedSchedule.create_simple_daily_schedule(
        self.domain,
        TimedEvent(time=time(9, 0)),
        SMSContent(message={'en': 'Hello'})
    )

    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rule.add_criteria(
        MatchPropertyDefinition,
        property_name='start_sending',
        property_value='Y',
        match_type=MatchPropertyDefinition.MATCH_EQUAL,
    )
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        timed_schedule_id=schedule.schedule_id,
        recipients=(('CommCareUser', self.user.get_id),),
        reset_case_property_name='reset_property',
    )
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)

    utcnow_patch.return_value = datetime(2017, 5, 1, 7, 0)
    with create_case(self.domain, 'person') as case:
        # Rule does not match, no instances created
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)

        # Make the rule match. On the first iteration, the instance is created. On the second,
        # no new instance is created since it already exists.
        for day in [1, 2]:
            # 20:00 UTC is past the 9:00 local event time, so the schedule
            # starts the following day (May 2) on first creation.
            utcnow_patch.return_value = datetime(2017, 5, day, 20, 0)
            update_case(self.domain, case.case_id,
                case_properties={'start_sending': 'Y', 'reset_property': '1'})

            instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
            self.assertEqual(instances.count(), 1)
            self.assertEqual(instances[0].case_id, case.case_id)
            self.assertEqual(instances[0].rule_id, rule.pk)
            self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
            self.assertEqual(instances[0].start_date, date(2017, 5, 2))
            self.assertEqual(instances[0].domain, self.domain)
            self.assertEqual(instances[0].recipient_type, 'CommCareUser')
            self.assertEqual(instances[0].recipient_id, self.user.get_id)
            self.assertEqual(instances[0].current_event_num, 0)
            self.assertEqual(instances[0].schedule_iteration_num, 1)
            self.assertEqual(instances[0].next_event_due, datetime(2017, 5, 2, 13, 0))
            self.assertTrue(instances[0].active)

        # Change the value of 'reset_property', and the start date should be reset
        utcnow_patch.return_value = datetime(2017, 5, 2, 20, 0)
        update_case(self.domain, case.case_id, case_properties={'reset_property': '2'})

        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].start_date, date(2017, 5, 3))
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        self.assertEqual(instances[0].schedule_iteration_num, 1)
        self.assertEqual(instances[0].next_event_due, datetime(2017, 5, 3, 13, 0))
        self.assertTrue(instances[0].active)

        # Make the rule not match. Instance should no longer exist.
        utcnow_patch.return_value = datetime(2017, 5, 2, 20, 0)
        update_case(self.domain, case.case_id, case_properties={'start_sending': 'N'})
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)
@patch('corehq.messaging.scheduling.models.content.SMSContent.send')
@patch('corehq.messaging.scheduling.util.utcnow')
def test_timed_schedule_stop_date_case_property(self, utcnow_patch, send_patch):
    """A schedule configured with stop_date_case_property_name deactivates
    once next_event_due reaches the case's stop date, reactivates when the
    stop date is pushed out, and fast-forwards past missed events when
    reactivated after a gap.

    Note: decorators apply bottom-up, so utcnow_patch is the first mock
    argument and send_patch the second.
    """
    schedule = TimedSchedule.create_simple_daily_schedule(
        self.domain,
        TimedEvent(time=time(9, 0)),
        SMSContent(message={'en': 'Hello'}),
        extra_options={'stop_date_case_property_name': 'stop_date'},
    )

    # No criteria: the rule matches every case in the domain.
    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        timed_schedule_id=schedule.schedule_id,
        recipients=(('CommCareUser', self.user.get_id),),
    )
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)

    utcnow_patch.return_value = datetime(2018, 7, 1, 7, 0)
    with create_case(self.domain, 'person') as case:
        # The case matches the rule and is setup to start sending
        update_case(self.domain, case.case_id, case_properties={'stop_date': '2018-07-03'})
        [instance] = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 7, 1))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 1)
        self.assertEqual(instance.next_event_due, datetime(2018, 7, 1, 13, 0))
        self.assertTrue(instance.active)

        # Send the first event, and schedule the next event for the next day
        utcnow_patch.return_value = datetime(2018, 7, 1, 13, 1)
        handle_case_timed_schedule_instance(case.case_id, instance.schedule_instance_id.hex, self.domain)
        self.assertEqual(send_patch.call_count, 1)

        [instance] = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 7, 1))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 2)
        self.assertEqual(instance.next_event_due, datetime(2018, 7, 2, 13, 0))
        self.assertTrue(instance.active)

        # Send the second event, and deactivate because the stop date has been reached
        utcnow_patch.return_value = datetime(2018, 7, 2, 13, 1)
        handle_case_timed_schedule_instance(case.case_id, instance.schedule_instance_id.hex, self.domain)
        self.assertEqual(send_patch.call_count, 2)

        [instance] = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 7, 1))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 3)
        self.assertEqual(instance.next_event_due, datetime(2018, 7, 3, 13, 0))
        self.assertFalse(instance.active)

        # Update the stop date and the instance should be reactivated
        update_case(self.domain, case.case_id, case_properties={'stop_date': '2018-08-01'})
        [instance] = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 7, 1))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 3)
        self.assertEqual(instance.next_event_due, datetime(2018, 7, 3, 13, 0))
        self.assertTrue(instance.active)

        # Update the stop date and the instance should be deactivated
        update_case(self.domain, case.case_id, case_properties={'stop_date': '2018-06-01'})
        [instance] = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 7, 1))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 3)
        self.assertEqual(instance.next_event_due, datetime(2018, 7, 3, 13, 0))
        self.assertFalse(instance.active)

        # Update the stop date and the instance should be reactivated and fast-forwarded
        utcnow_patch.return_value = datetime(2018, 7, 4, 13, 1)
        update_case(self.domain, case.case_id, case_properties={'stop_date': '2018-08-01'})
        [instance] = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 7, 1))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        # Fast-forwarded past the missed July 3/4 events to iteration 5.
        self.assertEqual(instance.schedule_iteration_num, 5)
        self.assertEqual(instance.next_event_due, datetime(2018, 7, 5, 13, 0))
        self.assertTrue(instance.active)
@patch('corehq.messaging.scheduling.util.utcnow')
def test_timed_schedule_start_date_case_property(self, utcnow_patch):
    """With start_date_case_property, no instance exists until the case
    supplies a parseable date; the instance tracks subsequent date changes,
    goes inactive when the date moves into the past, and is deleted when the
    date becomes unparseable."""
    schedule = TimedSchedule.create_simple_daily_schedule(
        self.domain,
        TimedEvent(time=time(9, 0)),
        SMSContent(message={'en': 'Hello'}),
        total_iterations=1,
    )

    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rule.add_criteria(
        MatchPropertyDefinition,
        property_name='start_sending',
        property_value='Y',
        match_type=MatchPropertyDefinition.MATCH_EQUAL,
    )
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        timed_schedule_id=schedule.schedule_id,
        recipients=(('CommCareUser', self.user.get_id),),
        start_date_case_property='appointment_date',
    )
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)

    utcnow_patch.return_value = datetime(2017, 5, 1, 7, 0)
    with create_case(self.domain, 'person') as case:
        # Rule does not match, no instances created
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)

        # Make the rule match, but don't give a start date. No instances are created.
        update_case(self.domain, case.case_id, case_properties={'start_sending': 'Y'})
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)

        # Give a start date. On the first iteration, the instance is created. On the second,
        # no new instance is created since it already exists.
        for minute in [1, 2]:
            utcnow_patch.return_value = datetime(2017, 5, 1, 7, minute)
            update_case(self.domain, case.case_id, case_properties={'appointment_date': '2017-06-01'})

            instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
            self.assertEqual(instances.count(), 1)
            self.assertEqual(instances[0].case_id, case.case_id)
            self.assertEqual(instances[0].rule_id, rule.pk)
            self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
            self.assertEqual(instances[0].start_date, date(2017, 6, 1))
            self.assertEqual(instances[0].domain, self.domain)
            self.assertEqual(instances[0].recipient_type, 'CommCareUser')
            self.assertEqual(instances[0].recipient_id, self.user.get_id)
            self.assertEqual(instances[0].current_event_num, 0)
            self.assertEqual(instances[0].schedule_iteration_num, 1)
            self.assertEqual(instances[0].next_event_due, datetime(2017, 6, 1, 13, 0))
            self.assertTrue(instances[0].active)

        # Update start date. Instance is updated with new start date,
        update_case(self.domain, case.case_id, case_properties={'appointment_date': '2017-07-01'})

        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].start_date, date(2017, 7, 1))
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        self.assertEqual(instances[0].schedule_iteration_num, 1)
        self.assertEqual(instances[0].next_event_due, datetime(2017, 7, 1, 13, 0))
        self.assertTrue(instances[0].active)

        # Set start date to the past. Instance is updated with new start date and is inactive
        # (the single iteration is already over, so the schedule fast-forwards past it).
        update_case(self.domain, case.case_id, case_properties={'appointment_date': '2017-04-01'})

        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].start_date, date(2017, 4, 1))
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        self.assertEqual(instances[0].schedule_iteration_num, 2)
        self.assertEqual(instances[0].next_event_due, datetime(2017, 4, 2, 13, 0))
        self.assertFalse(instances[0].active)

        # Give an invalid start date. Instance should no longer exist.
        update_case(self.domain, case.case_id, case_properties={'appointment_date': 'xyz'})
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)
@contextmanager
def setup_timed_schedule_with_case(self, utcnow_patch):
    """Yield ``(schedule, rule, definition, case)`` for specific-start-date tests.

    Builds a one-iteration daily schedule (9:00 local), a rule matching
    ``start_sending == 'Y'`` with a scheduling action whose
    specific_start_date is 2018-03-01, patches "now" to 2018-02-28 07:00 UTC,
    and creates a 'person' case that is cleaned up on exit.
    """
    schedule = TimedSchedule.create_simple_daily_schedule(
        self.domain,
        TimedEvent(time=time(9, 0)),
        SMSContent(message={'en': 'Hello'}),
        total_iterations=1,
    )

    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rule.add_criteria(
        MatchPropertyDefinition,
        property_name='start_sending',
        property_value='Y',
        match_type=MatchPropertyDefinition.MATCH_EQUAL,
    )
    # add_action returns the generic action row and its concrete definition;
    # the definition is yielded so tests can edit specific_start_date.
    action, definition = rule.add_action(
        CreateScheduleInstanceActionDefinition,
        timed_schedule_id=schedule.schedule_id,
        recipients=(('CommCareUser', self.user.get_id),),
        specific_start_date=date(2018, 3, 1),
    )
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)

    utcnow_patch.return_value = datetime(2018, 2, 28, 7, 0)
    with create_case(self.domain, 'person') as case:
        yield schedule, rule, definition, case
@patch('corehq.messaging.scheduling.util.utcnow')
def test_timed_schedule_specific_start_date(self, utcnow_patch):
    """An action with specific_start_date pins the instance's start date;
    editing the definition and re-syncing moves the start date, and a past
    start date leaves the (single-iteration) instance inactive."""
    setup = self.setup_timed_schedule_with_case(utcnow_patch)
    with setup as (schedule, rule, definition, case):
        # Rule does not match, no instances created
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)

        # Make the rule match. On the first iteration, the instance is created. On the second,
        # no new instance is created since it already exists.
        for minute in [1, 2]:
            utcnow_patch.return_value = datetime(2018, 2, 28, 7, minute)
            update_case(self.domain, case.case_id, case_properties={'start_sending': 'Y'})

            instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
            self.assertEqual(instances.count(), 1)
            self.assertEqual(instances[0].case_id, case.case_id)
            self.assertEqual(instances[0].rule_id, rule.pk)
            self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
            self.assertEqual(instances[0].start_date, date(2018, 3, 1))
            self.assertEqual(instances[0].domain, self.domain)
            self.assertEqual(instances[0].recipient_type, 'CommCareUser')
            self.assertEqual(instances[0].recipient_id, self.user.get_id)
            self.assertEqual(instances[0].current_event_num, 0)
            self.assertEqual(instances[0].schedule_iteration_num, 1)
            # 9:00 America/New_York is 14:00 UTC on March 1 (EST, UTC-5).
            self.assertEqual(instances[0].next_event_due, datetime(2018, 3, 1, 14, 0))
            self.assertTrue(instances[0].active)

        # Update start date. Instance is updated with new start date.
        definition.specific_start_date = date(2018, 4, 1)
        definition.save()
        AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
        sync_case_for_messaging_rule(self.domain, case.case_id, rule.pk)

        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].start_date, date(2018, 4, 1))
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        self.assertEqual(instances[0].schedule_iteration_num, 1)
        # April 1 is EDT (UTC-4), so 9:00 local is 13:00 UTC.
        self.assertEqual(instances[0].next_event_due, datetime(2018, 4, 1, 13, 0))
        self.assertTrue(instances[0].active)

        # Set start date to the past. Instance is updated with new start date and is inactive.
        definition.specific_start_date = date(2018, 2, 1)
        definition.save()
        AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
        sync_case_for_messaging_rule(self.domain, case.case_id, rule.pk)

        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].start_date, date(2018, 2, 1))
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        self.assertEqual(instances[0].schedule_iteration_num, 2)
        self.assertEqual(instances[0].next_event_due, datetime(2018, 2, 2, 14, 0))
        self.assertFalse(instances[0].active)
@patch('corehq.messaging.scheduling.util.utcnow')
def test_sync_rule_on_hard_deleted_case(self, utcnow_patch):
    """Syncing a rule for a case that has been hard-deleted removes the
    case's schedule instances rather than leaving them orphaned.

    Bug fix: the hard-delete step was missing. Without it the case still
    exists and still matches the rule, so the instance would survive the
    sync and the final assertion could not hold — and the test would not
    exercise what its name claims.
    """
    setup = self.setup_timed_schedule_with_case(utcnow_patch)
    with setup as (schedule, rule, definition, case):
        utcnow_patch.return_value = datetime(2018, 2, 28, 7, 1)
        update_case(self.domain, case.case_id, case_properties={'start_sending': 'Y'})
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)

        # Hard-delete the case out from under the rule, then re-sync.
        # NOTE(review): relies on the CommCareCase manager's hard-delete
        # API (CommCareCase is imported above but was otherwise unused).
        CommCareCase.objects.hard_delete_cases(self.domain, [case.case_id])
        sync_case_for_messaging_rule(self.domain, case.case_id, rule.pk)

        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)
@patch('corehq.messaging.scheduling.util.utcnow')
def test_sync_messaging_on_hard_deleted_case(self, utcnow_patch):
    """The general messaging sync removes schedule instances for a case
    that has been hard-deleted.

    Bug fix: the hard-delete step was missing. Without it the case still
    exists and still matches the rule, so the instance would survive the
    sync and the final assertion could not hold — and the test would not
    exercise what its name claims.
    """
    setup = self.setup_timed_schedule_with_case(utcnow_patch)
    with setup as (schedule, rule, definition, case):
        utcnow_patch.return_value = datetime(2018, 2, 28, 7, 1)
        update_case(self.domain, case.case_id, case_properties={'start_sending': 'Y'})
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)

        # Hard-delete the case, then run the domain-level messaging sync.
        # NOTE(review): relies on the CommCareCase manager's hard-delete
        # API (CommCareCase is imported above but was otherwise unused).
        CommCareCase.objects.hard_delete_cases(self.domain, [case.case_id])
        sync_case_for_messaging(self.domain, case.case_id)

        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)
@patch('corehq.messaging.scheduling.util.utcnow')
def test_timed_schedule_case_property_timed_event(self, utcnow_patch):
    """A CasePropertyTimedEvent schedule uses the case's preferred time, and
    falls back to the schedule's default time when the property is missing
    or unparseable.
    """
    schedule = TimedSchedule.create_simple_daily_schedule(
        self.domain,
        CasePropertyTimedEvent(case_property_name='reminder_time'),
        SMSContent(message={'en': 'Hello'})
    )

    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rule.add_criteria(
        MatchPropertyDefinition,
        property_name='start_sending',
        property_value='Y',
        match_type=MatchPropertyDefinition.MATCH_EQUAL,
    )
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        timed_schedule_id=schedule.schedule_id,
        recipients=(('CommCareUser', self.user.get_id),),
    )
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)

    utcnow_patch.return_value = datetime(2017, 5, 1, 7, 0)
    with create_case(self.domain, 'person') as case:

        def assert_single_instance(expected_next_event_due):
            # Everything except next_event_due is invariant across this test
            instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
            self.assertEqual(instances.count(), 1)
            instance = instances[0]
            self.assertEqual(instance.case_id, case.case_id)
            self.assertEqual(instance.rule_id, rule.pk)
            self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
            self.assertEqual(instance.start_date, date(2017, 5, 1))
            self.assertEqual(instance.domain, self.domain)
            self.assertEqual(instance.recipient_type, 'CommCareUser')
            self.assertEqual(instance.recipient_id, self.user.get_id)
            self.assertEqual(instance.current_event_num, 0)
            self.assertEqual(instance.schedule_iteration_num, 1)
            self.assertEqual(instance.next_event_due, expected_next_event_due)
            self.assertTrue(instance.active)

        # Rule does not match, no instances created
        self.assertEqual(
            get_case_timed_schedule_instances_for_schedule(case.case_id, schedule).count(), 0)

        # Make the rule match, but don't give a preferred time. Default scheduling time is used.
        update_case(self.domain, case.case_id, case_properties={'start_sending': 'Y'})
        assert_single_instance(datetime(2017, 5, 1, 16, 0))

        # Update the preferred time, and the schedule should recalculate
        update_case(self.domain, case.case_id, case_properties={'reminder_time': '09:00'})
        assert_single_instance(datetime(2017, 5, 1, 13, 0))

        # Update the preferred time to a bad value and the default time is used again.
        update_case(self.domain, case.case_id, case_properties={'reminder_time': 'x'})
        assert_single_instance(datetime(2017, 5, 1, 16, 0))
@patch('corehq.apps.data_interfaces.models.'
       'VisitSchedulerIntegrationHelper.get_visit_scheduler_module_and_form')
@patch('corehq.messaging.scheduling.util.utcnow')
def test_visit_scheduler_integration(self, utcnow_patch, module_and_form_patch):
    """The schedule instance anchors its start date on the case's visit
    scheduler anchor property ('add'), recalculates when the anchor changes,
    deactivates when the anchor pushes the schedule into the past, and stops
    entirely when the schedule phase is terminated ('-1').
    """
    schedule = TimedSchedule.create_simple_daily_schedule(
        self.domain,
        TimedEvent(time=time(9, 0)),
        SMSContent(message={'en': 'Hello'}),
        total_iterations=1,
    )

    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    _, definition = rule.add_action(
        CreateScheduleInstanceActionDefinition,
        timed_schedule_id=schedule.schedule_id,
        recipients=(('CommCareUser', self.user.get_id),)
    )
    module, form = get_visit_scheduler_module_and_form_for_test()
    # Point the action at visit 1 of the test form, anchored at the start of
    # the visit window
    definition.set_scheduler_module_info(CreateScheduleInstanceActionDefinition.SchedulerModuleInfo(
        enabled=True,
        app_id='n/a for test',
        form_unique_id=form.unique_id,
        visit_number=1,
        window_position=VISIT_WINDOW_START,
    ))
    definition.save()
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)

    utcnow_patch.return_value = datetime(2017, 8, 1, 7, 0)
    module_and_form_patch.return_value = module, form

    with create_case(self.domain, 'person') as case:
        # Schedule phase does not match, nothing is scheduled
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)

        # Anchor date + matching phase: start date is derived from the anchor
        # (2017-08-01 plus the visit window offset)
        update_case(self.domain, case.case_id,
                    case_properties={'add': '2017-08-01', 'current_schedule_phase': '2'})
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].start_date, date(2017, 8, 6))
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        self.assertEqual(instances[0].schedule_iteration_num, 1)
        self.assertEqual(instances[0].next_event_due, datetime(2017, 8, 6, 13, 0))
        self.assertTrue(instances[0].active)

        # If the anchor date gets updated (due to correction, for example), the schedule recalculates
        update_case(self.domain, case.case_id, case_properties={'add': '2017-08-10'})
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].start_date, date(2017, 8, 15))
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        self.assertEqual(instances[0].schedule_iteration_num, 1)
        self.assertEqual(instances[0].next_event_due, datetime(2017, 8, 15, 13, 0))
        self.assertTrue(instances[0].active)

        # If the anchor date is in the past, the schedule instance is deactivated
        update_case(self.domain, case.case_id, case_properties={'add': '2017-07-01'})
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].start_date, date(2017, 7, 6))
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        # The single iteration has already elapsed, so the instance is past
        # its end (iteration_num advanced past total_iterations=1) and inactive
        self.assertEqual(instances[0].schedule_iteration_num, 2)
        self.assertEqual(instances[0].next_event_due, datetime(2017, 7, 7, 13, 0))
        self.assertFalse(instances[0].active)

        # If the anchor date is reset, the schedule instance is reactivated
        update_case(self.domain, case.case_id, case_properties={'add': '2017-08-01'})
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].start_date, date(2017, 8, 6))
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        self.assertEqual(instances[0].schedule_iteration_num, 1)
        self.assertEqual(instances[0].next_event_due, datetime(2017, 8, 6, 13, 0))
        self.assertTrue(instances[0].active)

        # Making an arbitrary update doesn't cause any recalculating to happen
        with patch('corehq.messaging.scheduling.scheduling_partitioned.models.'
                   'AbstractTimedScheduleInstance.recalculate_schedule') as recalculate_patch:
            update_case(self.domain, case.case_id, case_properties={'new_property': 'new value'})
            self.assertEqual(recalculate_patch.call_count, 0)

        # Instance state is unchanged by the arbitrary update
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].start_date, date(2017, 8, 6))
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        self.assertEqual(instances[0].schedule_iteration_num, 1)
        self.assertEqual(instances[0].next_event_due, datetime(2017, 8, 6, 13, 0))
        self.assertTrue(instances[0].active)

        # Terminate the schedule, no more schedule instances should be scheduled
        update_case(self.domain, case.case_id, case_properties={'current_schedule_phase': '-1'})
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 0)
@patch('corehq.messaging.scheduling.util.utcnow')
def test_start_offset(self, utcnow_patch):
    """The schedule's start_offset shifts next_event_due relative to the
    instance's start_date; changing the offset later moves next_event_due
    without changing start_date.
    """
    schedule = TimedSchedule.create_simple_daily_schedule(
        self.domain,
        TimedEvent(time=time(9, 0)),
        SMSContent(message={'en': 'Hello'}),
        start_offset=2,
    )

    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        timed_schedule_id=schedule.schedule_id,
        recipients=(('CommCareUser', self.user.get_id),),
    )
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)

    utcnow_patch.return_value = datetime(2017, 8, 1, 15, 0)
    with create_case(self.domain, 'person') as case:
        # Instance starts today; first event is start_date + 2 days
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].start_date, date(2017, 8, 1))
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        self.assertEqual(instances[0].schedule_iteration_num, 1)
        self.assertEqual(instances[0].next_event_due, datetime(2017, 8, 3, 13, 0))
        self.assertEqual(instances[0].schedule_revision, schedule.get_schedule_revision())
        self.assertTrue(instances[0].active)

        # Change the schedule's start offset and force a case update to reprocess the schedule instance.
        # The start date should not change, but the schedule instance should respond to the new start offset
        # by calculating a new next_event_due timestamp.
        schedule.start_offset = 5
        schedule.save()
        # Reload so get_schedule_revision() below reflects the saved change
        schedule = TimedSchedule.objects.get(schedule_id=schedule.schedule_id)

        utcnow_patch.return_value = datetime(2017, 8, 4, 7, 0)
        update_case(self.domain, case.case_id, case_properties={'new_property': 'new value'})
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].start_date, date(2017, 8, 1))
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        self.assertEqual(instances[0].schedule_iteration_num, 1)
        self.assertEqual(instances[0].next_event_due, datetime(2017, 8, 6, 13, 0))
        self.assertEqual(instances[0].schedule_revision, schedule.get_schedule_revision())
        self.assertTrue(instances[0].active)

        # Making another arbitrary update doesn't cause any recalculating to happen
        with patch('corehq.messaging.scheduling.scheduling_partitioned.models.'
                   'AbstractTimedScheduleInstance.recalculate_schedule') as recalculate_patch:
            update_case(self.domain, case.case_id, case_properties={'new_property': 'new value 2'})
            self.assertEqual(recalculate_patch.call_count, 0)

        # Instance state is unchanged by the arbitrary update
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        self.assertEqual(instances[0].case_id, case.case_id)
        self.assertEqual(instances[0].rule_id, rule.pk)
        self.assertEqual(instances[0].timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instances[0].start_date, date(2017, 8, 1))
        self.assertEqual(instances[0].domain, self.domain)
        self.assertEqual(instances[0].recipient_type, 'CommCareUser')
        self.assertEqual(instances[0].recipient_id, self.user.get_id)
        self.assertEqual(instances[0].current_event_num, 0)
        self.assertEqual(instances[0].schedule_iteration_num, 1)
        self.assertEqual(instances[0].next_event_due, datetime(2017, 8, 6, 13, 0))
        self.assertEqual(instances[0].schedule_revision, schedule.get_schedule_revision())
        self.assertTrue(instances[0].active)
def _setup_rule(self):
    """Create a simple alert schedule plus a rule that sends it to the case
    itself, clear the rule cache, and return the rule's primary key."""
    alert_schedule = AlertSchedule.create_simple_alert(
        self.domain,
        SMSContent(message={'en': 'Hello'})
    )
    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        alert_schedule_id=alert_schedule.schedule_id,
        recipients=(('Self', None),),
    )
    # Rules are cached per workflow; clear so the new rule is picked up
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    return rule.pk
@patch('corehq.messaging.tasks.sync_case_chunk_for_messaging_rule.delay')
@patch('corehq.messaging.tasks.run_messaging_rule_for_shard.delay')
@patch('corehq.apps.es.es_query.ESQuery.count', return_value=10)
def test_run_messaging_rule_sharded(self, es_patch, shard_rule_patch, sync_patch):
    """run_messaging_rule queues a per-shard task, and the shard task queues
    a chunked sync covering the shard's cases."""
    rule_id = self._setup_rule()
    with create_case(self.domain, 'person') as case1, create_case(self.domain, 'person') as case2:
        # Top-level task fans out to the 'default' shard
        run_messaging_rule(self.domain, rule_id)
        shard_rule_patch.assert_has_calls(
            [call(self.domain, rule_id, 'default')], any_order=True)
        # The shard task queues one chunk containing both cases
        run_messaging_rule_for_shard(self.domain, rule_id, 'default')
        sync_patch.assert_has_calls(
            [call(self.domain, (case1.case_id, case2.case_id), rule_id)], any_order=True)
        # The (patched) ES count was consulted exactly once
        self.assertEqual(es_patch.call_count, 1)
@patch('corehq.messaging.scheduling.models.content.SMSContent.send')
@patch('corehq.messaging.scheduling.util.utcnow')
def test_next_available_daily_slot(self, utcnow_patch, send_patch):
    """With no start date configured, a daily schedule starts at the next
    slot that hasn't passed yet: today if the event time is still ahead,
    otherwise tomorrow.
    """
    schedule = TimedSchedule.create_simple_daily_schedule(
        self.domain,
        TimedEvent(time=time(9, 0)),
        SMSContent(message={'en': 'Hello'}),
        total_iterations=2,
    )

    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    # Leave all start date information blank so it schedules for the next available daily slot
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        timed_schedule_id=schedule.schedule_id,
        recipients=(('CommCareUser', self.user.get_id),),
    )
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)

    # It's 3/1 at 7am local time; the schedule instance gets scheduled for the same day
    utcnow_patch.return_value = datetime(2018, 3, 1, 12, 0)
    with create_case(self.domain, 'person') as case:
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        instance = instances[0]
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 3, 1))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 1)
        self.assertEqual(instance.next_event_due, datetime(2018, 3, 1, 14, 0))
        self.assertEqual(instance.schedule_revision, schedule.get_schedule_revision())
        self.assertTrue(instance.active)

    # It's 3/1 at 10am local time; the schedule instance gets scheduled for the next day
    utcnow_patch.return_value = datetime(2018, 3, 1, 15, 0)
    with create_case(self.domain, 'person') as case:
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        instance = instances[0]
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 3, 2))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 1)
        self.assertEqual(instance.next_event_due, datetime(2018, 3, 2, 14, 0))
        self.assertEqual(instance.schedule_revision, schedule.get_schedule_revision())
        self.assertTrue(instance.active)

        # Fire the event
        utcnow_patch.return_value = datetime(2018, 3, 2, 14, 1)
        instance.handle_current_event()
        self.assertEqual(send_patch.call_count, 1)
        # After firing, the instance advances to the next daily occurrence
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 3, 2))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 2)
        self.assertEqual(instance.next_event_due, datetime(2018, 3, 3, 14, 0))
        self.assertEqual(instance.schedule_revision, schedule.get_schedule_revision())
        self.assertTrue(instance.active)
@patch('corehq.messaging.scheduling.models.content.SMSContent.send')
@patch('corehq.messaging.scheduling.util.utcnow')
def test_next_available_weekly_slot(self, utcnow_patch, send_patch):
    """With no start date configured, a weekly schedule starts in the current
    week if its weekday hasn't passed yet, otherwise the following week."""
    # Mondays with the week starting on Monday
    schedule = TimedSchedule.create_simple_weekly_schedule(
        self.domain,
        TimedEvent(time=time(9, 0)),
        SMSContent(message={'en': 'Hello'}),
        [0],
        0,
        total_iterations=2,
    )

    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    # Leave all start date information blank so it schedules for the next available weekly slot
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        timed_schedule_id=schedule.schedule_id,
        recipients=(('CommCareUser', self.user.get_id),),
    )
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)

    # It's 3/4 at 10pm local time; the schedule instance gets scheduled for the same week
    utcnow_patch.return_value = datetime(2018, 3, 5, 3, 0)
    with create_case(self.domain, 'person') as case:
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        instance = instances[0]
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 3, 4))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 1)
        self.assertEqual(instance.next_event_due, datetime(2018, 3, 5, 14, 0))
        self.assertEqual(instance.schedule_revision, schedule.get_schedule_revision())
        self.assertTrue(instance.active)

    # It's 3/5 at 10pm local time; the schedule instance gets scheduled for the next week
    utcnow_patch.return_value = datetime(2018, 3, 6, 3, 0)
    with create_case(self.domain, 'person') as case:
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        instance = instances[0]
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 3, 12))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 1)
        self.assertEqual(instance.next_event_due, datetime(2018, 3, 12, 13, 0))
        self.assertEqual(instance.schedule_revision, schedule.get_schedule_revision())
        self.assertTrue(instance.active)

        # Fire the event
        utcnow_patch.return_value = datetime(2018, 3, 12, 13, 1)
        instance.handle_current_event()
        self.assertEqual(send_patch.call_count, 1)
        # After firing, the instance advances one week
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 3, 12))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 2)
        self.assertEqual(instance.next_event_due, datetime(2018, 3, 19, 13, 0))
        self.assertEqual(instance.schedule_revision, schedule.get_schedule_revision())
        self.assertTrue(instance.active)
@patch('corehq.messaging.scheduling.models.content.SMSContent.send')
@patch('corehq.messaging.scheduling.util.utcnow')
def test_next_available_monthly_slot(self, utcnow_patch, send_patch):
    """With no start date configured, a monthly schedule starts in the current
    month if its day-of-month (the 15th here) hasn't passed yet, otherwise
    the following month."""
    schedule = TimedSchedule.create_simple_monthly_schedule(
        self.domain,
        TimedEvent(time=time(9, 0)),
        [15],
        SMSContent(message={'en': 'Hello'}),
        total_iterations=2,
    )

    rule = create_empty_rule(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    # Leave all start date information blank so it schedules for the next available monthly slot
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        timed_schedule_id=schedule.schedule_id,
        recipients=(('CommCareUser', self.user.get_id),),
    )
    AutomaticUpdateRule.clear_caches(self.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)

    # It's 3/4 at 5pm local time; the schedule instance gets scheduled for the same month
    utcnow_patch.return_value = datetime(2018, 3, 4, 22, 0)
    with create_case(self.domain, 'person') as case:
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        instance = instances[0]
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 3, 4))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 1)
        self.assertEqual(instance.next_event_due, datetime(2018, 3, 15, 13, 0))
        self.assertEqual(instance.schedule_revision, schedule.get_schedule_revision())
        self.assertTrue(instance.active)

    # It's 3/16 at 5pm local time; the schedule instance gets scheduled for the next month
    utcnow_patch.return_value = datetime(2018, 3, 16, 21, 0)
    with create_case(self.domain, 'person') as case:
        instances = get_case_timed_schedule_instances_for_schedule(case.case_id, schedule)
        self.assertEqual(instances.count(), 1)
        instance = instances[0]
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 4, 1))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 1)
        self.assertEqual(instance.next_event_due, datetime(2018, 4, 15, 13, 0))
        self.assertEqual(instance.schedule_revision, schedule.get_schedule_revision())
        self.assertTrue(instance.active)

        # Fire the event
        utcnow_patch.return_value = datetime(2018, 4, 15, 13, 1)
        instance.handle_current_event()
        self.assertEqual(send_patch.call_count, 1)
        # After firing, the instance advances one month
        self.assertEqual(instance.case_id, case.case_id)
        self.assertEqual(instance.rule_id, rule.pk)
        self.assertEqual(instance.timed_schedule_id, schedule.schedule_id)
        self.assertEqual(instance.start_date, date(2018, 4, 1))
        self.assertEqual(instance.domain, self.domain)
        self.assertEqual(instance.recipient_type, 'CommCareUser')
        self.assertEqual(instance.recipient_id, self.user.get_id)
        self.assertEqual(instance.current_event_num, 0)
        self.assertEqual(instance.schedule_iteration_num, 2)
        self.assertEqual(instance.next_event_due, datetime(2018, 5, 15, 13, 0))
        self.assertEqual(instance.schedule_revision, schedule.get_schedule_revision())
        self.assertTrue(instance.active)
class VisitSchedulerIntegrationHelperTestCase(TestCase):
    """Unit tests for VisitSchedulerIntegrationHelper against the canned test
    visit scheduler module/form fixture."""

    domain = 'visit-scheduler-integration-helper'

    @classmethod
    def setUpClass(cls):
        cls.module, cls.form = get_visit_scheduler_module_and_form_for_test()
        super(VisitSchedulerIntegrationHelperTestCase, cls).setUpClass()

    def get_helper(self, case, visit_number=2, window_position=VISIT_WINDOW_START):
        """Build a helper for ``case`` pointed at the fixture form."""
        scheduler_module_info = CreateScheduleInstanceActionDefinition.SchedulerModuleInfo(
            enabled=True,
            app_id='n/a for test',
            form_unique_id=self.form.unique_id,
            visit_number=visit_number,
            window_position=window_position,
        )
        return VisitSchedulerIntegrationHelper(case, scheduler_module_info)

    def test_get_visit_scheduler_form_phase(self):
        with create_case(self.domain, 'person') as case:
            phase_num, phase = self.get_helper(case).get_visit_scheduler_form_phase(self.module)
            self.assertEqual(phase_num, 2)
            self.assertEqual(phase.to_json(), self.module.schedule_phases[1].to_json())

    def test_calculate_window_date(self):
        # Each window position anchors the date differently for the same visit
        expectations = [
            (VISIT_WINDOW_START, date(2017, 7, 30)),
            (VISIT_WINDOW_DUE_DATE, date(2017, 8, 1)),
            (VISIT_WINDOW_END, date(2017, 8, 4)),
        ]
        with create_case(self.domain, 'person') as case:
            for window_position, expected_date in expectations:
                helper = self.get_helper(case, window_position=window_position)
                self.assertEqual(
                    helper.calculate_window_date(self.form.schedule.visits[1], date(2017, 8, 1)),
                    expected_date
                )

    def test_get_case_current_schedule_phase(self):
        with create_case(self.domain, 'person') as case:
            # No phase property on the case yet
            self.assertIsNone(self.get_helper(case).get_case_current_schedule_phase())
            update_case(self.domain, case.case_id, case_properties={'current_schedule_phase': '2'})
            refreshed_case = CommCareCase.objects.get_case(case.case_id, self.domain)
            self.assertEqual(self.get_helper(refreshed_case).get_case_current_schedule_phase(), 2)

    def test_get_visit(self):
        with create_case(self.domain, 'person') as case:
            self.assertEqual(
                self.get_helper(case, visit_number=1).get_visit(self.form).to_json(),
                self.form.schedule.visits[1].to_json()
            )
            # Repeat visits aren't supported
            with self.assertRaises(VisitSchedulerIntegrationHelper.VisitSchedulerIntegrationException):
                self.get_helper(case, visit_number=2).get_visit(self.form)
            # Index out of range
            with self.assertRaises(VisitSchedulerIntegrationHelper.VisitSchedulerIntegrationException):
                self.get_helper(case, visit_number=999).get_visit(self.form)

    def test_get_anchor_date(self):
        with create_case(self.domain, 'person') as case:
            # Anchor property missing: the helper raises rather than guessing
            with self.assertRaises(VisitSchedulerIntegrationHelper.VisitSchedulerIntegrationException):
                self.get_helper(case).get_anchor_date('add')
            update_case(self.domain, case.case_id, case_properties={'add': '2017-08-01'})
            refreshed_case = CommCareCase.objects.get_case(case.case_id, self.domain)
            self.assertEqual(self.get_helper(refreshed_case).get_anchor_date('add'), date(2017, 8, 1))
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Dedicating Public IP addresses
"""
# Import local modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
import datetime
from socket import inet_aton
from struct import unpack
class TestDedicatePublicIPRange(cloudstackTestCase):
@classmethod
def setUpClass(cls):
    """One-time setup: API client, test config, zone/domain/pod lookup, and
    the account that IP ranges will be dedicated to."""
    cls.testClient = super(TestDedicatePublicIPRange, cls).getClsTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Resolve the zone/domain/pod this test run targets
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.services["zoneid"] = cls.zone.id
    cls.pod = get_pod(cls.apiclient, cls.zone.id)
    # Account used as the dedication target; removed in tearDownClass
    cls.account = Account.create(
        cls.apiclient,
        cls.services["account"],
        domainid=cls.domain.id,
    )
    cls._cleanup = [cls.account]
@classmethod
def tearDownClass(cls):
    """Remove class-level resources (the test account)."""
    try:
        cleanup_resources(cls.apiclient, cls._cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
    """Per-test setup: fresh API/DB clients and an empty cleanup list."""
    self.apiclient = self.testClient.getApiClient()
    self.dbclient = self.testClient.getDbConnection()
    self.cleanup = []
def tearDown(self):
    """Per-test teardown: remove anything registered in self.cleanup."""
    try:
        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
@attr(tags = ["advanced", "publiciprange", "dedicate", "release"], required_hardware="false")
def test_dedicatePublicIpRange(self):
"""Test public IP range dedication
"""
# Validate the following:
# 1. Create a Public IP range
# 2. Created IP range should be present, verify with listVlanIpRanges
# 3. Dedicate the created IP range to user account
# 4. Verify IP range is dedicated, verify with listVlanIpRanges
# 5. Release the dedicated Public IP range back to the system
# 6. Verify IP range has been released, verify with listVlanIpRanges
# 7. Delete the Public IP range
self.debug("Creating Public IP range")
self.public_ip_range = PublicIpRange.create(
self.apiclient,
self.services
)
list_public_ip_range_response = PublicIpRange.list(
self.apiclient,
id=self.public_ip_range.vlan.id
)
self.debug(
"Verify listPublicIpRanges response for public ip ranges: %s" \
% self.public_ip_range.vlan.id
)
self.assertEqual(
isinstance(list_public_ip_range_response, list),
True,
"Check for list Public IP range response"
)
public_ip_response = list_public_ip_range_response[0]
self.assertEqual(
public_ip_response.id,
self.public_ip_range.vlan.id,
"Check public ip range response id is in listVlanIpRanges"
)
self.debug("Dedicating Public IP range");
dedicate_public_ip_range_response = PublicIpRange.dedicate(
self.apiclient,
self.public_ip_range.vlan.id,
account=self.account.name,
domainid=self.account.domainid
)
list_public_ip_range_response = PublicIpRange.list(
self.apiclient,
id=self.public_ip_range.vlan.id
)
public_ip_response = list_public_ip_range_response[0]
self.assertEqual(
public_ip_response.account,
self.account.name,
"Check account name is in listVlanIpRanges as the account public ip range is dedicated to"
)
self.debug("Releasing Public IP range");
self.public_ip_range.release(self.apiclient)
list_public_ip_range_response = PublicIpRange.list(
self.apiclient,
id=self.public_ip_range.vlan.id
)
public_ip_response = list_public_ip_range_response[0]
self.assertEqual(
public_ip_response.account,
"system",
"Check account name is system account in listVlanIpRanges"
)
self.debug("Deleting Public IP range");
self.public_ip_range.delete(self.apiclient)
return
@attr(tags = ["advanced", "publiciprange", "dedicate", "release"], required_hardware="false")
def test_dedicate_public_ip_range_for_system_vms(self):
"""Test public IP range dedication for SSVM and CPVM
"""
# Validate the following:
# 1. Create a Public IP range for system vms
# 2. Created IP range should be present and marked as forsystemvms=true, verify with listVlanIpRanges
# 7. Delete the Public IP range
services = {
"gateway":"192.168.99.1",
"netmask":"255.255.255.0",
"startip":"192.168.99.2",
"endip":"192.168.99.200",
"forvirtualnetwork":self.services["forvirtualnetwork"],
"zoneid":self.services["zoneid"],
"vlan":self.services["vlan"]
}
self.public_ip_range = PublicIpRange.create(
self.apiclient,
services,
forsystemvms = True
)
created_ip_range_response = PublicIpRange.list(
self.apiclient,
id = self.public_ip_range.vlan.id
)
self.assertEqual(
len(created_ip_range_response),
1,
"Check listVlanIpRanges response"
)
self.assertTrue(
created_ip_range_response[0].forsystemvms,
"Check forsystemvms parameter in created vlan ip range"
)
# Delete range
self.public_ip_range.delete(self.apiclient)
def get_ip_as_number(self, ip_string):
""" Return numeric value for ip (passed as a string)
"""
packed_ip = inet_aton(ip_string)
return unpack(">L", packed_ip)[0]
def is_ip_in_range(self, start_ip, end_ip, ip_to_test):
""" Check whether ip_to_test belongs to IP range between start_ip and end_ip
"""
start = self.get_ip_as_number(start_ip)
end = self.get_ip_as_number(end_ip)
ip = self.get_ip_as_number(ip_to_test)
return start <= ip and ip <= end
def wait_for_system_vm_start(self, domain_id, systemvmtype):
""" Wait until system vm is Running
"""
def checkSystemVMUp():
response = list_ssvms(
self.apiclient,
systemvmtype=systemvmtype,
domainid=domain_id
)
if isinstance(response, list):
if response[0].state == 'Running':
return True, response[0].id
return False, None
res, systemvmId = wait_until(3, 200, checkSystemVMUp)
if not res:
raise Exception("Failed to wait for systemvm to be running")
return systemvmId
def base_system_vm(self, services, systemvmtype):
"""
Base for CPVM or SSVM depending on systemvmtype parameter
"""
# Create range for system vms
self.debug("Creating Public IP range for system vms")
self.public_ip_range = PublicIpRange.create(
self.apiclient,
services,
forsystemvms = True
)
# List Running System VM
list_systemvm_response = list_ssvms(
self.apiclient,
systemvmtype=systemvmtype,
state='Running',
domainid=self.public_ip_range.vlan.domainid
)
self.assertTrue(
isinstance(list_systemvm_response, list),
"Check list response returns a valid list"
)
self.assertEqual(
len(list_systemvm_response),
1,
"Check list response size"
)
# Delete System VM
systemvm = list_systemvm_response[0]
self.debug("Destroying System VM: %s" % systemvm.id)
cmd = destroySystemVm.destroySystemVmCmd()
cmd.id = systemvm.id
self.apiclient.destroySystemVm(cmd)
# Wait for CPVM to start
systemvm_id = self.wait_for_system_vm_start(
self.public_ip_range.vlan.domainid,
systemvmtype
)
self.assertNotEqual(
systemvm_id,
None,
"Check CPVM id is not none"
)
list_systemvm_response = list_ssvms(
self.apiclient,
id=systemvm_id
)
self.assertEqual(
isinstance(list_systemvm_response, list),
True,
"Check list response returns a valid list"
)
systemvm_response = list_systemvm_response[0]
self.debug("System VM state after debug: %s" % systemvm_response.state)
self.assertEqual(
systemvm_response.state,
'Running',
"Check whether System VM is running or not"
)
# Verify System VM got IP in the created range
startip = services["startip"]
endip = services["endip"]
cpvm_ip = systemvm_response.publicip
self.assertTrue(
self.is_ip_in_range(startip, endip, cpvm_ip),
"Check whether System VM Public IP is in range dedicated to system vms"
)
# Disable Zone to be sure System VMs will not get recreated between calls
cmd = updateZone.updateZoneCmd()
cmd.id = self.zone.id
cmd.allocationstate = 'Disabled'
self.apiclient.updateZone(cmd)
# Delete System VM and IP range, so System VM can get IP from original ranges
self.debug("Destroying System VM: %s" % systemvm_id)
cmd = destroySystemVm.destroySystemVmCmd()
cmd.id = systemvm_id
self.apiclient.destroySystemVm(cmd)
domain_id = self.public_ip_range.vlan.domainid
self.public_ip_range.delete(self.apiclient)
# Enable Zone
cmd = updateZone.updateZoneCmd()
cmd.id = self.zone.id
cmd.allocationstate = 'Enabled'
self.apiclient.updateZone(cmd)
# Wait for System VM to start and check System VM public IP
systemvm_id = self.wait_for_system_vm_start(
domain_id,
systemvmtype
)
list_systemvm_response = list_ssvms(
self.apiclient,
id=systemvm_id
)
self.assertFalse(
self.is_ip_in_range(startip, endip, list_systemvm_response[0].publicip),
"Check System VM Public IP is not in range dedicated to system vms"
)
return True
def exists_public_ip_range_for_system_vms(self, zoneid):
"""
Return True if there exists a public IP range dedicated for system vms in zoneid
"""
existing_ip_ranges_response = PublicIpRange.list(
self.apiclient,
zoneid=zoneid
)
for r in existing_ip_ranges_response:
if r.forsystemvms:
return True
return False
@attr(tags = ["advanced", "publiciprange", "dedicate", "release"], required_hardware="false")
def test_dedicate_public_ip_range_for_system_vms_01_ssvm(self):
"""Test SSVM Public IP
"""
self.debug("Precondition: No public IP range dedicated for system vms in the environment")
if self.exists_public_ip_range_for_system_vms(self.services["zoneid"]):
self.skipTest("An existing IP range defined for system vms, aborting test")
services = {
"gateway":"192.168.100.1",
"netmask":"255.255.255.0",
"startip":"192.168.100.2",
"endip":"192.168.100.200",
"forvirtualnetwork":self.services["forvirtualnetwork"],
"zoneid":self.services["zoneid"],
"vlan":self.services["vlan"]
}
try:
self.base_system_vm(
services,
'secondarystoragevm'
)
except Exception:
self.delete_range()
return
@attr(tags = ["advanced", "publiciprange", "dedicate", "release"], required_hardware="false")
def test_dedicate_public_ip_range_for_system_vms_02_cpvm(self):
"""Test CPVM Public IP
"""
self.debug("Precondition: No public IP range dedicated for system vms in the environment")
if self.exists_public_ip_range_for_system_vms(self.services["zoneid"]):
self.skipTest("An existing IP range defined for system vms, aborting test")
services = {
"gateway":"192.168.200.1",
"netmask":"255.255.255.0",
"startip":"192.168.200.2",
"endip":"192.168.200.200",
"forvirtualnetwork":self.services["forvirtualnetwork"],
"zoneid":self.services["zoneid"],
"vlan":self.services["vlan"]
}
try:
self.base_system_vm(
services,
'consoleproxy'
)
except Exception:
self.delete_range()
return
def delete_range(self):
# List System VMs
system_vms = list_ssvms(
self.apiclient,
)
# Disable Zone to be sure System VMs will not get recreated between calls
cmd = updateZone.updateZoneCmd()
cmd.id = self.zone.id
cmd.allocationstate = 'Disabled'
self.apiclient.updateZone(cmd)
# Delete System VM and IP range, so System VM can get IP from original ranges
if system_vms:
for v in system_vms:
self.debug("Destroying System VM: %s" % v.id)
cmd = destroySystemVm.destroySystemVmCmd()
cmd.id = v.id
self.apiclient.destroySystemVm(cmd)
self.public_ip_range.delete(self.apiclient)
# Enable Zone
cmd = updateZone.updateZoneCmd()
cmd.id = self.zone.id
cmd.allocationstate = 'Enabled'
self.apiclient.updateZone(cmd)
| |
# pylint: skip-file
"""
Test the "external" interface.
The "external" interface is what the user sees. It should be pythonic and easy
to use.
"""
import sys
from datetime import timedelta
from unittest.mock import call, patch
import pytest
from x690.types import (
Integer,
ObjectIdentifier,
OctetString,
Sequence,
to_bytes,
)
from puresnmp.aio.api.pythonic import (
bulkget,
bulktable,
bulkwalk,
get,
getnext,
multiget,
multiset,
multiwalk,
set,
table,
walk,
)
from puresnmp.const import Version
from puresnmp.exc import NoSuchOID, SnmpError
from puresnmp.pdu import BulkGetRequest, GetNextRequest, GetRequest, VarBind
from puresnmp.types import Counter, Gauge, IpAddress
from puresnmp.util import BulkResult
from .asyncmock import AsyncGenMock, AsyncMock
# Skip every test in this module on interpreters without async/await
# support, which the aio API under test requires.
pytestmark = pytest.mark.skipif(
    sys.version_info < (3, 5), reason="requires python3.5"
)
class TestGet:
    """Tests for the async pythonic ``get`` wrapper (raw layer mocked)."""

    @pytest.mark.asyncio
    async def test_get_string(self):
        # An OctetString from the raw layer must come back as plain bytes.
        payload = (
            b"Linux d24cf7f36138 4.4.0-28-generic #47-Ubuntu SMP "
            b"Fri Jun 24 10:09:13 UTC 2016 x86_64"
        )
        with patch(
            "puresnmp.aio.api.pythonic.raw", new_callable=AsyncMock
        ) as raw_mock:
            raw_mock.get.return_value = OctetString(payload)
            response = await get("::1", "private", "1.2.3")
        assert response == payload

    @pytest.mark.asyncio
    async def test_get_oid(self):
        # An ObjectIdentifier must come back as its dotted-string form.
        oid_text = "1.3.6.1.4.1.8072.3.2.10"
        with patch(
            "puresnmp.aio.api.pythonic.raw", new_callable=AsyncMock
        ) as raw_mock:
            raw_mock.get.return_value = ObjectIdentifier.from_string(oid_text)
            response = await get("::1", "private", "1.2.3")
        assert response == oid_text
class TestSet:
    """Tests for the async pythonic ``set`` wrapper (raw layer mocked)."""

    @pytest.mark.asyncio
    async def test_set_string(self):
        with patch(
            "puresnmp.aio.api.pythonic.raw", new_callable=AsyncMock
        ) as raw_mock:
            raw_mock.multiset.return_value = {
                ObjectIdentifier.from_string("1.2.3"): OctetString(b"foo")
            }
            outcome = await set("::1", "private", "1.2.3", OctetString(b"foo"))
        # The pythonic layer unwraps the OctetString to bytes.
        assert outcome == b"foo"

    @pytest.mark.asyncio
    async def test_set_string_absolute(self):
        # A leading dot (absolute OID notation) must be accepted too.
        with patch(
            "puresnmp.aio.api.pythonic.raw", new_callable=AsyncMock
        ) as raw_mock:
            raw_mock.multiset.return_value = {
                ObjectIdentifier.from_string("1.2.3"): OctetString(b"foo")
            }
            outcome = await set("::1", "private", ".1.2.3", OctetString(b"foo"))
        assert outcome == b"foo"
class TestWalk:
    """Tests for the async pythonic ``walk`` generator."""

    @pytest.mark.asyncio
    @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6")
    async def test_walk(self):
        oid = ObjectIdentifier.from_string
        with patch(
            "puresnmp.aio.api.pythonic.raw", new_callable=AsyncGenMock
        ) as raw_mock:
            raw_mock.walk.return_value = [
                VarBind(oid("1.3.6.1.2.1.2.2.1.5.1"), Gauge(10000000)),
                VarBind(oid("1.3.6.1.2.1.2.2.1.5.13"), Integer(4294967295)),
            ]
            collected = []
            async for varbind in walk("::1", "public", "1.3.6.1.2.1.2.2.1.5"):
                collected.append(varbind)
        # Values are unwrapped to plain Python ints by the pythonic layer.
        assert collected == [
            VarBind("1.3.6.1.2.1.2.2.1.5.1", 10000000),
            VarBind("1.3.6.1.2.1.2.2.1.5.13", 4294967295),
        ]
class TestMultiGet:
    """Tests for the async pythonic ``multiget`` wrapper."""

    @pytest.mark.asyncio
    async def test_multiget(self):
        with patch(
            "puresnmp.aio.api.pythonic.raw", new_callable=AsyncMock
        ) as raw_mock:
            raw_mock.multiget.return_value = [
                ObjectIdentifier.from_string("1.3.6.1.4.1.8072.3.2.10"),
                OctetString(
                    b"Linux 7fbf2f0c363d 4.4.0-28-generic "
                    b"#47-Ubuntu SMP Fri Jun 24 10:09:13 "
                    b"UTC 2016 x86_64"
                ),
            ]
            requested_oids = [
                "1.3.6.1.2.1.1.2.0",
                "1.3.6.1.2.1.1.1.0",
            ]
            outcome = await multiget("::1", "private", requested_oids)
        # OIDs unwrap to dotted strings, OctetStrings to bytes.
        assert outcome == [
            "1.3.6.1.4.1.8072.3.2.10",
            b"Linux 7fbf2f0c363d 4.4.0-28-generic #47-Ubuntu SMP Fri "
            b"Jun 24 10:09:13 UTC 2016 x86_64",
        ]
class TestMultiWalk:
    """Tests for the async pythonic ``multiwalk`` generator."""

    @pytest.mark.asyncio
    @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6")
    async def test_multi_walk(self):
        oid = ObjectIdentifier.from_string
        with patch(
            "puresnmp.aio.api.pythonic.raw", new_callable=AsyncGenMock
        ) as raw_mock:
            raw_mock.multiwalk.return_value = [
                VarBind(oid("1.3.6.1.2.1.2.2.1.1.1"), 1),
                VarBind(oid("1.3.6.1.2.1.2.2.1.2.1"), b"lo"),
                VarBind(oid("1.3.6.1.2.1.2.2.1.1.78"), 78),
                VarBind(oid("1.3.6.1.2.1.2.2.1.2.78"), b"eth0"),
            ]
            collected = []
            async for varbind in multiwalk(
                "::1", "public", ["1.3.6.1.2.1.2.2.1.1", "1.3.6.1.2.1.2.2.1.2"]
            ):
                collected.append(varbind)
        expected = [
            VarBind("1.3.6.1.2.1.2.2.1.1.1", 1),
            VarBind("1.3.6.1.2.1.2.2.1.2.1", b"lo"),
            VarBind("1.3.6.1.2.1.2.2.1.1.78", 78),
            VarBind("1.3.6.1.2.1.2.2.1.2.78", b"eth0"),
        ]
        # TODO (advanced): should order matter in the following result?
        assert len(collected) == len(expected)
class TestMultiSet:
    """Tests for the async pythonic ``multiset`` wrapper."""

    @pytest.mark.asyncio
    async def test_multiset(self):
        """
        Test setting multiple OIDs at once.

        NOTE: The OID '1.3.6.1.2.1.1.5.0' below is manually edited for
        unit-testing. It probably has a different type in the real world!
        """
        with patch(
            "puresnmp.aio.api.pythonic.raw", new_callable=AsyncMock
        ) as raw_mock:
            raw_mock.multiset.return_value = {
                "1.3.6.1.2.1.1.4.0": OctetString(b"hello@world.com"),
                "1.3.6.1.2.1.1.5.0": OctetString(b"hello@world.com"),
            }
            outcome = await multiset(
                "::1",
                "private",
                [
                    ("1.3.6.1.2.1.1.4.0", OctetString(b"hello@world.com")),
                    ("1.3.6.1.2.1.1.5.0", OctetString(b"hello@world.com")),
                ],
            )
        # OctetString values are unwrapped to plain bytes per OID.
        assert outcome == {
            "1.3.6.1.2.1.1.4.0": b"hello@world.com",
            "1.3.6.1.2.1.1.5.0": b"hello@world.com",
        }

    @pytest.mark.asyncio
    async def test_multiset_absolute(self):
        # Same as above but using absolute (leading-dot) OID notation.
        with patch(
            "puresnmp.aio.api.pythonic.raw", new_callable=AsyncMock
        ) as raw_mock:
            raw_mock.multiset.return_value = {
                "1.3.6.1.2.1.1.4.0": OctetString(b"hello@world.com"),
                "1.3.6.1.2.1.1.5.0": OctetString(b"hello@world.com"),
            }
            outcome = await multiset(
                "::1",
                "private",
                [
                    (".1.3.6.1.2.1.1.4.0", OctetString(b"hello@world.com")),
                    (".1.3.6.1.2.1.1.5.0", OctetString(b"hello@world.com")),
                ],
            )
        assert outcome == {
            "1.3.6.1.2.1.1.4.0": b"hello@world.com",
            "1.3.6.1.2.1.1.5.0": b"hello@world.com",
        }
class TestGetNext:
    """Tests for the async pythonic ``getnext`` wrapper."""

    @pytest.mark.asyncio
    async def test_getnext(self):
        with patch(
            "puresnmp.aio.api.pythonic.raw", new_callable=AsyncMock
        ) as raw_mock:
            raw_mock.multigetnext.return_value = [
                VarBind("1.3.6.1.6.3.1.1.6.1.0", Integer(354522558))
            ]
            outcome = await getnext("::1", "private", "1.3.6.1.5")
        # The single next varbind is returned with its value unwrapped.
        assert outcome == VarBind("1.3.6.1.6.3.1.1.6.1.0", 354522558)
class TestGetBulkGet:
    """Tests for the async pythonic ``bulkget`` wrapper."""

    @pytest.mark.asyncio
    async def test_bulkget(self):
        with patch(
            "puresnmp.aio.api.pythonic.raw", new_callable=AsyncMock
        ) as raw_mock:
            raw_mock.bulkget.return_value = BulkResult(
                {
                    "1.3.6.1.2.1.1.1.0": OctetString(
                        b"Linux 7e68e60fe303 4.4.0-28-generic "
                        b"#47-Ubuntu SMP Fri Jun 24 10:09:13 UTC 2016 x86_64"
                    )
                },
                {
                    "1.3.6.1.2.1.3.1.1.1.10.1.172.17.0.1": Integer(10),
                    "1.3.6.1.2.1.3.1.1.2.10.1.172.17.0.1": OctetString(
                        b"\x02B\xe2\xc5\x8d\t"
                    ),
                    "1.3.6.1.2.1.3.1.1.3.10.1.172.17.0.1": IpAddress(
                        b"\xac\x11\x00\x01"
                    ),
                    "1.3.6.1.2.1.4.1.0": Integer(1),
                    "1.3.6.1.2.1.4.3.0": Counter(57),
                },
            )
            outcome = await bulkget(
                "::1",
                "public",
                ["1.3.6.1.2.1.1.1"],
                ["1.3.6.1.2.1.3.1"],
                max_list_size=5,
            )
        # Scalars and the repeating varbinds are unwrapped to plain
        # Python values inside the BulkResult.
        assert outcome == BulkResult(
            {
                "1.3.6.1.2.1.1.1.0": b"Linux 7e68e60fe303 4.4.0-28-generic "
                b"#47-Ubuntu SMP Fri Jun 24 10:09:13 UTC 2016 x86_64"
            },
            {
                "1.3.6.1.2.1.3.1.1.1.10.1.172.17.0.1": 10,
                "1.3.6.1.2.1.3.1.1.2.10.1.172.17.0.1": b"\x02B\xe2\xc5\x8d\t",
                "1.3.6.1.2.1.3.1.1.3.10.1.172.17.0.1": b"\xac\x11\x00\x01",
                "1.3.6.1.2.1.4.1.0": 1,
                "1.3.6.1.2.1.4.3.0": 57,
            },
        )
class TestGetBulkWalk:
    """Tests for the async pythonic ``bulkwalk`` generator."""

    @pytest.mark.asyncio
    @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6")
    async def test_bulkwalk(self):
        # The unused ``request_ids`` fixture list has been removed; the
        # mocked raw layer never consults request ids.
        with patch(
            "puresnmp.aio.api.pythonic.raw", new_callable=AsyncGenMock
        ) as mck:
            mck.multiwalk.return_value = [
                VarBind("1.3.6.1.2.1.2.2.1.1.1", Integer(1)),
                VarBind("1.3.6.1.2.1.2.2.1.1.10", Integer(10)),
                VarBind("1.3.6.1.2.1.2.2.1.2.1", OctetString(b"lo")),
                VarBind("1.3.6.1.2.1.2.2.1.22.10", ObjectIdentifier(0, 0)),
            ]
            result = []
            async for x in bulkwalk(
                "127.0.0.1", "private", ["1.3.6.1.2.1.2.2"], bulk_size=20
            ):
                result.append(x)
            # Values are unwrapped: Integer -> int, OctetString -> bytes,
            # ObjectIdentifier -> dotted string.
            expected = [
                VarBind("1.3.6.1.2.1.2.2.1.1.1", 1),
                VarBind("1.3.6.1.2.1.2.2.1.1.10", 10),
                VarBind("1.3.6.1.2.1.2.2.1.2.1", b"lo"),
                VarBind("1.3.6.1.2.1.2.2.1.22.10", "0.0"),
            ]
            assert result == expected
class TestTable:
    """Tests for the async pythonic ``table`` wrapper."""

    @pytest.mark.asyncio
    @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6")
    async def test_table(self):
        # The unused ``oid`` alias local has been removed.
        with patch(
            "puresnmp.aio.api.pythonic.raw", new_callable=AsyncGenMock
        ) as mck:
            mck.table.return_value = [
                {"0": "1", "1": Integer(1)},
                {"0": "2", "1": Integer(2)},
            ]
            aio_result = table("1.2.3.4", "private", "1.2")
            result = []
            async for row in aio_result:
                result.append(row)
            expected = [
                {"0": "1", "1": 1},
                {"0": "2", "1": 2},
            ]
            assert len(result) == len(expected)
class TestBulkTable:
    """Tests for the async pythonic ``bulktable`` wrapper."""

    @pytest.mark.asyncio
    @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6")
    async def test_table(self):
        # The unused ``oid`` alias local has been removed.
        # NOTE(review): unlike the sibling TestTable, this patches without
        # new_callable=AsyncGenMock - confirm whether that is intentional.
        with patch("puresnmp.aio.api.pythonic.raw") as mck:
            mck.bulktable.return_value = [
                {"0": "1", "1": Integer(1)},
                {"0": "2", "1": Integer(2)},
            ]
            aio_result = bulktable("1.2.3.4", "private", "1.2")
            result = []
            async for row in aio_result:
                result.append(row)
            expected = [
                {"0": "1", "1": 1},
                {"0": "2", "1": 2},
            ]
            assert result == expected
| |
#!/usr/bin/env python2
#192.168.0.3 /16 subnet
from flask import Flask, request
import MySQLdb
import time
import RPi.GPIO as GPIO
import requests
"""
A simple script to test the registration process.
Must be run as root for now.
"""
# Flask hub for a distributed thermostat: remote nodes register here and
# report temperatures; state is mirrored to a remote MySQL database and
# shown on an RGB LED.
app = Flask(__name__)
desiredtemp = 0
state = 0
#nodes = {'remote1': 0, 'remote2': 0, 'remote3': 0}
# Maps node UUID -> IP address, populated by the /register endpoint.
nodes = {}
# LED channels on physical board pins 19/21/23 (R/G/B).
GPIO.setmode(GPIO.BOARD)
GPIO.setup(19, GPIO.OUT)
GPIO.setup(21, GPIO.OUT)
GPIO.setup(23, GPIO.OUT)
# NOTE(review): database credentials are hardcoded in plain text here
# (and repeated in several handlers below) - move to config/env.
Con = MySQLdb.Connect(host="69.65.10.232", port=3306, user="timuster_ece4564", passwd="netApps4564", db="timuster_ece4564")
Cursor = Con.cursor()
# Make SQL string and execute it
sql = "SELECT avg_temp, current_temp, mode, status FROM projectDB"
Cursor.execute(sql)
#Putting IP addresses to UUIDs
@app.route('/register', methods = ['POST'])
def reg_client():
    """Register a remote node.

    Expects a POST form field ``id`` carrying the node's UUID; the
    request's source IP is recorded as that node's address.  Re-posting
    an already-known UUID does not overwrite the stored address.
    Returns a confirmation string.
    """
    uuid = request.form['id']
    retstr = "Registered {0}".format(uuid)
    ip = request.remote_addr
    # Membership test directly on the dict; only first sighting is kept.
    if uuid not in nodes:
        nodes[uuid] = ip
    print(retstr)
    return retstr
@app.route('/temp', methods = ['GET'])
def get_avg():
    """HTTP endpoint: return the average node temperature as text."""
    return str(calculate_avg())
def calculate_avg():
    """Poll every known node's /temp endpoint and return the average.

    Nodes that fail to respond (or return an unparsable reading) are
    skipped.  If no node responds the average is 0.  The result is also
    written back to the ``avg_temp`` column, best effort.
    Returns the (integer, under Python 2 division) average temperature.
    """
    temp = 0
    if len(nodes) == 0:
        # Fall back to a hard-coded node map when nothing has registered.
        mynodes = {'B827EB5D4F2C':'192.168.0.4', 'B827EB82DA58': '192.168.0.3', 'B827EB9D0DB3':'192.168.0.1'}
    else:
        mynodes = nodes
    successes = 0
    for node in mynodes:
        try:
            r = requests.get("http://" + str(mynodes[node]) + "/temp")
            temp += int(float(r.text))
            successes += 1
        except (requests.RequestException, ValueError):
            # Unreachable node or malformed reading: skip it.
            pass
    if successes != 0:
        avg = temp / successes
    else:
        avg = 0
    try:
        Cursor.execute("""
        UPDATE projectDB
        SET avg_temp=%s
        WHERE db_index=%s
        """, (avg, 0))
    except MySQLdb.Error:
        # Best-effort persistence; a DB hiccup must not break polling.
        pass
    return avg
#From Database
@app.route('/status', methods = ['GET'])
def setMode():
    """HTTP endpoint: apply the DB-selected mode and report it.

    Reads one row from projectDB and acts on its ``mode`` column:
    0 = always heat, 1 = always cool, 2 = automatic (compare set-point
    with the measured average), 3 = off.  The LED is set accordingly.
    """
    Con = MySQLdb.Connect(host="69.65.10.232", port=3306, user="timuster_ece4564", passwd="netApps4564", db="timuster_ece4564")
    Cursor = Con.cursor()
    sql = "SELECT avg_temp, current_temp, mode, status FROM projectDB"
    Cursor.execute(sql)
    Results = Cursor.fetchall();
    # NOTE(review): index 1 is the ``current_temp`` column per the SELECT,
    # yet it is used as the desired set-point - verify the DB schema.
    desiredtemp = int(Results[0][1])
    #(avgtemp, settemp, mode, status)
    if (int(Results[0][2]) == 0):
        set_color("red");
        return "heating"
    if (int(Results[0][2]) == 1):
        set_color("blue");
        return "cooling"
    if (int(Results[0][2]) == 2):
        # Automatic mode: compare the set-point with the live average.
        if (desiredtemp > calculate_avg()):
            #turn LEDS Red
            set_color("red");
            return str(0);
        if (desiredtemp < calculate_avg()):
            #turn LEDs Blue
            set_color("blue");
            return str(1);
        if (desiredtemp == calculate_avg()):
            #idle/off
            set_color("white");
            return "default"
    if (int(Results[0][2]) == 3):
        set_color("white");
        return "off"
    return "error"
#from Database
@app.route('/settemp', methods = ['POST'])
def settemp():
    """HTTP endpoint: store a new desired temperature.

    Expects a POST form field ``temp``.  Writes it to the DB and returns
    which action the thermostat should take: "0" heat, "1" cool,
    "2" idle.
    """
    # request.form values are plain strings; the previous code read
    # ``r.text`` as if it were a requests.Response, which always raised
    # AttributeError.
    newtemp = int(request.form['temp'])
    Con = MySQLdb.Connect(host="69.65.10.232", port=3306, user="timuster_ece4564", passwd="netApps4564", db="timuster_ece4564")
    Cursor = Con.cursor()
    sql = "SELECT avg_temp, current_temp, mode, status FROM projectDB"
    Cursor.execute(sql)
    Results = Cursor.fetchall()
    desiredtemp = int(Results[0][1])
    Cursor.execute("""
    UPDATE projectDB
    SET avg_temp=%s
    WHERE db_index=%s
    """, (newtemp, 0))
    # Poll the nodes once and reuse the value (each call hits every node
    # over HTTP, so repeating it per branch was wasteful).
    avg = calculate_avg()
    if desiredtemp > avg:
        #turn LEDS Red
        set_color("red")
        return str(0)
    if desiredtemp < avg:
        #turn LEDs Blue
        set_color("blue")
        return str(1)
    #idle/off -- previously this branch fell off the end and returned
    #None, which Flask rejects with a 500.
    set_color("white")
    return str(2)
def setdbtemp(newtemp=None):
    """Recompute heat/cool/idle state, optionally storing a new set-point.

    When ``newtemp`` is given it is written to the ``avg_temp`` column
    first.  ``newtemp`` now defaults to None because /state calls this
    function with no argument, which previously raised TypeError.
    Returns "0" (heat), "1" (cool) or "2" (idle).
    """
    Con = MySQLdb.Connect(host="69.65.10.232", port=3306, user="timuster_ece4564", passwd="netApps4564", db="timuster_ece4564")
    Cursor = Con.cursor()
    sql = "SELECT avg_temp, current_temp, mode, status FROM projectDB"
    Cursor.execute(sql)
    Results = Cursor.fetchall()
    desiredtemp = int(Results[0][1])
    if newtemp is not None:
        Cursor.execute("""
        UPDATE projectDB
        SET avg_temp=%s
        WHERE db_index=%s
        """, (newtemp, 0))
    # Poll the nodes once and compare against the set-point.
    avg = calculate_avg()
    if desiredtemp > avg:
        #turn LEDS Red
        set_color("red")
        return str(0)
    if desiredtemp < avg:
        #turn LEDs Blue
        set_color("blue")
        return str(1)
    #idle/off
    set_color("white")
    return str(2)
#from remote node
@app.route('/temp/<uuid>', methods = ['GET'])
def nodetemp(uuid):
    """HTTP endpoint: proxy a single registered node's /temp reading."""
    response = requests.get("http://" + str(nodes[uuid]) + "/temp")
    return response.text
# Database
@app.route('/state', methods = ['POST'])
def currentStatus():
    """HTTP endpoint: recompute the thermostat state and persist it.

    Maps setdbtemp()'s "0"/"1"/other result onto the ``status`` column
    (0 heating, 1 cooling, 2 idle) and returns a human-readable label.

    NOTE(review): ``setdbtemp`` is defined to take a required ``newtemp``
    argument, so these zero-argument calls raise TypeError as written -
    confirm and fix the callee's signature.  Also note setdbtemp is
    invoked twice (side effects repeat) before the else branch.
    """
    #Always Heating, always cooling, idle
    if (setdbtemp() == "0"):
        Cursor.execute("""
        UPDATE projectDB
        SET status=%s
        WHERE db_index=%s
        """, (0 ,0))
        return "Heating"
    elif (setdbtemp() == "1"):
        Cursor.execute("""
        UPDATE projectDB
        SET status=%s
        WHERE db_index=%s
        """, (1 ,0))
        return "Cooling"
    else:
        Cursor.execute("""
        UPDATE projectDB
        SET status=%s
        WHERE db_index=%s
        """, (2 ,0))
        return "Idle"
def set_led(r, g, b):
    """Drive the three LED channel pins with the given levels.

    (set_color('red') drives the red channel to 0, so the wiring appears
    active-low - confirm against the hardware.)
    """
    for pin, level in ((19, r), (21, g), (23, b)):
        GPIO.output(pin, level)
def set_color(color):
    """Receives name of color and sets the LED.

    Unknown color names are silently ignored (LED left unchanged).
    """
    levels = {
        'red':     (0, 1, 1),
        'green':   (1, 0, 1),
        'blue':    (1, 1, 0),
        'yellow':  (0, 0, 1),
        'magenta': (0, 1, 0),
        'cyan':    (1, 0, 0),
        'white':   (0, 0, 0),
    }
    if color in levels:
        set_led(*levels[color])
if __name__ == '__main__':
    # Listen on all interfaces; port 80 is why the script needs root.
    app.run(host='0.0.0.0', port=80, debug=True)
| |
# fugato.views
# Views for the Fugato app
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Thu Oct 23 15:05:12 2014 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: views.py [8eae6c4] benjamin@bengfort.com $
"""
Views for the Fugato app
"""
##########################################################################
## Imports
##########################################################################
from fugato.models import *
from voting.models import Vote
from tagging.models import Tag
from fugato.serializers import *
from voting.serializers import *
from rest_framework import viewsets
from users.mixins import LoginRequired
from users.permissions import IsAuthorOrReadOnly
from tagging.serializers import CSVTagSerializer
from django.views.generic import DetailView, ListView
from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import detail_route, list_route
##########################################################################
## HTTP Generated Views
##########################################################################
class QuestionList(LoginRequired, ListView):
    """
    Authenticated web application view that serves all context and content
    to kick off the Backbone front-end application.
    """

    model = Question
    template_name = "fugato/list.html"
    context_object_name = 'question_list'
    paginate_by = 20

    def get_queryset(self):
        """
        Performs filtering on the queryset based on the query arguments.

        Supported query-string parameters: ``search`` (full-text),
        ``tag`` (tag slug) and ``sort`` (recent | newest | popular |
        frequent | unanswered; anything else falls back to recent).
        """
        queryset = super(QuestionList, self).get_queryset()

        # Get possible tag and sort options from the query string
        self.search_by = self.request.GET.get('search', "").strip()
        self.sorted_by = self.request.GET.get('sort', 'recent').lower()
        self.tagged_by = self.request.GET.get('tag', None)

        # Filter the queryset by the search term
        if self.search_by:
            queryset = queryset.search(self.search_by)

        # Filter the queryset by the tag object
        if self.tagged_by:
            # Convert the query string into a Tag object
            try:
                self.tagged_by = Tag.objects.get(slug=self.tagged_by)
                queryset = self.tagged_by.questions.all()
            except Tag.DoesNotExist:
                # Unknown tag slug yields an empty result page.
                queryset = queryset.none()

        # Select the order by key constraint
        if self.sorted_by == 'recent':
            queryset = queryset.order_by('-modified')

        elif self.sorted_by == 'newest':
            queryset = queryset.order_by('-created')

        elif self.sorted_by == 'popular':
            queryset = queryset.count_votes().order_by('-num_votes')

        elif self.sorted_by == 'frequent':
            queryset = queryset.count_answers().order_by('-num_answers')

        elif self.sorted_by == 'unanswered':
            queryset = queryset.unanswered()

        else:
            # This is the default, but possibly should warn or except
            self.sorted_by = 'recent'
            queryset = queryset.order_by('-modified')

        # Construct the queryset request
        return queryset

    def get_context_data(self, **kwargs):
        """Expose the active filter/sort parameters to the template."""
        context = super(QuestionList, self).get_context_data(**kwargs)

        # Add query params for the view
        context['sort'] = self.sorted_by
        context['tag'] = self.tagged_by
        context['search'] = self.search_by

        # Add rendering params for the view
        context['navbar_active'] = "questions"

        # TODO: This might be very slow, improve this!
        context['num_all_questions'] = self.model.objects.count()

        return context
class QuestionDetail(LoginRequired, DetailView):
    """Authenticated detail page for a single question."""

    model = Question
    template_name = "fugato/question.html"
    context_object_name = "question"
##########################################################################
## API HTTP/JSON Views
##########################################################################
class QuestionTypeaheadViewSet(viewsets.ViewSet):
    """
    Endpoint for returning a typeahead of question texts.
    """

    def list(self, request):
        # Only the raw text strings are needed for typeahead suggestions.
        texts = Question.objects.values_list('text', flat=True)
        return Response(texts)
class QuestionViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoints for questions plus vote/tags/answers actions."""

    queryset = Question.objects.order_by('-created')
    serializer_class = QuestionSerializer

    @detail_route(methods=['post'], permission_classes=[IsAuthenticated])
    def vote(self, request, pk=None):
        """
        Record the requesting user's up/down vote on this question.

        Note that the upvotes and downvotes keys are required by the front-end
        """
        question = self.get_object()
        serializer = VotingSerializer(data=request.data, context={'request': request})
        if serializer.is_valid():
            kwargs = {
                'content': question,
                'user': request.user,
                'vote': serializer.validated_data['vote'],
            }

            # Vote for the question
            _, created = Vote.objects.punch_ballot(**kwargs)

            # Construct the Response
            response = serializer.data
            response.update({'status': 'vote recorded', 'created': created,
                             'upvotes': question.votes.upvotes().count(),
                             'downvotes': question.votes.downvotes().count()})
            return Response(response)
        else:
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)

    @detail_route(methods=['get', 'post'], permission_classes=[IsAuthenticated])
    def tags(self, request, pk=None):
        """
        A helper endpoint to post tags represented as CSV data.

        GET returns the current tags; POST replaces the tag set with the
        submitted CSV list (adding new tags, removing omitted ones).
        """
        question = self.get_object()

        if request.method == 'GET':
            return Response(CSVTagSerializer.serialize_question(question))

        serializer = CSVTagSerializer(data=request.data, context={'request': request})
        if serializer.is_valid():
            # First add any tags to the question
            for tag in serializer.validated_data['csv_tags']:
                # Don't add tags again (minimize db queries )
                if question.has_tag(tag): continue

                # Otherwise, get or create the tag
                tag, _ = Tag.objects.tag(
                    tag,
                    defaults = {
                        'creator': request.user,
                    }
                )

                # Add the tag to the question object
                question.tags.add(tag)

            # Next delete any tags that were removed from the question
            slugs = [
                slugify(t) for t in serializer.validated_data['csv_tags']
            ]
            for tag in question.tags.all():
                if tag.slug not in slugs:
                    question.tags.remove(tag)

            return Response(CSVTagSerializer.serialize_question(question))
        else:
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)

    @detail_route(methods=['get'], permission_classes=[IsAuthenticated])
    def answers(self, request, pk=None):
        """
        Returns a list of all answers associated with the question
        """
        question = self.get_object()
        answers = question.answers.order_by('created') # TODO: order by vote count

        page = self.paginate_queryset(answers)
        if page is not None:
            # BUG FIX: a page is a sequence of Answer objects, so the
            # serializer needs many=True; the unused local paginator
            # instance has also been removed.
            serializer = AnswerSerializer(page, many=True, context={'request': request})
            return self.get_paginated_response(serializer.data)

        serializer = AnswerSerializer(answers, many=True, context={'request': request})
        return Response(serializer.data)
class AnswerViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoints for answers, newest first."""

    queryset = Answer.objects.order_by('-created')
    serializer_class = AnswerSerializer

    @detail_route(methods=['post'], permission_classes=[IsAuthenticated])
    def vote(self, request, pk=None):
        """
        Note that the upvotes and downvotes keys are required by the front-end
        """
        answer = self.get_object()
        ballot = VotingSerializer(data=request.data, context={'request': request})

        # Guard clause: reject malformed ballots up front.
        if not ballot.is_valid():
            return Response(ballot.errors,
                            status=status.HTTP_400_BAD_REQUEST)

        _, created = Vote.objects.punch_ballot(
            content=answer,
            user=request.user,
            vote=ballot.validated_data['vote'],
        )

        payload = ballot.data
        payload.update({'status': 'vote recorded', 'created': created,
                        'upvotes': answer.votes.upvotes().count(),
                        'downvotes': answer.votes.downvotes().count()})
        return Response(payload)
| |
import os
from os import path
import unittest
from datetime import datetime
from collections import OrderedDict
from semantic_version import Version as SemanticVersion
import mytardis_ngs_ingestor
from mytardis_ngs_ingestor.illumina.run_info import \
parse_samplesheet, \
filter_samplesheet_by_project, \
filter_samplesheet_by_project, \
rta_complete_parser, get_sample_project_mapping, \
parse_sample_info_from_filename
class IlluminaParserTestCase(unittest.TestCase):
    def setUp(self):
        """Resolve the bundled test run directories and samplesheet paths."""
        self.run1_dir = path.join(
            path.dirname(__file__),
            'test_data/runs/130907_DMO177_0001_AH9PJLADXZ')
        self.run2_dir = path.join(
            path.dirname(__file__),
            'test_data/runs/140907_DMO177_0002_AC9246ACXX')
        self.run3_dir = path.join(
            path.dirname(__file__),
            'test_data/runs/150907_M04242_0003_000000000-ANV1L')

        # NOTE(review): run1_dir/run2_dir are already anchored at
        # dirname(__file__); os.path.join discards earlier components when
        # a later one is absolute, so the extra prefix below is redundant -
        # verify before simplifying.
        self.samplesheet_csv_path = path.join(os.path.dirname(__file__),
                                              self.run1_dir,
                                              'SampleSheet.csv')
        self.samplesheet_v4_path = path.join(os.path.dirname(__file__),
                                             self.run2_dir,
                                             'SampleSheet.csv')
def tearDown(self):
pass
def test_parse_samplesheet_v4(self):
samples, chemistry = parse_samplesheet(self.samplesheet_v4_path,
standardize_keys=True)
self.assertEqual(chemistry, 'TruSeq LT')
expected = [
{'index': 'CGATGT', 'Lane': '1', 'Description': '',
'SampleID': '16-00982', 'SamplePlate': '', 'I7IndexID': 'A002',
'SampleWell': '', 'SampleProject': 'StephenLavelle',
'SampleName': 'QQ1H2O1'},
{'index': 'TGACCA', 'Lane': '1', 'Description': '',
'SampleID': '16-00983', 'SamplePlate': '', 'I7IndexID': 'A004',
'SampleWell': '', 'SampleProject': 'StephenLavelle',
'SampleName': 'QQ1H2O2'},
{'index': 'CAGATC', 'Lane': '1', 'Description': '',
'SampleID': '16-00984', 'SamplePlate': '', 'I7IndexID': 'A007',
'SampleWell': '', 'SampleProject': 'StephenLavelle',
'SampleName': 'QQ1H2O3'},
{'index': 'AACCAG', 'Lane': '2', 'Description': '',
'SampleID': '16-00487', 'SamplePlate': '', 'I7IndexID': 'A001',
'SampleWell': '', 'SampleProject': 'Shigeru_Miyamoto',
'SampleName': 'QQInputF2'},
{'index': 'TGGTGA', 'Lane': '2', 'Description': '',
'SampleID': '16-00488', 'SamplePlate': '', 'I7IndexID': 'A002',
'SampleWell': '', 'SampleProject': 'Shigeru_Miyamoto',
'SampleName': 'QQH4K4F2'},
{'index': 'AGTGAG', 'Lane': '2', 'Description': '',
'SampleID': '16-00489', 'SamplePlate': '', 'I7IndexID': 'A003',
'SampleWell': '', 'SampleProject': 'Shigeru_Miyamoto',
'SampleName': 'QQH4K9F2'},
{'index': 'AACCAG', 'Lane': '3', 'Description': '',
'SampleID': '16-01787', 'SamplePlate': '', 'I7IndexID': 'A001',
'SampleWell': '', 'SampleProject': 'Phr00t',
'SampleName': 'Q1N'},
{'index': 'TGGTGA', 'Lane': '3', 'Description': '',
'SampleID': '16-01788', 'SamplePlate': '', 'I7IndexID': 'A002',
'SampleWell': '', 'SampleProject': 'Phr00t',
'SampleName': 'Q1L'},
{'index': 'AACCAG', 'Lane': '4', 'Description': '',
'SampleID': '16-01787', 'SamplePlate': '', 'I7IndexID': 'A001',
'SampleWell': '', 'SampleProject': 'Phr00t',
'SampleName': 'Q1N'}
]
for expected_sample, sample in zip(expected, samples):
self.assertDictEqual(sample, expected_sample)
self.assertEqual(chemistry, 'TruSeq LT')
def test_parse_samplesheet_csv(self):
samples, chemistry = parse_samplesheet(self.samplesheet_csv_path)
expected = [
{'Control': 'N', 'Index': 'AACCAG', 'Lane': '1',
'Description': 'May contain nuts',
'SampleProject': 'GusFring', 'Recipe': '',
'SampleID': '14-06205-OCT4-5', 'FCID': 'H9PJLADXZ',
'SampleRef': 'Hg19', 'Operator': 'TW'},
{'Control': 'N', 'Index': 'TGGTGA', 'Lane': '1', 'Description': '',
'SampleProject': 'GusFring', 'Recipe': '',
'SampleID': '14-06206-OCT4-15', 'FCID': 'H9PJLADXZ',
'SampleRef': 'Hg19', 'Operator': 'TW'},
{'Control': 'N', 'Index': 'AGTGAG', 'Lane': '1', 'Description': '',
'SampleProject': 'GusFring', 'Recipe': '',
'SampleID': '14-06207-ZAX-5', 'FCID': 'H9PJLADXZ',
'SampleRef': 'Hg19', 'Operator': 'TW'},
{'Control': 'N', 'Index': 'GCACTA', 'Lane': '1', 'Description': '',
'SampleProject': 'GusFring', 'Recipe': '',
'SampleID': '14-06208-ZAX-15', 'FCID': 'H9PJLADXZ',
'SampleRef': 'Hg19', 'Operator': 'TW'},
{'Control': 'N', 'Index': 'TTGGCA', 'Lane': '1', 'Description': '',
'SampleProject': 'GusFring', 'Recipe': '',
'SampleID': '14-06200-Input', 'FCID': 'H9PJLADXZ',
'SampleRef': 'Hg19', 'Operator': 'TW'},
{'Control': 'N', 'Index': 'ACCTCA', 'Lane': '2', 'Description': '',
'SampleProject': 'Walter_White', 'Recipe': '',
'SampleID': '14-05655-SW38', 'FCID': 'H9PJLADXZ',
'SampleRef': 'Hg19', 'Operator': 'TW'},
{'Control': 'N', 'Index': 'AAGAGG', 'Lane': '2', 'Description': '',
'SampleProject': 'Walter_White', 'Recipe': '',
'SampleID': '14-05658-SW41', 'FCID': 'H9PJLADXZ',
'SampleRef': 'Hg19', 'Operator': 'TW'},
{'Control': 'N', 'Index': 'GGAGAA', 'Lane': '2', 'Description': '',
'SampleProject': 'Walter_White', 'Recipe': '',
'SampleID': '14-05659-SW42', 'FCID': 'H9PJLADXZ',
'SampleRef': 'Hg19', 'Operator': 'TW'},
{'Control': 'N', 'Index': 'AGCATG', 'Lane': '2', 'Description': '',
'SampleProject': 'Walter_White', 'Recipe': '',
'SampleID': '14-05660-SW43', 'FCID': 'H9PJLADXZ',
'SampleRef': 'Hg19', 'Operator': 'TW'},
{'Control': 'N', 'Index': 'GAGTCA', 'Lane': '2', 'Description': '',
'SampleProject': 'Walter_White', 'Recipe': '',
'SampleID': '14-05661-SW44', 'FCID': 'H9PJLADXZ',
'SampleRef': 'Hg19', 'Operator': 'TW'},
{'Control': 'N', 'Index': 'CGTAGA', 'Lane': '2', 'Description': '',
'SampleProject': 'Walter_White', 'Recipe': '',
'SampleID': '14-06203-SW45', 'FCID': 'H9PJLADXZ',
'SampleRef': 'Hg19', 'Operator': 'TW'},
{'Control': 'N', 'Index': 'TCAGAG', 'Lane': '2', 'Description': '',
'SampleProject': 'Walter_White', 'Recipe': '',
'SampleID': '14-06204-SW46', 'FCID': 'H9PJLADXZ',
'SampleRef': 'Hg19', 'Operator': 'TW'}]
for sample_line, expected_line in zip(samples, expected):
self.assertDictEqual(sample_line, expected_line)
self.assertEqual(chemistry, 'TruSeq LT')
def test_filter_samplesheet_by_project(self):
project_lines = filter_samplesheet_by_project(
self.samplesheet_csv_path, 'GusFring')
expected = [
'FCID,Lane,SampleID,SampleRef,Index,Description,Control,Recipe,Operator,SampleProject\r\n',
'H9PJLADXZ,1,14-06205-OCT4-5,Hg19,AACCAG,May contain nuts,N,,TW,GusFring\r\n',
'H9PJLADXZ,1,14-06206-OCT4-15,Hg19,TGGTGA,,N,,TW,GusFring\r\n',
'H9PJLADXZ,1,14-06207-ZAX-5,Hg19,AGTGAG,,N,,TW,GusFring\r\n',
'H9PJLADXZ,1,14-06208-ZAX-15,Hg19,GCACTA,,N,,TW,GusFring\r\n',
'H9PJLADXZ,1,14-06200-Input,Hg19,TTGGCA,,N,,TW,GusFring\r\n',
'#_IEMVERSION_3_TruSeq LT,,,,,,,,,\r\n'
]
for expected_line, project_line in zip(expected, project_lines):
self.assertEqual(project_line, expected_line)
def test_get_sample_project_mapping(self):
bcl2fastq_output_path = path.join(self.run2_dir,
'Data/Intensities/BaseCalls')
mapping = get_sample_project_mapping(bcl2fastq_output_path)
expected = OrderedDict(
[(u'Phr00t', [u'Phr00t/16-01787/Q1N_S7_L003_R1_001.fastq.gz',
u'Phr00t/16-01787/Q1N_S7_L004_R1_001.fastq.gz',
u'Phr00t/16-01788/Q1L_S8_L003_R1_001.fastq.gz']),
(u'Shigeru_Miyamoto', [
u'Shigeru_Miyamoto/16-00487/QQInputF2_S4_L002_R1_001.fastq.gz',
u'Shigeru_Miyamoto/16-00488/QQH4K4F2_S5_L002_R1_001.fastq.gz',
u'Shigeru_Miyamoto/16-00489/QQH4K9F2_S6_L002_R1_001.fastq.gz']),
(u'StephenLavelle', [
u'StephenLavelle/16-00982/QQ1H2O1_S1_L001_R1_001.fastq.gz',
u'StephenLavelle/16-00983/QQ1H2O2_S2_L001_R1_001.fastq.gz',
u'StephenLavelle/16-00984/QQ1H2O3_S3_L001_R1_001.fastq.gz']),
(u'Undetermined_indices',
[u'Undetermined_S0_L001_R1_001.fastq.gz',
u'Undetermined_S0_L002_R1_001.fastq.gz',
u'Undetermined_S0_L003_R1_001.fastq.gz',
u'Undetermined_S0_L004_R1_001.fastq.gz',
u'Undetermined_S0_L005_R1_001.fastq.gz',
u'Undetermined_S0_L006_R1_001.fastq.gz',
u'Undetermined_S0_L007_R1_001.fastq.gz',
u'Undetermined_S0_L008_R1_001.fastq.gz'])])
self.assertDictEqual(mapping, expected)
bcl2fastq_output_path = path.join(self.run3_dir,
'Data/Intensities/BaseCalls')
mapping = get_sample_project_mapping(bcl2fastq_output_path)
expected = OrderedDict([('',
[u'BUGS-1_CACGTCTA_L001_R1_001.fastq.gz',
u'BUGS-1_CACGTCTA_L001_R2_001.fastq.gz',
u'BUGS-2_AGCTAGTG_L001_R1_001.fastq.gz',
u'BUGS-2_AGCTAGTG_L001_R2_001.fastq.gz',
u'DRUGS-1_ACGTCGTT_L001_R1_001.fastq.gz',
u'DRUGS-1_ACGTCGTT_L001_R2_001.fastq.gz',
u'DRUGS-2_GTCCTGTT_L001_R1_001.fastq.gz',
u'DRUGS-2_GTCCTGTT_L001_R2_001.fastq.gz']),
(u'Undetermined_indices',
[u'lane1_Undetermined_L001_R1_001.fastq.gz',
u'lane1_Undetermined_L001_R2_001.fastq.gz'])])
self.assertDictEqual(mapping, expected)
# test absolute_paths flag
mapping = get_sample_project_mapping(bcl2fastq_output_path,
absolute_paths=True)
for k, v in expected.items():
expected[k] = [path.join(bcl2fastq_output_path, f) for f in v]
self.assertDictEqual(mapping, expected)
# TODO: These times and RTA versions aren't actually consistent
# with the other times of the mock runs. Make them
# consistent.
def test_rta_complete_parser(self):
# 1.x = '9/7/2013,18:12:53.149,Illumina RTA 1.18.64'
date, version = rta_complete_parser(self.run1_dir)
self.assertEqual(date, datetime(2013, 9, 7, 18, 12, 53, 149000))
self.assertEqual(version, 'Illumina RTA 1.17.20.0')
# 2.x = 'RTA 2.7.3 completed on 9/7/2014 3:31:22 AM'
date, version = rta_complete_parser(self.run2_dir)
self.assertEqual(date, datetime(2014, 9, 7, 3, 31, 22))
self.assertEqual(version, 'RTA 2.7.3')
def test_parse_sample_info_from_filename(self):
fq_info = parse_sample_info_from_filename(
'DMSO-7_S7_L008_I2_001.fastq.gz')
self.assertEqual(fq_info.get('sample_name', None), 'DMSO-7')
self.assertEqual(fq_info.get('sample_number', None), 7)
self.assertEqual(fq_info.get('lane', None), 8)
self.assertEqual(fq_info.get('read', None), 2)
self.assertEqual(fq_info.get('set_number', None), 1)
class VersionTest(unittest.TestCase):
    """Sanity checks on the package's declared version string."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_semver(self):
        # Constructing SemanticVersion is itself the validity check: a
        # malformed version string raises ValueError.
        parsed = SemanticVersion(mytardis_ngs_ingestor.__version__)
        # Round-tripping through str() must preserve the declared version.
        self.assertEqual(str(parsed),
                         mytardis_ngs_ingestor.__version__)
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
| |
# encoding: utf-8
# module _ctypes
# from /usr/lib/python2.7/lib-dynload/_ctypes.so
# by generator 1.130
""" Create and manipulate C compatible data types in Python. """
# no imports
# Variables with simple values
# NOTE(review): auto-generated Python 2 stub (long literals carry the 'L'
# suffix). The *_addr values are snapshot memory addresses captured on the
# generating machine, not portable constants.
FUNCFLAG_CDECL = 1
FUNCFLAG_PYTHONAPI = 4
FUNCFLAG_USE_ERRNO = 8
FUNCFLAG_USE_LASTERROR = 16
RTLD_GLOBAL = 256
RTLD_LOCAL = 0
_cast_addr = 3070067376L
_memmove_addr = 3075058560L
_memset_addr = 3075043696L
_string_at_addr = 3070066336L
_wstring_at_addr = 3070065232L
__version__ = '1.1.0'
# functions
# NOTE(review): generator-produced stand-ins for C-implemented functions;
# bodies ('pass' / 'return 0') are placeholders, not real implementations.
def addressof(C_instance): # real signature unknown; restored from __doc__
    """
    addressof(C instance) -> integer
    Return the address of the C instance internal buffer
    """
    return 0
def alignment(C_type): # real signature unknown; restored from __doc__
    """
    alignment(C type) -> integer
    alignment(C instance) -> integer
    Return the alignment requirements of a C instance
    """
    return 0
def byref(C_instance, offset=0): # real signature unknown; restored from __doc__
    """
    byref(C instance[, offset=0]) -> byref-object
    Return a pointer lookalike to a C instance, only usable
    as function argument
    """
    pass
def call_cdeclfunction(*args, **kwargs): # real signature unknown
    pass
def call_function(*args, **kwargs): # real signature unknown
    pass
def dlclose(*args, **kwargs): # real signature unknown
    """ dlclose a library """
    pass
def dlopen(name, flag, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
    """ dlopen(name, flag={RTLD_GLOBAL|RTLD_LOCAL}) open a shared library """
    pass
def dlsym(*args, **kwargs): # real signature unknown
    """ find symbol in shared library """
    pass
def get_errno(*args, **kwargs): # real signature unknown
    pass
def pointer(*args, **kwargs): # real signature unknown
    pass
def POINTER(*args, **kwargs): # real signature unknown
    pass
def PyObj_FromPtr(*args, **kwargs): # real signature unknown
    pass
def Py_DECREF(*args, **kwargs): # real signature unknown
    pass
def Py_INCREF(*args, **kwargs): # real signature unknown
    pass
def resize(*args, **kwargs): # real signature unknown
    """ Resize the memory buffer of a ctypes instance """
    pass
def set_conversion_mode(encoding, errors): # real signature unknown; restored from __doc__
    """
    set_conversion_mode(encoding, errors) -> (previous-encoding, previous-errors)
    Set the encoding and error handling ctypes uses when converting
    between unicode and strings. Returns the previous values.
    """
    pass
def set_errno(*args, **kwargs): # real signature unknown
    pass
def sizeof(C_type): # real signature unknown; restored from __doc__
    """
    sizeof(C type) -> integer
    sizeof(C instance) -> integer
    Return the size in bytes of a C instance
    """
    return 0
def _buffer_info(*args, **kwargs): # real signature unknown
    """ Return buffer interface information (for testing only) """
    pass
def _unpickle(*args, **kwargs): # real signature unknown
    pass
# classes
# NOTE(review): generator-produced skeletons of the C-implemented ctypes
# types; method bodies are placeholders, and the _CData base class lives in
# the real binary module (it is not defined in this stub).
class ArgumentError(Exception):
    # no doc
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """list of weak references to the object (if defined)"""
class Array(_CData):
    """ XXX to be provided """
    def __delitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__delitem__(y) <==> del x[y] """
        pass
    def __delslice__(self, i, j): # real signature unknown; restored from __doc__
        """
        x.__delslice__(i, j) <==> del x[i:j]
        Use of negative indices is not supported.
        """
        pass
    def __getitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __getslice__(self, i, j): # real signature unknown; restored from __doc__
        """
        x.__getslice__(i, j) <==> x[i:j]
        Use of negative indices is not supported.
        """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __setitem__(self, i, y): # real signature unknown; restored from __doc__
        """ x.__setitem__(i, y) <==> x[i]=y """
        pass
    def __setslice__(self, i, j, y): # real signature unknown; restored from __doc__
        """
        x.__setslice__(i, j, y) <==> x[i:j]=y
        Use of negative indices is not supported.
        """
        pass
class CFuncPtr(_CData):
    """ Function Pointer """
    def __call__(self, *more): # real signature unknown; restored from __doc__
        """ x.__call__(...) <==> x(...) """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __nonzero__(self): # real signature unknown; restored from __doc__
        """ x.__nonzero__() <==> x != 0 """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    argtypes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """specify the argument types"""
    errcheck = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """a function to check for errors"""
    restype = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """specify the result type"""
class Structure(_CData):
    """ Structure base class """
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
class Union(_CData):
    """ Union base class """
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
class _Pointer(_CData):
    """ XXX to be provided """
    def __delitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__delitem__(y) <==> del x[y] """
        pass
    def __getitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __getslice__(self, i, j): # real signature unknown; restored from __doc__
        """
        x.__getslice__(i, j) <==> x[i:j]
        Use of negative indices is not supported.
        """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __nonzero__(self): # real signature unknown; restored from __doc__
        """ x.__nonzero__() <==> x != 0 """
        pass
    def __setitem__(self, i, y): # real signature unknown; restored from __doc__
        """ x.__setitem__(i, y) <==> x[i]=y """
        pass
    contents = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """the object this pointer points to (read-write)"""
class _SimpleCData(_CData):
    """ XXX to be provided """
    def __ctypes_from_outparam__(self, *args, **kwargs): # real signature unknown
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __nonzero__(self): # real signature unknown; restored from __doc__
        """ x.__nonzero__() <==> x != 0 """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    value = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """current value"""
# variables with complex values
# NOTE(review): the generator could not serialize the real cache contents;
# the duplicate None keys below are an artifact of that, not real data.
_pointer_type_cache = {
    None: None, # (!) real value is ''
    None: # (!) real value is ''
        None # (!) real value is ''
    ,
    None: # (!) real value is ''
        None # (!) real value is ''
    ,
}
| |
import numpy as np
import pytest
import pandas as pd
from pandas import Index, MultiIndex, Series
import pandas.util.testing as tm
def test_equals(idx):
    """Index.equals: True for copies/object casts, False for non-Index."""
    assert idx.equals(idx)
    assert idx.equals(idx.copy())
    assert idx.equals(idx.astype(object))

    # Plain sequences/arrays never compare equal to an Index.
    assert not idx.equals(list(idx))
    assert not idx.equals(np.array(idx))

    # An object-dtype Index over the same data is equal in both directions.
    recast = Index(idx, dtype=object)
    assert idx.equals(recast)
    assert recast.equals(idx)

    if idx.nlevels == 1:
        # do not test MultiIndex
        assert not idx.equals(pd.Series(idx))
def test_equals_op(idx):
    # GH9947, GH10637
    """Elementwise ``==`` between an Index and Index/array/Series operands:
    mismatched lengths raise ValueError, equal lengths compare elementwise,
    and scalars broadcast (non-MultiIndex only)."""
    index_a = idx
    n = len(index_a)
    # index_b: shorter; index_c: same length but last element differs;
    # index_d: length 1 (must NOT broadcast like a scalar).
    index_b = index_a[0:-1]
    index_c = index_a[0:-1].append(index_a[-2:-1])
    index_d = index_a[0:1]
    with pytest.raises(ValueError, match="Lengths must match"):
        index_a == index_b
    expected1 = np.array([True] * n)
    expected2 = np.array([True] * (n - 1) + [False])
    tm.assert_numpy_array_equal(index_a == index_a, expected1)
    tm.assert_numpy_array_equal(index_a == index_c, expected2)
    # test comparisons with numpy arrays
    array_a = np.array(index_a)
    array_b = np.array(index_a[0:-1])
    array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
    array_d = np.array(index_a[0:1])
    with pytest.raises(ValueError, match="Lengths must match"):
        index_a == array_b
    tm.assert_numpy_array_equal(index_a == array_a, expected1)
    tm.assert_numpy_array_equal(index_a == array_c, expected2)
    # test comparisons with Series
    series_a = Series(array_a)
    series_b = Series(array_b)
    series_c = Series(array_c)
    series_d = Series(array_d)
    with pytest.raises(ValueError, match="Lengths must match"):
        index_a == series_b
    tm.assert_numpy_array_equal(index_a == series_a, expected1)
    tm.assert_numpy_array_equal(index_a == series_c, expected2)
    # cases where length is 1 for one of them
    with pytest.raises(ValueError, match="Lengths must match"):
        index_a == index_d
    with pytest.raises(ValueError, match="Lengths must match"):
        index_a == series_d
    with pytest.raises(ValueError, match="Lengths must match"):
        index_a == array_d
    msg = "Can only compare identically-labeled Series objects"
    with pytest.raises(ValueError, match=msg):
        series_a == series_d
    with pytest.raises(ValueError, match="Lengths must match"):
        series_a == array_d
    # comparing with a scalar should broadcast; note that we are excluding
    # MultiIndex because in this case each item in the index is a tuple of
    # length 2, and therefore is considered an array of length 2 in the
    # comparison instead of a scalar
    if not isinstance(index_a, MultiIndex):
        expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
        # assuming the 2nd to last item is unique in the data
        item = index_a[-2]
        tm.assert_numpy_array_equal(index_a == item, expected3)
        tm.assert_series_equal(series_a == item, Series(expected3))
def test_equals_multi(idx):
    """MultiIndex.equals/equal_levels against raw values, fewer levels,
    different level values, and different codes."""
    assert idx.equals(idx)
    assert not idx.equals(idx.values)
    assert idx.equals(Index(idx.values))
    assert idx.equal_levels(idx)
    assert not idx.equals(idx[:-1])
    assert not idx.equals(idx[-1])
    # different number of levels
    index = MultiIndex(levels=[Index(list(range(4))),
                               Index(list(range(4))),
                               Index(list(range(4)))],
                       codes=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
                              np.array([0, 1, 0, 0, 0, 1, 0, 1]),
                              np.array([1, 0, 1, 1, 0, 0, 1, 0])],
                       )
    index2 = MultiIndex(levels=index.levels[:-1], codes=index.codes[:-1])
    assert not index.equals(index2)
    assert not index.equal_levels(index2)
    # levels are different
    major_axis = Index(list(range(4)))
    minor_axis = Index(list(range(2)))
    major_codes = np.array([0, 0, 1, 2, 2, 3])
    minor_codes = np.array([0, 1, 0, 0, 1, 0])
    index = MultiIndex(levels=[major_axis, minor_axis],
                       codes=[major_codes, minor_codes])
    assert not idx.equals(index)
    assert not idx.equal_levels(index)
    # some of the labels are different
    major_axis = Index(['foo', 'bar', 'baz', 'qux'])
    minor_axis = Index(['one', 'two'])
    major_codes = np.array([0, 0, 2, 2, 3, 3])
    minor_codes = np.array([0, 1, 0, 1, 0, 1])
    index = MultiIndex(levels=[major_axis, minor_axis],
                       codes=[major_codes, minor_codes])
    assert not idx.equals(index)
def test_identical(idx):
    """identical() is stricter than equals(): names and tupleize matter."""
    left = idx.copy()
    right = idx.copy()
    assert left.identical(right)

    # Renaming breaks identical() but not equals().
    left = left.set_names(['new1', 'new2'])
    assert left.equals(right)
    assert not left.identical(right)

    right = right.set_names(['new1', 'new2'])
    assert left.identical(right)

    # Rebuilt from tuples: identical only while tupleize_cols is left on.
    as_tuples = Index(left.tolist(), names=left.names)
    as_plain = Index(left.tolist(), names=left.names, tupleize_cols=False)
    assert left.identical(as_tuples)
    assert not left.identical(as_plain)
    assert left.equals(as_plain)
def test_equals_operator(idx):
    # GH9785
    """Self-comparison with ``==`` yields an all-True boolean array."""
    comparison = idx == idx
    assert comparison.all()
def test_equals_missing_values():
    """Slices of a NaT-containing MultiIndex never equal a scalar entry."""
    # make sure take is not using -1
    mi = pd.MultiIndex.from_tuples([(0, pd.NaT),
                                    (0, pd.Timestamp('20130101'))])
    # Comparing a length-1 slice against the raw tuple must be False.
    assert not mi[0:1].equals(mi[0])
    assert not mi[1:2].equals(mi[1])
def test_is_():
    """is_() checks object identity: views share it, new levels do not.

    NOTE(review): ``set_names``/``set_levels`` with ``inplace=True`` was
    removed in pandas >= 2.0 - this test targets the older API.
    """
    mi = MultiIndex.from_tuples(zip(range(10), range(10)))
    assert mi.is_(mi)
    assert mi.is_(mi.view())
    assert mi.is_(mi.view().view().view().view())
    mi2 = mi.view()
    # names are metadata, they don't change id
    mi2.names = ["A", "B"]
    assert mi2.is_(mi)
    assert mi.is_(mi2)
    assert mi.is_(mi.set_names(["C", "D"]))
    mi2 = mi.view()
    mi2.set_names(["E", "F"], inplace=True)
    assert mi.is_(mi2)
    # levels are inherent properties, they change identity
    mi3 = mi2.set_levels([list(range(10)), list(range(10))])
    assert not mi3.is_(mi2)
    # shouldn't change
    assert mi2.is_(mi)
    mi4 = mi3.view()
    # GH 17464 - Remove duplicate MultiIndex levels
    mi4.set_levels([list(range(10)), list(range(10))], inplace=True)
    assert not mi4.is_(mi3)
    mi5 = mi.view()
    mi5.set_levels(mi5.levels, inplace=True)
    assert not mi5.is_(mi)
def test_is_all_dates(idx):
    """The fixture MultiIndex is never an all-dates index."""
    assert not idx.is_all_dates
def test_is_numeric(idx):
    """is_numeric() is always False for a MultiIndex."""
    # MultiIndex is never numeric
    assert not idx.is_numeric()
def test_multiindex_compare():
    # GH 21149
    """Comparison ops on a single-level MultiIndex behave like the
    multi-level case."""
    midx = pd.MultiIndex.from_product([[0, 1]])

    # Equality against itself is elementwise True.
    tm.assert_series_equal(pd.Series(midx == midx),
                           pd.Series([True, True]))

    # Strict greater-than against itself is elementwise False.
    tm.assert_series_equal(pd.Series(midx > midx),
                           pd.Series([False, False]))
| |
# Django settings for fle_site project.
import os
try:
    from local_settings import *
    import local_settings
except ImportError:
    # No local_settings module available: fall back to an empty dict.
    # This works because every consumer below uses getattr(...) with a
    # default, which is valid on any object (a dict has no such attributes,
    # so every lookup takes its default).
    local_settings = {}
def localor(setting_name, default_val):
    """Returns local_settings version if it exists (and is non-empty), otherwise uses default value"""
    # Equivalent to the classic ``hasattr(...) and getattr(...) or default``
    # idiom: a missing OR falsy local setting falls back to the default.
    value = getattr(local_settings, setting_name, None)
    return value if value else default_val
DEBUG = getattr(local_settings, "DEBUG", False)
TEMPLATE_DEBUG = getattr(local_settings, "TEMPLATE_DEBUG", DEBUG)
#retrieve Constantcontact info from local_settings
CONSTANT_CONTACT_API_KEY = getattr(local_settings, "CONSTANT_CONTACT_API_KEY", 'api-key-not-found')
CONSTANT_CONTACT_ACCESS_TOKEN = getattr(local_settings, "CONSTANT_CONTACT_ACCESS_TOKEN", 'access-token-not-found')
CONSTANT_CONTACT_API_URL = getattr(local_settings, "CONSTANT_CONTACT_API_URL", 'api-url-not-found')
CONSTANT_CONTACT_LIST_ID = getattr(local_settings, "CONSTANT_CONTACT_LIST_ID", 'list-id-not-found')
ADMINS = (
    # ('Dylan', 'dylan@learningequality.org'),
)
MANAGERS = ADMINS
PROJECT_PATH = os.path.dirname(os.path.realpath(__file__))
# To render map, define GEOIPDAT and IPS_FILEPATH in local_settings.py ex:
# import os
# DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "static/data/")
# GEOIPDAT = os.path.join(DATA_PATH, "GeoLiteCity.dat")
# IPS_FILEPATH = os.path.join(DATA_PATH, "ips.txt")
LOCATIONS_JSONP_URL = getattr(local_settings, "LOCATIONS_JSONP_URL", "https://kalite.learningequality.org/media/locations/locations.jsonp")
DATABASES = localor("DATABASES", {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': os.path.join(PROJECT_PATH, '..', 'database.sqlite'), # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
})
#GEO IP Data
GEO_IP_DOWNLOAD_URL = getattr(local_settings, "GEO_IP_DOWNLOAD_URL", "http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz")
GEO_IP_DATA_PATH = getattr(local_settings, "GEO_IP_DATA_PATH", os.path.join(PROJECT_PATH, "..", "data", "GeoLiteCity.dat"))
ISO_COUNTRY_LIST_DATA_PATH = getattr(local_settings, "ISO_COUNTRY_LIST_DATA_PATH", os.path.join(PROJECT_PATH, "..", "data", "country-list-iso-codes.txt"))
INTERNAL_IPS = getattr(local_settings, "INTERNAL_IPS", ("127.0.0.1",))
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
MEDIA_URL = getattr(local_settings, "MEDIA_URL", "/media/")
MEDIA_ROOT = os.path.realpath(getattr(local_settings, "MEDIA_ROOT", os.path.join(PROJECT_PATH, "media"))) + "/"
STATIC_URL = getattr(local_settings, "STATIC_URL", "/static/")
STATIC_ROOT = os.path.realpath(getattr(local_settings, "STATIC_ROOT",os.path.join(PROJECT_PATH, "..", "_static_cache"))) + "/"
# NOTE(review): "*" disables Django's Host-header validation; restrict this
# to the real domain(s) in production deployments.
ALLOWED_HOSTS = ["*"]
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(PROJECT_PATH, "static"),
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this fallback SECRET_KEY is committed to source control;
# production deployments must override it via local_settings.
SECRET_KEY = getattr(local_settings, "SECRET_KEY", "@$=b3-wk2zv9oy_8dk))q_9h45pp*o=ntyh!_3bd-13p5761f%")
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'fle_site.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'fle_site.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.request',
    'fle_site.apps.main.custom_context_processors.debug',
    'django.contrib.messages.context_processors.messages',
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.humanize',
    'django_extensions',
    'fack',
    'south',
    'file_resubmit',
    'fle_site.apps.articles',
    'fle_site.apps.main',
    'fle_site.apps.about',
    'fle_site.apps.ka_lite',
    'fle_site.apps.redirects',
    'ckeditor',
    'ckeditor_uploader',
)
# CKEditor rich-text editor configuration.
CKEDITOR_UPLOAD_PATH = '/fileupload/'
CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
CKEDITOR_CONFIGS = {
    'default': {
        'skin': 'moono',
        # 'skin': 'office2013',
        'toolbar_Basic': [
            ['Source', '-', 'Bold', 'Italic']
        ],
        'toolbar_YouCustomToolbarConfig': [
            {'name': 'document', 'items': ['Source']},
            {'name': 'clipboard', 'items': ['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-', 'Undo', 'Redo']},
            {'name': 'basicstyles',
             'items': ['Bold', 'Italic', 'Underline', 'Strike', 'Subscript', 'Superscript', '-', 'RemoveFormat']},
            {'name': 'paragraph',
             'items': ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote', 'CreateDiv', '-',
                       'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock', '-', 'BidiLtr', 'BidiRtl',
                       'Language']},
            {'name': 'links', 'items': ['Link', 'Unlink', 'Anchor']},
            {'name': 'insert',
             'items': ['Image', 'Table', 'HorizontalRule', 'SpecialChar', 'PageBreak']},
            '/',
            {'name': 'styles', 'items': ['Styles', 'Format', 'Font', 'FontSize']},
            {'name': 'colors', 'items': ['TextColor', 'BGColor']},
            {'name': 'tools', 'items': ['Maximize', 'ShowBlocks']},
        ],
        'toolbar': 'YouCustomToolbarConfig', # put selected toolbar config here
        'tabSpaces': 4,
        'uploadUrl': CKEDITOR_UPLOAD_PATH,
        'extraPlugins': ','.join(
            [
                # you extra plugins here
                'uploadimage',
            ]),
    }
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Django-articles settings
DISQUS_USER_API_KEY = localor("DISQUS_USER_API_KEY", "")
DISQUS_FORUM_SHORTNAME = "learningequality"
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    },
    "file_resubmit": {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        "LOCATION": '/tmp/file_resubmit/'
    },
}
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
STRIPE_SECRET_API_KEY = getattr(local_settings, "STRIPE_SECRET_API_KEY", "")
STRIPE_PUBLISHABLE_API_KEY = getattr(local_settings, "STRIPE_PUBLISHABLE_API_KEY", "")
| |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 29 17:05:54 2015
@author: anderson
"""
import numpy as np
from copy import copy
from DataObj import *
from EventList import *
from SpikeObj import *
from pyhfo.io.read_header import read_header
from aux_func import Timer
import os
import matlab.engine
import scipy.io as sio
import shutil
import h5py
import sys
import scipy.signal as sig
def getSpike_from_DAT(folder,fname,ch,clus_folder,SPK,max_points = 90000000 ):
    """Run wave_clus spike sorting (via MATLAB) on one raw .dat channel file.

    Reads the int16 samples from folder+fname, exports them as .mat for
    wave_clus, drives Get_spikes/Do_clustering through the MATLAB engine,
    and loads the resulting 'times_*.mat' into SPK.

    Parameters
    ----------
    folder : str
        Directory (with trailing separator) holding the .dat file.
    fname : str
        Raw data file name; the '.dat' extension is stripped to form names.
    ch : channel identifier forwarded to loadSPK_waveclus.
    clus_folder : str
        Working folder for wave_clus input/output.
    SPK : event list updated in place / replaced with the sorted spikes.
    max_points : int
        Above this sample count the data is exported in 10 segments so
        MATLAB does not load one huge array.

    Returns
    -------
    SPK, with the sorted spikes added when wave_clus produced output.
    """
    fh = open(folder+fname,'r')
    fh.seek(0)
    data = np.fromfile(fh, dtype=np.short, count=-1)
    fh.close()
    # Start from a clean clustering folder (stale wave_clus output removed).
    clear_clus_folder(clus_folder)
    name = fname[:-4]
    data = np.double(data)
    # Files.txt lists the .mat files wave_clus should process.
    f = open(clus_folder + 'Files.txt', 'w')
    f.write(name +'\n')
    if data.shape[0]>max_points:
        # Too large for one export: split into 10 equal segments.
        nseg = 10
        for j in range(nseg):
            tsmin = int((j)*np.floor(data.shape[0]/nseg))
            tsmax = int((j+1)*np.floor(data.shape[0]/nseg))
            sio.savemat(clus_folder+name+'_'+str(j)+'.mat', {'data':data[tsmin:tsmax]})
            f.write(name+'_'+str(j) +'\n')
    else:
        sio.savemat(clus_folder+name+'.mat', {'data':data})
    f.close()
    # Drive wave_clus inside MATLAB: detection, then clustering.
    eng = matlab.engine.start_matlab()
    eng.cd(clus_folder,nargout=0)
    eng.Get_spikes(nargout=0)
    eng.Do_clustering(nargout=0)
    # Archive figures/results produced by wave_clus next to the raw data.
    move_figs(folder,clus_folder)
    eng.close('all', nargout=0)
    eng.quit()
    # wave_clus writes its sorted output as 'times_<name>.mat'.
    fname = clus_folder + 'times_' + name +'.mat'
    if os.path.isfile(fname):
        SPK = loadSPK_waveclus(fname,SPK,ch)
    return SPK
def move_figs(folder, clus_folder):
    """Move wave_clus output files from the clustering folder to *folder*.

    Figures (.jpg) and result files (.mat, .mat500) are relocated, one
    extension at a time, re-listing the directory for each extension.
    Both folder arguments are expected to carry a trailing separator.
    """
    os.chdir(clus_folder)
    for suffix in (".jpg", ".mat", ".mat500"):
        matching = [name for name in os.listdir(".") if name.endswith(suffix)]
        for name in matching:
            shutil.move(clus_folder + name, folder + name)
def get_len(folder, fname):
    """Return the number of int16 samples stored in a raw .dat file.

    Parameters
    ----------
    folder : str
        Directory (with trailing separator) containing the file.
    fname : str
        File name of the raw binary recording.

    Returns
    -------
    int
        Number of 16-bit samples in the file.
    """
    # Open in binary mode ('rb'): the file holds raw int16 samples and text
    # mode would corrupt the stream on platforms that translate newlines.
    # The context manager guarantees the handle is closed even on error
    # (the original leaked it if np.fromfile raised).
    with open(folder + fname, 'rb') as fh:
        data = np.fromfile(fh, dtype=np.short, count=-1)
    return data.shape[0]
def loadSPK_waveclus(filename,EventList,ch):
    '''
    Load spikes sorted by wave_clus into an EventList.

    Parameters
    ----------
    filename: str
        Name of the spike (.mat) file. Expected to contain
        'cluster_class' (N x 2: cluster id, time in ms), 'spikes'
        (N waveforms) and 'inspk' (N feature vectors).
    EventList: EventList
        Destination list; one SpikeObj is appended per waveform.
    ch: channel identifier stored on each SpikeObj.

    Returns
    -------
    EventList with the loaded spikes appended.
    '''
    mat = sio.loadmat(filename, struct_as_record=False, squeeze_me=True)
    if mat['cluster_class'].size > 0:
        # Column 0: cluster assignment; column 1: spike time (ms -> s).
        clusters = mat['cluster_class'][:,0]
        times = mat['cluster_class'][:,1]/1000
        spikes = mat['spikes']
        features = mat['inspk']
        # Human-readable label per cluster id (0..max).
        labels = []
        for cl in range(int(max(clusters))+1):
            labels.append('Cluster '+str(cl))
        for idx,waveform in enumerate(spikes):
            tstamp = times[idx]
            clus = clusters[idx]
            feat= features[idx]
            # NOTE(review): [-20, 44] looks like the wave_clus window in
            # samples around the spike peak — confirm units expected by
            # SpikeObj.
            time_edge = [-20,44]
            spk = SpikeObj(ch,waveform,tstamp,clus,feat,time_edge)
            EventList.__addEvent__(spk)
    return EventList
def clear_clus_folder(clus_folder):
    """Delete leftover wave_clus files so a new clustering run starts clean.

    Removes .mat, .mat500, .mat400 and .lab files plus anything ending in
    "01" or "run" from *clus_folder*, one suffix class at a time.
    Note: changes the process working directory to *clus_folder*.
    """
    os.chdir(clus_folder)
    for suffix in (".mat", ".mat500", ".mat400", ".lab", "01", "run"):
        stale = [name for name in os.listdir(".") if name.endswith(suffix)]
        for name in stale:
            os.remove(name)
def get_info(folder):
    """Open the Intan 'info.rhd' header file in *folder* and return it.

    NOTE(review): `RHD` is not imported in this chunk — confirm it is
    brought into scope elsewhere in this module. Python 2 print statement
    below ties this file to Python 2.
    """
    # load file
    print folder+'info.rhd'
    myData = RHD.openRhd(folder+'info.rhd')
    # get sample rate
    sys.stdout.flush()
    return myData
def openDATfile(filename, ftype, srate=25000):
    """Read one raw Intan .dat stream and convert it to physical units.

    Parameters
    ----------
    filename : str
        Path of the raw binary file.
    ftype : str
        One of 'amp' (int16 amplifier samples), 'adc' (uint16 board ADC),
        'aux' (uint16 auxiliary inputs) or 'time' (int32 sample indices).
    srate : int
        Sampling rate in Hz; only used to convert 'time' to seconds.

    Returns
    -------
    numpy.ndarray of float64 in the appropriate unit.

    Raises
    ------
    ValueError
        If ``ftype`` is not one of the supported stream types (the
        original fell through and crashed with a NameError).
    """
    # Binary mode: these files are raw sample streams; text mode would
    # corrupt them on newline-translating platforms. The context manager
    # fixes the original's leaked handle on error paths.
    with open(filename, 'rb') as fh:
        if ftype == 'amp':
            data = np.fromfile(fh, dtype=np.int16)
            data = np.double(data)
            data *= 0.195  # Intan amplifier scale: 0.195 uV per bit
        elif ftype == 'adc':
            data = np.fromfile(fh, dtype=np.uint16)
            data = np.double(data)
            data *= 0.000050354  # board ADC scale: 50.354e-6 V per bit
            data -= np.mean(data)  # remove the DC offset of the ADC trace
        elif ftype == 'aux':
            data = np.fromfile(fh, dtype=np.uint16)
            data = np.double(data)
            data *= 0.0000748  # aux input scale: 74.8e-6 V per bit
        elif ftype == 'time':
            data = np.fromfile(fh, dtype=np.int32)
            data = np.double(data)
            data /= srate  # sample indices -> seconds
        else:
            raise ValueError("unknown ftype: %r" % (ftype,))
    return data
def loadITANfolder(folder,q=25):
    """Load a whole Intan recording folder, spike-sort each amplifier
    channel via wave_clus (MATLAB engine), and return a decimated DataObj.

    Parameters
    ----------
    folder : str
        Recording folder (with trailing separator) holding amp-*/board-ADC-*
        .dat files plus info.rhd and time.dat.
    q : int
        Decimation factor applied to every channel (default 25).

    Returns
    -------
    DataObj with the decimated amp + ADC channels; also saved to
    'downsampled.h5' inside *folder*.
    """
    with Timer(folder):
        #get files in the folder
        files = getFileList(folder)
        # load info
        fid = open(folder+files['info'][0], 'rb')
        info = read_header(fid)
        sys.stdout.flush()
        #Sampling Rate
        sample_rate = info['sample_rate']
        # Time axis, decimated with the same factor as the data.
        time_vec = openDATfile(folder+files['time'][0],'time',sample_rate)
        time_vec = time_vec[0:-1:q]
        amp_unit = '$\mu V$'
        labels = []
        nch = len(files['amp']) + len(files['adc']) # +len(files['aux'])
        data = np.zeros([time_vec.shape[0],nch])
        # wave_clus lives where MATLAB finds Get_spikes; strip the file name
        # ('Get_spikes.m' + separator = 12 chars) to get its folder.
        eng = matlab.engine.start_matlab()
        clus_folder = eng.which('Get_spikes')
        clus_folder = clus_folder[:-12]
        eng.cd(clus_folder,nargout=0)
        count = 0
        for f in files['amp']:
            sys.stdout.flush()
            with Timer(f):
                name = f[:-4]
                labels.append(name)
                aux_data = openDATfile(folder+f,'amp',sample_rate)
                # Files.txt tells wave_clus which .mat file to process.
                tfile = open(clus_folder + 'Files.txt', 'w')
                tfile.write(name +'\n')
                tfile.close()
                sio.savemat(clus_folder+name+'.mat', {'data':aux_data})
                eng.Get_spikes_alt(sample_rate,nargout=0)
                eng.close('all', nargout=0)
                move_figs(folder,clus_folder)
                # Store the decimated trace for the returned DataObj.
                data[:,count] = sig.decimate(aux_data,q)
                count +=1
        # Export sorted results to NEX format from within MATLAB.
        eng.cd(folder,nargout=0)
        eng.save_NEX(sample_rate,labels,nargout=0)
        eng.save_NEX2(sample_rate,labels,int(time_vec[0]),nargout=0)
        for f in files['adc']:
            sys.stdout.flush()
            with Timer(f):
                labels.append(f[:-4])
                aux_data = openDATfile(folder+f,'adc',sample_rate)
                data[:,count] = sig.decimate(aux_data,q)
                count +=1
        # for f in files['aux']:
        # print '.',
        # sys.stdout.flush()
        # labels.append(f[:-4])
        # data[:,count] = openDATfile(folder+f,'aux',edge=edge)
        # count +=1
        Data = DataObj(data,sample_rate/q,amp_unit,labels,time_vec,[])
        Data.save(folder+'downsampled.h5','data')
        eng.quit()
        return Data
def getFileList(folder):
    """Scan *folder* and group Intan data files by signal type.

    Returns a dict with keys 'amp', 'adc', 'aux', 'vdd', 'info' and
    'time', each mapping to a sorted list of matching file names.
    """
    entries = os.listdir(folder)
    patterns = (
        ('amp', 'amp', '.dat'),
        ('adc', 'board-ADC', '.dat'),
        ('aux', 'aux', '.dat'),
        ('vdd', 'vdd', '.dat'),
        ('info', 'info', '.rhd'),
        ('time', 'time', '.dat'),
    )
    dic_labels = {}
    for key, prefix, suffix in patterns:
        dic_labels[key] = sorted(
            name for name in entries
            if name.startswith(prefix) and name.endswith(suffix))
    return dic_labels
def open_file_DAT(folder, ports, nchans, srate, bsize=None, starttime=0):
    """Load a block of samples from per-channel Intan amp-*.dat files.

    Parameters
    ----------
    folder : str
        Directory (with trailing separator) holding the .dat files.
    ports : sequence of str
        Port letters, e.g. ['A', 'B']; file names are 'amp-<port>-<nnn>.dat'.
    nchans : sequence of int
        Number of channels per port (parallel to *ports*).
    srate : int
        Sampling rate in Hz.
    bsize : int or None
        Samples to read per channel; defaults to one second (srate).
    starttime : float
        Offset into the recording, in seconds.

    Returns
    -------
    DataObj with the scaled (uV) data, labels and time vector.
    """
    if bsize is None:  # fixed: identity test, not '== None'
        bsize = srate  # default block: one second of data
    nch = sum(nchans)
    data = np.zeros([bsize, nch])
    count = 0
    labels = []
    for p in range(len(ports)):
        root = 'amp-' + ports[p] + '-'
        for ch in range(nchans[p]):
            # Channel index zero-padded to three digits (e.g. '007').
            fname = root + str(ch).rjust(3, '0') + '.dat'
            labels.append(fname)
            # Binary mode + context manager: raw int16 stream, and the
            # original leaked the handle if np.fromfile raised.
            with open(folder + fname, 'rb') as fh:
                # 2 bytes per int16 sample; seek() needs an int offset.
                fh.seek(int(np.round(starttime * srate)) * 2)
                data[:, count] = np.fromfile(fh, dtype=np.short, count=bsize)
            count += 1
    data *= 0.195  # Intan amplifier scale: 0.195 uV per bit
    amp_unit = '$\mu V$'
    # Time vector covering the block, starting at 0.
    n_points = data.shape[0]
    end_time = n_points / srate
    time_vec = np.linspace(0, end_time, n_points, endpoint=False)
    Data = DataObj(data, srate, amp_unit, labels, time_vec, [])
    return Data
def getDATduration(folder, port, ch):
    """Return the number of int16 samples on one amplifier channel.

    Parameters
    ----------
    folder : str
        Directory (with trailing separator) holding the .dat files.
    port : str
        Port letter (file name pattern 'amp-<port>-<nnn>.dat').
    ch : int or str
        Channel number; zero-padded to three digits.

    Returns
    -------
    int : sample count of the channel file.
    """
    fname = 'amp-' + port + '-' + str(ch).rjust(3, '0') + '.dat'
    # Binary mode for the raw int16 stream; the context manager closes the
    # handle (the original never closed it).
    with open(folder + fname, 'rb') as fh:
        data = np.fromfile(fh, dtype=np.short, count=-1)
    return data.shape[0]
def pca2(data):
    """Remove the common-mode component of each channel estimated from the
    other channels' top two principal components (artifact suppression).

    Parameters
    ----------
    data : ndarray, shape (npoint, ch)
        Multichannel signal. WARNING: mean-centered IN PLACE — the
        caller's array is modified.

    Returns
    -------
    ndarray, shape (npoint, ch)
        Per-channel signal with the projection onto the other channels'
        two largest PCs regressed out.
    """
    npoint,ch = data.shape
    # Mean-center every channel (mutates the input array).
    Mn = np.mean(data,0)
    for i in range(ch):
        data[:,i] = data[:,i] - Mn[i]
    C = np.cov(data.T)
    a1 = np.zeros(ch)
    a2 = np.zeros(ch)
    #art1 = np.zeros(npoint)
    #art2 = np.zeros(npoint)
    pcadata = np.zeros([npoint,ch])
    for i in range(ch):
        # All channel indices except i.
        # NOTE(review): `x is not i` relies on small-int identity caching;
        # `x != i` would be the robust spelling — confirm channel counts
        # stay small.
        j = [x for x in range(ch) if x is not i]
        noti = data[:,j]
        # Covariance of the other channels only.
        Cnoti = C[np.ix_(j,j)]
        w,d = np.linalg.eig(Cnoti)
        k = np.argsort(w)
        # NOTE(review): d[k] reorders eigenvector ROWS; eigenvectors of
        # np.linalg.eig are COLUMNS (d[:, k]) — confirm intended behavior
        # before changing, downstream math was tuned to this.
        d = d[k]
        # Diagonal eigenvalue matrix, sorted ascending; keep the two
        # largest components (last two columns).
        v = np.identity(ch-1)*w
        v = v[k]
        v= v[:,-2:]
        # Project the other channels onto those components, then append
        # channel i as the third column.
        pc = np.dot(noti,v)
        pc = np.append(pc,data[:,i][...,None],1)
        Cpc = np.cov(pc.T)
        # Regression coefficients of channel i on each component.
        a1[i] = Cpc[0,2]/Cpc[0,0]
        a2[i] = Cpc[1,2]/Cpc[1,1]
        #art1 += a1[i]*pc[:,0]
        #art2 += a2[i]*pc[:,1]
        # Subtract the fitted common-mode contribution from channel i.
        pcadata[:,i] = data[:,i] - a1[i]*pc[:,0] - a2[i]*pc[:,1]
    #art1 /= ch
    #art2 /= ch
    #pca12 = np.append(art1[...,None],art2[...,None],1)
    return pcadata
def FindBigStuff(data, xsd=3, sd_method='Quian'):
    """Locate supra-threshold segments on every channel of *data*.

    Parameters
    ----------
    data : ndarray, shape (samples, channels)
    xsd : float
        Threshold multiplier.
    sd_method : str
        'Quian' (robust median-based estimate, median(|x|)/0.6745) or
        'STD' (plain standard deviation).

    Returns
    -------
    (segments, thresholds) where segments is an (N, 3) array of
    [channel, start_index, stop_index] rows and thresholds holds the
    per-channel detection threshold.
    """
    n_samples, n_ch = data.shape
    thresholds = np.zeros(n_ch)
    # Dummy first row; dropped before returning.
    detected = np.array([0, 0, 0])[None, ...]
    for ch in range(n_ch):
        trace = data[:, ch]
        if sd_method == 'Quian':
            thresholds[ch] = xsd * np.median(np.abs(trace)) / 0.6745
        elif sd_method == 'STD':
            thresholds[ch] = np.std(trace) * xsd
        # +1 marks a rising crossing, -1 a falling one.
        transitions = np.diff(np.where(abs(trace) > thresholds[ch], 1, 0))
        starts = np.nonzero(transitions == 1)[0]
        stops = np.nonzero(transitions == -1)[0]
        if len(starts) != 0:
            # Pad when a segment runs off either end of the trace.
            if len(starts) - 1 == len(stops):
                stops = np.append(stops, n_samples)
            elif len(starts) == len(stops) - 1:
                starts = np.append(0, starts)
            chan_col = np.ones(starts.shape) * ch
            rows = np.append(chan_col[..., None], starts[..., None], 1)
            rows = np.append(rows, stops[..., None], 1)
            detected = np.append(detected, rows, 0)
    return np.delete(detected, (0), axis=0), thresholds
def ReplaceBigStuff(data, biglist, replacearray, postpts=10, prepts=10):
    """Return a copy of *data* with detected segments swapped for the
    corresponding samples of *replacearray*.

    Parameters
    ----------
    data : ndarray, shape (samples, channels)
    biglist : iterable of (channel, start, stop) rows (as produced by
        FindBigStuff).
    replacearray : ndarray, same shape as data — source of the samples.
    postpts, prepts : int
        Padding added after/before each segment, clamped at the array
        boundaries.
    """
    patched = copy(data)
    n_rows = data.shape[0]
    for ch, seg_start, seg_stop in biglist:
        pad_pre = prepts if seg_start - prepts > 0 else seg_start - 1
        pad_post = postpts if seg_stop + postpts < n_rows else n_rows - seg_stop
        lo = int(seg_start - pad_pre)
        hi = int(seg_stop + pad_post)
        patched[lo:hi, int(ch)] = replacearray[lo:hi, int(ch)]
    return patched
def clearData(Data, ptspercut, postpts=10, prepts=10, xsd=3):
    """Suppress common-mode artifacts in Data, block by block.

    Each block of *ptspercut* samples is PCA-cleaned (pca2), large events
    are detected (FindBigStuff) and replaced by a noise estimate so they
    do not bias the second PCA pass; the residual after the final pass is
    stored as the cleaned signal.

    Parameters
    ----------
    Data : DataObj with .data shaped (samples, channels) or transposed.
    ptspercut : int
        Block length in samples.
    postpts, prepts : int
        Padding around detected events when replacing them.
    xsd : float
        Detection threshold multiplier, forwarded to FindBigStuff.
        (Fixed: the original ignored this parameter and hard-coded 3.)

    Returns
    -------
    DataObj with the cleaned data and the original metadata.
    """
    data = Data.data
    m, n = data.shape
    if n > m:
        # Ensure samples-by-channels orientation.
        data = data.T
        m, n = data.shape
    # Explicit floor division: under true division (Python 3 / floats)
    # range() would receive a float and fail. Trailing partial block is
    # not processed, matching the original behavior.
    last = int(m // ptspercut)
    cleared = copy(data)
    for ci in range(last):
        if (ci + 1) * ptspercut > m:
            stop = m
        else:
            stop = (ci + 1) * ptspercut
        start = ci * ptspercut
        tdata = data[np.arange(start, stop), :, ]
        # Pass 1: PCA clean, then detect large events on the cleaned data.
        pcadata = pca2(tdata)
        noiseEst = tdata - pcadata
        biglist, s = FindBigStuff(pcadata, xsd=xsd)
        # Pass 2: zero out events, re-estimate the noise.
        replacearray = np.zeros(tdata.shape)
        NoSpikesData = ReplaceBigStuff(tdata, biglist, replacearray, postpts, prepts)
        pcadata = pca2(NoSpikesData)
        noiseEst = NoSpikesData - pcadata
        # Pass 3: replace events with the noise estimate and clean again.
        replacearray = noiseEst
        NoSpikesData = ReplaceBigStuff(tdata, biglist, replacearray, postpts, prepts)
        pcadata = pca2(NoSpikesData)
        cleared[np.arange(start, stop), :, ] = tdata - pcadata
    Cleared = DataObj(cleared, Data.sample_rate, Data.amp_unit, Data.ch_labels,
                      Data.time_vec, [])
    return Cleared
def GetSpike(Data,ptspercut=None,xsd=3,postpts = 10,prepts = 10, min_sep = None,sd_method = 'Quian'):
    """Detect threshold-crossing spikes block by block and collect them
    into an EventList of SpikeObj.

    Parameters
    ----------
    Data : DataObj with .data (samples x channels), .sample_rate,
        .ch_labels and .time_vec.
    ptspercut : int or None
        Block length in samples; defaults to one second (sample_rate).
    xsd : float
        Threshold multiplier forwarded to FindBigStuff.
    postpts, prepts : int
        Waveform window after/before the detected crossing, in samples.
    min_sep : float or None
        Minimum separation (s) between accepted spikes on a channel;
        defaults to the waveform window duration.
    sd_method : str
        Threshold estimator ('Quian' or 'STD'), see FindBigStuff.

    Returns
    -------
    EventList with one SpikeObj per accepted detection.
    """
    # Waveform time axis relative to the alignment point, in seconds.
    time_edge = np.array([-prepts, postpts]) / float(Data.sample_rate)
    if min_sep is None:
        min_sep = float(prepts+postpts)/Data.sample_rate
    Spikes = EventList(Data.ch_labels,(Data.time_vec[0],Data.time_vec[-1]))
    data = Data.data
    m,n = data.shape
    if n>m:
        # Ensure samples-by-channels orientation.
        data = data.T
        m,n = data.shape
    if ptspercut is None:
        ptspercut = Data.sample_rate
    # NOTE(review): integer division intended (Python 2); any trailing
    # partial block is skipped.
    last = m/ptspercut
    for ci in range(last):
        if (ci+1)*ptspercut > m:
            stop = m
        else:
            stop = (ci+1)*ptspercut
        start = ci * ptspercut
        tdata = data[np.arange(start, stop),:,]
        biglist,ths = FindBigStuff(tdata,xsd =xsd,sd_method=sd_method)
        if biglist.shape[0]>0:
            # ch_a tracks the first channel seen so tn (last accepted
            # spike time) is re-seeded once per block per leading channel.
            ch_a = biglist[0,0]
            for ch,a,b in biglist:
                # Segment indices are block-relative; shift to absolute.
                a += start
                b += start
                if ch == ch_a:
                    tn = start
                    tn /= float(Data.sample_rate)
                    ch_a += 1
                # Skip events too close to either recording edge to cut
                # a full waveform window.
                if a - prepts > 0:
                    pass
                else:
                    continue
                if b + postpts < Data.data.shape[0]:
                    pass
                else:
                    continue
                aux = Data.data[int(a-prepts):int(b+postpts),int(ch)]
                #aux_idx = atime-a + np.argmax(abs(aux))
                #print np.nonzero(np.diff(np.where(abs(aux) > ths[int(ch)],1,0))==1)[0][0]
                #aux_idx = a-prepts + np.nonzero(abs(aux) > ths[int(ch)])[0][0]
                # If no rising crossing is found inside the window (event
                # already above threshold at the window start), widen the
                # window one prepts to the left and retry.
                if len(np.nonzero(np.diff(np.where(abs(aux) > ths[int(ch)],1,0))==1)[0]) ==0:
                    a -= prepts
                    aux = Data.data[int(a-prepts):int(b+postpts),int(ch)]
                # Align on the first rising threshold crossing.
                aux_idx = a-prepts + np.nonzero(np.diff(np.where(abs(aux) > ths[int(ch)],1,0))==1)[0][0]
                waveform = Data.data[int(aux_idx-prepts):int(aux_idx+postpts),int(ch)]
                if waveform.shape[0] != postpts+prepts:
                    continue
                tstamp = aux_idx/Data.sample_rate
                # Enforce the refractory-like minimum separation.
                if tstamp - tn < min_sep:
                    #print ch, tstamp,tn, tstamp-tn, min_sep
                    continue
                tn = tstamp
                clus = 0
                feat = 0
                spk = SpikeObj(ch,waveform,tstamp,clus,feat,time_edge)
                Spikes.__addEvent__(spk)
    return Spikes
| |
"""Games, or Adversarial Search. (Chapter 5)
"""
from . utils import *
import random
#______________________________________________________________________________
# Minimax Search
def minimax_decision(state, game):
    """Given a state in a game, calculate the best move by searching
    forward all the way to the terminal states. [Fig. 5.3]"""
    player = game.to_move(state)

    def max_value(s):
        # Best utility *player* can guarantee from s.
        if game.terminal_test(s):
            return game.utility(s, player)
        best = -infinity
        for action in game.actions(s):
            best = max(best, min_value(game.result(s, action)))
        return best

    def min_value(s):
        # Worst utility the opponent can force on *player* from s.
        if game.terminal_test(s):
            return game.utility(s, player)
        worst = infinity
        for action in game.actions(s):
            worst = min(worst, max_value(game.result(s, action)))
        return worst

    # Pick the action whose resulting state has the highest min-value.
    return argmax(game.actions(state),
                  lambda a: min_value(game.result(state, a)))
#______________________________________________________________________________
def alphabeta_full_search(state, game):
    """Search game to determine best action; use alpha-beta pruning.
    As in [Fig. 5.7], this version searches all the way to the leaves.

    Returns the best ACTION for the player to move (see Fig52Game's
    doctest, which expects 'a1')."""
    player = game.to_move(state)

    # Functions used by alphabeta
    def max_value(state, alpha, beta):
        if game.terminal_test(state):
            return game.utility(state, player)
        v = -infinity
        for a in game.actions(state):
            v = max(v, min_value(game.result(state, a), alpha, beta))
            if v >= beta:
                return v  # prune: MIN will never allow this branch
            alpha = max(alpha, v)
        return v

    def min_value(state, alpha, beta):
        if game.terminal_test(state):
            return game.utility(state, player)
        v = infinity
        for a in game.actions(state):
            v = min(v, max_value(game.result(state, a), alpha, beta))
            if v <= alpha:
                return v  # prune: MAX will never allow this branch
            beta = min(beta, v)
        return v

    # Body of alphabeta_search:
    # BUG FIX: previously returned max_value(state, ...) — the backed-up
    # utility VALUE, not a move. Choose the action maximizing the
    # min-value, mirroring minimax_decision.
    return argmax(game.actions(state),
                  lambda a: min_value(game.result(state, a),
                                      -infinity, infinity))
def alphabeta_search(state, game, d=4, cutoff_test=None, eval_fn=None):
    """Search game to determine best action; use alpha-beta pruning.
    This version cuts off search and uses an evaluation function.

    Parameters: d is the depth cutoff used by the default cutoff_test;
    eval_fn defaults to the game's true utility for the player to move.
    Returns the best ACTION (see Fig52Game's doctest, which expects 'a1')."""
    player = game.to_move(state)

    # Functions used by alphabeta
    def max_value(state, alpha, beta, depth):
        if cutoff_test(state, depth):
            return eval_fn(state)
        v = -infinity
        for a in game.actions(state):
            v = max(v, min_value(game.result(state, a),
                                 alpha, beta, depth + 1))
            if v >= beta:
                return v  # prune
            alpha = max(alpha, v)
        return v

    def min_value(state, alpha, beta, depth):
        if cutoff_test(state, depth):
            return eval_fn(state)
        v = infinity
        for a in game.actions(state):
            v = min(v, max_value(game.result(state, a),
                                 alpha, beta, depth + 1))
            if v <= alpha:
                return v  # prune
            beta = min(beta, v)
        return v

    # Body of alphabeta_search starts here:
    # The default test cuts off at depth d or at a terminal state
    cutoff_test = (cutoff_test or
                   (lambda state, depth: depth > d or game.terminal_test(state)))
    eval_fn = eval_fn or (lambda state: game.utility(state, player))
    # BUG FIX: previously returned max_value(state, ...) — a utility value,
    # not a move. Choose the action with the best min-value instead.
    return argmax(game.actions(state),
                  lambda a: min_value(game.result(state, a),
                                      -infinity, infinity, 0))
#______________________________________________________________________________
# Players for Games
def query_player(game, state):
    "Make a move by querying standard input."
    game.display(state)
    # SECURITY: eval() on raw user input executes arbitrary code — fine for
    # an interactive teaching tool, never for untrusted input.
    return num_or_str(eval(input('Your move? ')))
def random_player(game, state):
    "A player that chooses a legal move at random."
    legal_moves = game.actions(state)
    return random.choice(legal_moves)
def alphabeta_player(game, state):
    "A player that chooses its move with depth-limited alpha-beta search."
    return alphabeta_search(state, game)
def play_game(game, *players):
    """Play an n-person, move-alternating game.
    >>> play_game(Fig52Game(), alphabeta_player, alphabeta_player)
    3
    """
    state = game.initial
    while True:
        for current_player in players:
            chosen_move = current_player(game, state)
            state = game.result(state, chosen_move)
            if not game.terminal_test(state):
                continue
            # Report the score from the first mover's point of view.
            return game.utility(state, game.to_move(game.initial))
#______________________________________________________________________________
# Some Sample Games
class Game:
    """A game is similar to a problem, but it has a utility for each
    state and a terminal test instead of a path cost and a goal
    test. To create a game, subclass this class and implement actions,
    result, utility, and terminal_test. You may override display and
    successors or you can inherit their default methods. You will also
    need to set the .initial attribute to the initial state; this can
    be done in the constructor."""

    def actions(self, state):
        "Return a list of the allowable moves at this point."
        raise NotImplementedError

    def result(self, state, move):
        "Return the state that results from making a move from a state."
        raise NotImplementedError

    def utility(self, state, player):
        "Return the value of this final state to player."
        raise NotImplementedError

    def terminal_test(self, state):
        "Return True if this is a final state for the game."
        # Default: a state with no legal moves is terminal.
        return not self.actions(state)

    def to_move(self, state):
        "Return the player whose move it is in this state."
        return state.to_move

    def display(self, state):
        "Print or otherwise display the state."
        print(state)

    def __repr__(self):
        return '<%s>' % self.__class__.__name__
class Fig52Game(Game):
    """The game represented in [Fig. 5.2]. Serves as a simple test case.
    >>> g = Fig52Game()
    >>> minimax_decision('A', g)
    'a1'
    >>> alphabeta_full_search('A', g)
    'a1'
    >>> alphabeta_search('A', g)
    'a1'
    """
    # Successor table: state -> {move: next state}.
    succs = dict(A=dict(a1='B', a2='C', a3='D'),
                 B=dict(b1='B1', b2='B2', b3='B3'),
                 C=dict(c1='C1', c2='C2', c3='C3'),
                 D=dict(d1='D1', d2='D2', d3='D3'))
    # Leaf values, stored from MAX's point of view.
    utils = dict(B1=3, B2=12, B3=8, C1=2, C2=4, C3=6, D1=14, D2=5, D3=2)
    initial = 'A'

    def actions(self, state):
        "Moves are the keys of the successor table for this state."
        return list(self.succs.get(state, {}).keys())

    def result(self, state, move):
        "Follow the edge labelled *move* out of *state*."
        return self.succs[state][move]

    def utility(self, state, player):
        "Negate the stored MAX-perspective value for MIN."
        value = self.utils[state]
        if player == 'MAX':
            return value
        return -value

    def terminal_test(self, state):
        "Only the four internal nodes A, B, C, D are non-terminal."
        return state not in ('A', 'B', 'C', 'D')

    def to_move(self, state):
        "MIN moves at B, C and D; MAX everywhere else."
        return 'MIN' if state in 'BCD' else 'MAX'
class TicTacToe(Game):
    """Play TicTacToe on an h x v board, with Max (first player) playing 'X'.
    A state has the player to move, a cached utility, a list of moves in
    the form of a list of (x, y) positions, and a board, in the form of
    a dict of {(x, y): Player} entries, where Player is 'X' or 'O'."""

    def __init__(self, h=3, v=3, k=3):
        # `update` (from utils) assigns h/v/k onto self; k is the number
        # of pieces in a row needed to win.
        update(self, h=h, v=v, k=k)
        moves = [(x, y) for x in range(1, h+1)
                 for y in range(1, v+1)]
        self.initial = Struct(to_move='X', utility=0, board={}, moves=moves)

    def actions(self, state):
        "Legal moves are any square not yet taken."
        return state.moves

    def result(self, state, move):
        if move not in state.moves:
            return state # Illegal move has no effect
        board = state.board.copy()
        board[move] = state.to_move
        moves = list(state.moves)
        moves.remove(move)
        # Utility is computed once here and cached on the state.
        return Struct(to_move=('O' if state.to_move == 'X' else 'X'),
                      utility=self.compute_utility(board, move, state.to_move),
                      board=board, moves=moves)

    def utility(self, state, player):
        "Return the value to player; 1 for win, -1 for loss, 0 otherwise."
        return (state.utility if player == 'X' else -state.utility)

    def terminal_test(self, state):
        "A state is terminal if it is won or there are no empty squares."
        return state.utility != 0 or len(state.moves) == 0

    def display(self, state):
        board = state.board
        for x in range(1, self.h+1):
            for y in range(1, self.v+1):
                print(board.get((x, y), '.'), end=' ')
            print()

    def compute_utility(self, board, move, player):
        "If X wins with this move, return 1; if O return -1; else return 0."
        # Check the four line directions through the move just played.
        if (self.k_in_row(board, move, player, (0, 1)) or
                self.k_in_row(board, move, player, (1, 0)) or
                self.k_in_row(board, move, player, (1, -1)) or
                self.k_in_row(board, move, player, (1, 1))):
            return (+1 if player == 'X' else -1)
        else:
            return 0

    def k_in_row(self, board, move, player, xxx_todo_changeme):
        "Return true if there is a line through move on board for player."
        # (delta_x, delta_y) is the line direction being checked.
        (delta_x, delta_y) = xxx_todo_changeme
        x, y = move
        n = 0 # n is number of moves in row
        # Walk forward along the direction...
        while board.get((x, y)) == player:
            n += 1
            x, y = x + delta_x, y + delta_y
        x, y = move
        # ...then backward from the move.
        while board.get((x, y)) == player:
            n += 1
            x, y = x - delta_x, y - delta_y
        n -= 1 # Because we counted move itself twice
        return n >= self.k
class ConnectFour(TicTacToe):
    """A TicTacToe-like game in which you can only make a move on the bottom
    row, or in a square directly above an occupied square. Traditionally
    played on a 7x6 board and requiring 4 in a row."""

    def __init__(self, h=7, v=6, k=4):
        TicTacToe.__init__(self, h, v, k)

    def actions(self, state):
        "Each column is playable only at its lowest empty square."
        occupied = state.board
        return [(col, row) for (col, row) in state.moves
                if row == 1 or (col, row - 1) in occupied]
# NOTE(review): these doctests drive random players, so the expected
# outputs (6, 0) are only reproducible under a fixed RNG seed — confirm
# how the test harness seeds `random` before relying on them.
__doc__ += """
Random tests:
>>> play_game(Fig52Game(), random_player, random_player)
6
>>> play_game(TicTacToe(), random_player, random_player)
0
"""
| |
from __future__ import unicode_literals
import logging
import os
from django.utils.translation import ugettext_lazy as _
import paramiko
from reviewboard.ssh.errors import MakeSSHDirError, UnsupportedSSHKeyError
class SSHStorage(object):
    """Abstract interface for storing SSH user keys and known-host data.

    Subclasses implement each method for a concrete backend (e.g. the
    filesystem-based FileSSHStorage below).
    """

    def __init__(self, namespace=None):
        # Optional namespace letting backends partition their storage
        # (used by FileSSHStorage as a subdirectory).
        self.namespace = namespace

    def read_user_key(self):
        """Reads the user key.
        This will return an instance of :py:mod:`paramiko.PKey` representing
        the user key, if one exists. Otherwise, it will return None.
        """
        raise NotImplementedError

    def write_user_key(self, key):
        """Writes a user key.
        The user key will be stored, and can be accessed later by
        read_user_key.
        This will raise UnsupportedSSHKeyError if ``key`` isn't a
        :py:mod:`paramiko.RSAKey` or :py:mod:`paramiko.DSSKey`.
        It may also raise :py:mod:`paramiko.SSHException` for key-related
        errors.
        """
        raise NotImplementedError

    def delete_user_key(self, key):
        """Deletes a user key.
        The user key, if it exists, will be removed from storage.
        If no user key exists, this will do nothing.
        """
        raise NotImplementedError

    def read_authorized_keys(self):
        """Reads a list of authorized keys.
        The authorized keys are returned as a list of raw key data, which
        can then be converted into classes as needed.
        """
        raise NotImplementedError

    def read_host_keys(self):
        """Reads a list of known host keys.
        This known host keys are returned as a list of raw key data, which
        can then be converted into classes as needed.
        """
        raise NotImplementedError

    def add_host_key(self, hostname, key):
        """Adds a known key for a given host.
        This will store a mapping of the key and hostname so that future
        access to the server will know the host is legitimate.
        """
        raise NotImplementedError

    def replace_host_key(self, hostname, old_key, new_key):
        """Replaces a host key in the known hosts list with another.
        This is used for replacing host keys that have changed.
        """
        raise NotImplementedError
class FileSSHStorage(SSHStorage):
    """SSH storage backend that keeps keys in the user's ~/.ssh directory."""

    # Supported key classes, tried in order, with the conventional private
    # key file name for each.
    DEFAULT_KEY_FILES = (
        (paramiko.RSAKey, 'id_rsa'),
        (paramiko.DSSKey, 'id_dsa'),
    )

    # Candidate SSH directory names under the home directory.
    SSH_DIRS = ('.ssh', 'ssh')

    # Class-level cache of the resolved SSH directory path.
    _ssh_dir = None

    def get_user_key_info(self):
        """Return (key class, path) for the first private key file found,
        or (None, None) when no key exists."""
        for cls, filename in self.DEFAULT_KEY_FILES:
            # Paramiko looks in ~/.ssh and ~/ssh, depending on the platform,
            # so check both.
            for sshdir in self.SSH_DIRS:
                path = os.path.join(self.get_ssh_dir(sshdir), filename)
                if os.path.isfile(path):
                    return cls, path
        return None, None

    def read_user_key(self):
        """Load and return the user's private key, or None if absent."""
        cls, path = self.get_user_key_info()
        if path:
            return cls.from_private_key_file(path)
        return None

    def write_user_key(self, key):
        """Write *key* to the conventional file name for its type.

        Raises UnsupportedSSHKeyError for key types other than RSA/DSS.
        """
        key_filename = None
        for cls, filename in self.DEFAULT_KEY_FILES:
            if isinstance(key, cls):
                key_filename = filename
        if not key_filename:
            raise UnsupportedSSHKeyError()
        sshdir = self.ensure_ssh_dir()
        filename = os.path.join(sshdir, key_filename)
        key.write_private_key_file(filename)

    def delete_user_key(self):
        """Remove the user's private key file, if one exists.

        NOTE(review): the base class declares delete_user_key(self, key);
        this override takes no ``key`` argument — confirm callers.
        """
        cls, path = self.get_user_key_info()
        if path:
            # Allow any exceptions to bubble up.
            os.unlink(path)

    def read_authorized_keys(self):
        """Return the raw lines of the authorized_keys file.

        Logs and re-raises IOError when the file cannot be read.
        """
        filename = os.path.join(self.get_ssh_dir(), 'authorized_keys')
        try:
            fp = open(filename, 'r')
            lines = fp.readlines()
            fp.close()
            return lines
        except IOError as e:
            logging.warning('Unable to read SSH authorized_keys file %s: %s'
                            % (filename, e))
            raise

    def read_host_keys(self):
        """Return known_hosts entries as stripped lines, skipping blanks
        and comments. Read errors are logged, not raised."""
        filename = self.get_host_keys_filename()
        lines = []
        if os.path.exists(filename):
            try:
                with open(filename, 'r') as f:
                    for line in f:
                        line = line.strip()
                        if line and line[0] != '#':
                            lines.append(line)
            except IOError as e:
                logging.error('Unable to read host keys file %s: %s'
                              % (filename, e))
        return lines

    def add_host_key(self, hostname, key):
        """Append a '<host> <type> <base64>' entry to known_hosts.

        Raises IOError (with a user-visible message) on write failure.
        """
        self.ensure_ssh_dir()
        filename = self.get_host_keys_filename()
        try:
            with open(filename, 'a') as fp:
                fp.write('%s %s %s\n' % (hostname, key.get_name(),
                                         key.get_base64()))
        except IOError as e:
            raise IOError(
                _('Unable to write host keys file %(filename)s: %(error)s') % {
                    'filename': filename,
                    'error': e,
                })

    def replace_host_key(self, hostname, old_key, new_key):
        """Rewrite known_hosts, swapping entries matching *old_key* for
        *new_key*. Falls back to add_host_key when the file is missing."""
        filename = self.get_host_keys_filename()
        if not os.path.exists(filename):
            self.add_host_key(hostname, new_key)
            return
        try:
            with open(filename, 'r') as fp:
                lines = fp.readlines()
            old_key_base64 = old_key.get_base64()
        except IOError as e:
            raise IOError(
                _('Unable to read host keys file %(filename)s: %(error)s') % {
                    'filename': filename,
                    'error': e,
                })
        try:
            with open(filename, 'w') as fp:
                for line in lines:
                    # Match on the base64 key blob (last field); replace
                    # the key type and blob, keeping the host field.
                    parts = line.strip().split(" ")
                    if parts[-1] == old_key_base64:
                        parts[1] = new_key.get_name()
                        parts[-1] = new_key.get_base64()
                    fp.write(' '.join(parts) + '\n')
        except IOError as e:
            raise IOError(
                _('Unable to write host keys file %(filename)s: %(error)s') % {
                    'filename': filename,
                    'error': e,
                })

    def get_host_keys_filename(self):
        """Returns the path to the known host keys file."""
        return os.path.join(self.get_ssh_dir(), 'known_hosts')

    def get_ssh_dir(self, ssh_dir_name=None):
        """Returns the path to the SSH directory on the system.
        By default, this will attempt to find either a .ssh or ssh directory.
        If ``ssh_dir_name`` is specified, the search will be skipped, and we'll
        use that name instead.
        """
        path = self._ssh_dir
        if not path or ssh_dir_name:
            path = os.path.expanduser('~')
            if not ssh_dir_name:
                ssh_dir_name = None
                for name in self.SSH_DIRS:
                    if os.path.exists(os.path.join(path, name)):
                        ssh_dir_name = name
                        break
            if not ssh_dir_name:
                ssh_dir_name = self.SSH_DIRS[0]
            path = os.path.join(path, ssh_dir_name)
            # NOTE(review): ssh_dir_name is always truthy by this point
            # (found above or defaulted), so this cache assignment never
            # runs and _ssh_dir stays None — confirm the intended caching
            # condition before changing it.
            if not ssh_dir_name:
                self.__class__._ssh_dir = path
        if self.namespace:
            return os.path.join(path, self.namespace)
        else:
            return path

    def ensure_ssh_dir(self):
        """Ensures the existance of the .ssh directory.
        If the directory doesn't exist, it will be created.
        The full path to the directory will be returned.
        Callers are expected to handle any exceptions. This may raise
        IOError for any problems in creating the directory.
        """
        sshdir = self.get_ssh_dir()
        if not os.path.exists(sshdir):
            try:
                # 0700: private keys must not be group/world readable.
                os.makedirs(sshdir, 0o700)
            except OSError:
                raise MakeSSHDirError(sshdir)
        return sshdir
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""problem3.py: This module implements a small Classification in sklearn (edx.org - ColumbiaX - AI Week 7)"""
__author__ = "Selam Getachew Woldetsadick"
__copyright__ = "Copyright (c) 2017 Selam Getachew Woldetsadick"
def features_labels_extractor(data):
    """
    This function takes a Pandas representation of data, and transforms it to Numpy representation, while splitting data
    into features and labels arrays
    :param data: A Pandas dataframe with columns 'A', 'B' and 'label'
    :return: A tuple of two elements respectively features and labels arrays
    """
    # DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in
    # 1.0; column selection + .values yields the same Numpy arrays.
    features = data[['A', 'B']].values
    labels = data['label'].values.flatten()
    return features, labels
def csv_dataset_reader(path="./input3.csv"):
    """
    Load a comma-separated dataset into a Pandas dataframe, using the
    first row as the header.
    :param path: Path to and name of the csv file to read (default:"./input3.csv")
    :return: A Pandas dataframe
    """
    import pandas as pd
    return pd.read_csv(path, sep=",", header="infer")
def train_test_splitter(features, labels):
    """
    Split the data into training (60%) and testing (40%) partitions with
    stratified sampling on the labels and a fixed random seed.
    :param features: A set of features in Numpy array format
    :param labels: A set of labels in Numpy array format
    :return: a four elements tuples respectively training features, testing features, training labels and testing labels
    """
    from sklearn.model_selection import train_test_split
    split = train_test_split(features, labels, test_size=0.4,
                             random_state=10, stratify=labels)
    features_train, features_test, labels_train, labels_test = split
    return features_train, features_test, labels_train, labels_test
def graph_3d_my_data(data):
    """
    Render a 3D scatter plot of features A and B against the label:
    red circles for label 0, blue triangles otherwise.
    :param data: Graphed dataframe with columns 'A', 'B' and 'label'
    :return: None
    """
    # Axes3D import registers the '3d' projection with matplotlib.
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt
    fig = plt.figure()
    axis = fig.add_subplot(111, projection='3d')
    for row in range(data.shape[0]):
        label_value = data['label'][row]
        color, marker = ('r', 'o') if label_value == 0 else ('b', '^')
        axis.scatter(data['A'][row], data['B'][row], label_value,
                     c=color, marker=marker)
    axis.set_xlabel('A')
    axis.set_ylabel('B')
    axis.set_zlabel('Labels')
    plt.show()
    return
def linear_svm_classification(training_set_features, testing_set_features, training_set_labels, testing_set_labels):
    """
    Grid-search a linear-kernel SVM over C with 5-fold CV on standardized
    features, then score the best model on the held-out test set.
    :param training_set_features: multi-dimensional array representing training set features.
    :param testing_set_features: multi-dimensional array representing testing set features.
    :param training_set_labels: uni-dimensional array representing training set labels.
    :param testing_set_labels: uni-dimensional array representing testing set labels.
    :return: tuple of (method name, best 5-fold CV accuracy, test-set accuracy)
    """
    from sklearn import svm
    from sklearn.model_selection import GridSearchCV
    from sklearn.preprocessing import StandardScaler
    from sklearn.metrics import accuracy_score
    scaler = StandardScaler()
    train_scaled = scaler.fit_transform(training_set_features)
    grid = GridSearchCV(svm.SVC(kernel='linear', random_state=10),
                        {'C': [0.1, 0.5, 1, 5, 10, 50, 100]},
                        cv=5, scoring='accuracy')
    grid.fit(train_scaled, training_set_labels)
    # Reuse the training-set scaling for the test features.
    predictions = grid.predict(scaler.transform(testing_set_features))
    held_out_score = accuracy_score(testing_set_labels, predictions, normalize=True)
    return "svm_linear", grid.best_score_, held_out_score
def polynomial_svm_classification(training_set_features, testing_set_features, training_set_labels, testing_set_labels):
    """
    Grid-search a polynomial-kernel SVM over C, polynomial degree and the
    kernel coefficient gamma with 5-fold CV on standardized features, then
    score the best model on the held-out test set.
    :param training_set_features: multi-dimensional array representing training set features.
    :param testing_set_features: multi-dimensional array representing testing set features.
    :param training_set_labels: uni-dimensional array representing training set labels.
    :param testing_set_labels: uni-dimensional array representing testing set labels.
    :return: tuple of (method name, best 5-fold CV accuracy, test-set accuracy)
    """
    from sklearn import svm
    from sklearn.model_selection import GridSearchCV
    from sklearn.preprocessing import StandardScaler
    from sklearn.metrics import accuracy_score
    scaler = StandardScaler()
    train_scaled = scaler.fit_transform(training_set_features)
    grid = GridSearchCV(
        svm.SVC(kernel='poly', random_state=10),
        {'C': [0.1, 1, 3], 'degree': [4, 5, 6], 'gamma': [0.1, 1]},
        cv=5, scoring='accuracy')
    grid.fit(train_scaled, training_set_labels)
    # Reuse the training-set scaling for the test features.
    predictions = grid.predict(scaler.transform(testing_set_features))
    held_out_score = accuracy_score(testing_set_labels, predictions, normalize=True)
    return "svm_polynomial", grid.best_score_, held_out_score
def rbf_svm_classification(training_set_features, testing_set_features, training_set_labels, testing_set_labels):
    """
    Fit an RBF-kernel SVM, tuning the regularization strength C and the kernel
    coefficient gamma with a grid search under 5-fold cross-validation.
    :param training_set_features: multi-dimensional array of training-set features.
    :param testing_set_features: multi-dimensional array of testing-set features.
    :param training_set_labels: uni-dimensional array of training-set labels.
    :param testing_set_labels: uni-dimensional array of testing-set labels.
    :return: tuple of (method name, best CV accuracy on the grid, test-set accuracy)
    """
    from sklearn import svm
    from sklearn.model_selection import GridSearchCV
    from sklearn.preprocessing import StandardScaler
    from sklearn.metrics import accuracy_score
    method = "svm_rbf"
    # Scale on the training data, then apply the same transform to the test data.
    scaler = StandardScaler()
    x_train = scaler.fit_transform(training_set_features)
    x_test = scaler.transform(testing_set_features)
    param_grid = {'C': [0.1, 0.5, 1, 5, 10, 50, 100],
                  'gamma': [0.1, 0.5, 1, 3, 6, 10]}
    search = GridSearchCV(svm.SVC(kernel='rbf', random_state=10), param_grid,
                          cv=5, scoring='accuracy')
    search.fit(x_train, training_set_labels)
    predictions = search.predict(x_test)
    return (method,
            search.best_score_,
            accuracy_score(testing_set_labels, predictions, normalize=True))
def logistic_classification(training_set_features, testing_set_features, training_set_labels, testing_set_labels):
    """
    Fit a logistic-regression classifier, tuning the regularization strength C
    with a grid search under 5-fold cross-validation. Unlike the SVM/KNN/tree
    variants, the features are used unscaled here.
    :param training_set_features: multi-dimensional array of training-set features.
    :param testing_set_features: multi-dimensional array of testing-set features.
    :param training_set_labels: uni-dimensional array of training-set labels.
    :param testing_set_labels: uni-dimensional array of testing-set labels.
    :return: tuple of (method name, best CV accuracy on the grid, test-set accuracy)
    """
    from sklearn import linear_model
    from sklearn.model_selection import GridSearchCV
    from sklearn.metrics import accuracy_score
    method = "logistic"
    search = GridSearchCV(linear_model.LogisticRegression(random_state=10),
                          {'C': [0.1, 0.5, 1, 5, 10, 50, 100]},
                          cv=5, scoring='accuracy')
    search.fit(training_set_features, training_set_labels)
    predictions = search.predict(testing_set_features)
    return (method,
            search.best_score_,
            accuracy_score(testing_set_labels, predictions, normalize=True))
def knn_classification(training_set_features, testing_set_features, training_set_labels, testing_set_labels):
    """
    Fit a k-nearest-neighbours classifier (euclidean metric), tuning the number
    of neighbours and the leaf size with a grid search under 5-fold
    cross-validation.
    :param training_set_features: multi-dimensional array of training-set features.
    :param testing_set_features: multi-dimensional array of testing-set features.
    :param training_set_labels: uni-dimensional array of training-set labels.
    :param testing_set_labels: uni-dimensional array of testing-set labels.
    :return: tuple of (method name, best CV accuracy on the grid, test-set accuracy)
    """
    from sklearn import neighbors
    from sklearn.model_selection import GridSearchCV
    from sklearn.preprocessing import StandardScaler
    from sklearn.metrics import accuracy_score
    method = "knn"
    scaler = StandardScaler()
    x_train = scaler.fit_transform(training_set_features)
    x_test = scaler.transform(testing_set_features)
    grid = {'n_neighbors': range(1, 51),
            'leaf_size': list(range(5, 61, 5))}  # multiples of 5 up to 60
    search = GridSearchCV(neighbors.KNeighborsClassifier(metric='euclidean'),
                          grid, cv=5, scoring='accuracy')
    search.fit(x_train, training_set_labels)
    predictions = search.predict(x_test)
    test_score = accuracy_score(testing_set_labels, predictions, normalize=True)
    return method, search.best_score_, test_score
def tree_classification(training_set_features, testing_set_features, training_set_labels, testing_set_labels):
    """
    Fit a decision-tree classifier, tuning the maximum depth and the minimum
    number of samples required to split an internal node with a grid search
    under 5-fold cross-validation.
    :param training_set_features: multi-dimensional array of training-set features.
    :param testing_set_features: multi-dimensional array of testing-set features.
    :param training_set_labels: uni-dimensional array of training-set labels.
    :param testing_set_labels: uni-dimensional array of testing-set labels.
    :return: tuple of (method name, best CV accuracy on the grid, test-set accuracy)
    """
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.model_selection import GridSearchCV
    from sklearn.preprocessing import StandardScaler
    from sklearn.metrics import accuracy_score
    method = "decision_tree"
    scaler = StandardScaler()
    x_train = scaler.fit_transform(training_set_features)
    x_test = scaler.transform(testing_set_features)
    search = GridSearchCV(DecisionTreeClassifier(random_state=0),
                          {'max_depth': range(1, 51),
                           'min_samples_split': range(2, 11)},
                          cv=5, scoring='accuracy')
    search.fit(x_train, training_set_labels)
    predictions = search.predict(x_test)
    return (method, search.best_score_,
            accuracy_score(testing_set_labels, predictions, normalize=True))
def rf_classification(training_set_features, testing_set_features, training_set_labels, testing_set_labels):
    """
    Fit a random-forest classifier, tuning the maximum tree depth and the
    minimum number of samples required to split an internal node with a grid
    search under 5-fold cross-validation.
    :param training_set_features: multi-dimensional array of training-set features.
    :param testing_set_features: multi-dimensional array of testing-set features.
    :param training_set_labels: uni-dimensional array of training-set labels.
    :param testing_set_labels: uni-dimensional array of testing-set labels.
    :return: tuple of (method name, best CV accuracy on the grid, test-set accuracy)
    """
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import GridSearchCV
    from sklearn.preprocessing import StandardScaler
    from sklearn.metrics import accuracy_score
    method = "random_forest"
    scaler = StandardScaler()
    x_train = scaler.fit_transform(training_set_features)
    x_test = scaler.transform(testing_set_features)
    search = GridSearchCV(RandomForestClassifier(random_state=0),
                          {'max_depth': range(1, 51),
                           'min_samples_split': range(2, 11)},
                          cv=5, scoring='accuracy')
    search.fit(x_train, training_set_labels)
    predictions = search.predict(x_test)
    return (method, search.best_score_,
            accuracy_score(testing_set_labels, predictions, normalize=True))
def output_csv_writer(method, best_score, test_score):
    """
    Append one result row to ./output3.csv as "method,best_score,test_score".
    :param method: The method used to build the classification (converted to str)
    :param best_score: Best accuracy score on the parameters grid in 5-fold CV (float)
    :param test_score: Accuracy score on the test set (float)
    :return: None
    """
    # The ``with`` statement already closes the file on exit; the original's
    # explicit f.close() and trailing bare ``return`` were redundant.
    with open('./output3.csv', 'a') as f:
        f.write("%s,%f,%f\n" % (str(method), float(best_score), float(test_score)))
if __name__ == "__main__":
    datum = csv_dataset_reader()
    # graph_3d_my_data(datum)
    feats, lab = features_labels_extractor(datum)
    feats_train, feats_test, lab_train, lab_test = train_test_splitter(feats, lab)
    # Run every classifier in turn (same order as before) and append each
    # method's scores to output3.csv.
    classifiers = (
        linear_svm_classification,
        polynomial_svm_classification,
        rbf_svm_classification,
        logistic_classification,
        knn_classification,
        tree_classification,
        rf_classification,
    )
    for classify in classifiers:
        m, b_score, t_score = classify(feats_train, feats_test, lab_train, lab_test)
        output_csv_writer(m, b_score, t_score)
| |
from __future__ import print_function, division, absolute_import
import io
import os
from toolz import merge
from warnings import warn
from .compression import seekable_files, files as compress_files
from .utils import SeekableFile
from ..compatibility import PY2, unicode
from ..base import tokenize
from ..delayed import delayed, Delayed, apply
from ..utils import (infer_storage_options, system_encoding,
build_name_function, infer_compression,
import_required)
# Rebind ``delayed`` so every use in this module produces pure delayed
# objects (deterministic keys, safe to share/cache).
delayed = delayed(pure=True)
# Global registration dictionaries for backend storage functions,
# keyed by protocol name (e.g. 'file', 's3', 'hdfs').
# See docstrings to functions below for more information
_read_bytes = dict()  # protocol -> read_bytes implementation
_open_files_write = dict()  # protocol -> writable-file opener
_open_files = dict()  # protocol -> binary-file opener
_open_text_files = dict()  # protocol -> text-mode file opener
def write_block_to_file(data, f, compression, encoding):
    """
    Write one block of data to a (possibly compressed) backend file.

    Parameters
    ----------
    data : data to write
        Either str/bytes, or iterable producing those, or something file-like
        which can be read.
    f : file-like
        backend-dependent file-like object
    compression : string
        a key of `compress_files`
    encoding : string (None)
        if a string (e.g., 'ascii', 'utf8'), implies text mode, otherwise no
        encoding and binary mode.
    """
    backend_file = f
    sink = SeekableFile(f)
    wrapped = False
    if compression:
        wrapped = True
        sink = compress_files[compression](sink, mode='wb')

    def emit(piece):
        # Encode only when an encoding was given (text mode); otherwise pass
        # the bytes straight through.
        if encoding:
            sink.write(piece.encode(encoding=encoding))
        else:
            sink.write(piece)

    try:
        if isinstance(data, (str, bytes)):
            emit(data)
        elif isinstance(data, io.IOBase):
            # file-like source: copy across in 64 KiB chunks (sentinel loop;
            # the final empty write is a harmless no-op)
            chunk = '1'
            while chunk:
                chunk = data.read(64 * 2 ** 10)
                emit(chunk)
        else:
            # iterable, e.g. bag contents: newline-separate successive items
            first = True
            for item in data:
                if not first:
                    sink.write(b'\n')
                first = False
                emit(item)
    finally:
        sink.close()
        if wrapped:
            # closing the compression wrapper does not close the raw backend
            # file, so close it explicitly
            backend_file.close()
def write_bytes(data, urlpath, name_function=None, compression=None,
                encoding=None, **kwargs):
    """For a list of values which evaluate to bytes, produce delayed values
    which, when executed, result in writing to files.
    The path may be a concrete directory, in which case it is interpreted
    as a directory, or a template for numbered output.
    The path may be preceded by a protocol, like ``s3://`` or ``hdfs://`` if
    those libraries are installed.
    Parameters
    ----------
    data: list of ``dask.Delayed`` objects or dask collection
        the data to be written
    urlpath: string
        Absolute or relative filepaths, URLs (may include protocols like
        ``s3://``); may be globstring (include `*`).
    name_function: function or None
        If using a globstring, this provides the conversion from part number
        to text to replace `*` with.
    compression: string or None
        String like 'gzip' or 'xz'. Must support efficient random access.
    **kwargs: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.
    Examples
    --------
    >>> values = write_bytes(vals, 's3://bucket/part-*.csv')  # doctest: +SKIP
    Returns
    -------
    list of ``dask.Delayed`` objects
    """
    if isinstance(urlpath, (tuple, list, set)):
        if len(data) != len(urlpath):
            # BUGFIX: the message is now %-formatted eagerly; previously the
            # lengths were passed as extra exception args (never interpolated)
            # and the two adjacent literals lacked a separating space.
            raise ValueError('Number of paths and number of delayed objects '
                             'must match (%s != %s)' % (len(urlpath), len(data)))
        storage_options = infer_storage_options(urlpath[0],
                                                inherit_storage_options=kwargs)
        del storage_options['path']
        paths = [infer_storage_options(u, inherit_storage_options=kwargs)['path']
                 for u in urlpath]
    elif isinstance(urlpath, (str, unicode)):
        storage_options = infer_storage_options(urlpath,
                                                inherit_storage_options=kwargs)
        path = storage_options.pop('path')
        paths = _expand_paths(path, name_function, len(data))
    else:
        raise ValueError('URL spec must be string or sequence of strings')
    if compression == 'infer':
        compression = infer_compression(paths[0])
    if compression is not None and compression not in compress_files:
        raise ValueError("Compression type %s not supported" % compression)
    protocol = storage_options.pop('protocol')
    ensure_protocol(protocol)
    try:
        open_files_write = _open_files_write[protocol]
    except KeyError:
        raise NotImplementedError("Unknown protocol for writing %s (%s)" %
                                  (protocol, urlpath))
    # one deterministic key per (value, destination) pair
    keys = ['write-block-%s' % tokenize(d.key, p, storage_options,
            compression, encoding) for (d, p) in zip(data, paths)]
    return [Delayed(key, dasks=[{key: (write_block_to_file, v.key,
                                       (apply, open_files_write, (p,),
                                        storage_options),
                                       compression, encoding),
                                 }, v.dask])
            for key, v, p in zip(keys, data, paths)]
def read_bytes(urlpath, delimiter=None, not_zero=False, blocksize=2**27,
               sample=True, compression=None, **kwargs):
    """ Convert path to a list of delayed values
    The path may be a filename like ``'2015-01-01.csv'`` or a globstring
    like ``'2015-*-*.csv'``.
    The path may be preceded by a protocol, like ``s3://`` or ``hdfs://`` if
    those libraries are installed.
    This cleanly breaks data by a delimiter if given, so that block boundaries
    start directly after a delimiter and end on the delimiter.
    Parameters
    ----------
    urlpath: string
        Absolute or relative filepath, URL (may include protocols like
        ``s3://``), or globstring pointing to data.
    delimiter: bytes
        An optional delimiter, like ``b'\\n'`` on which to split blocks of bytes
    not_zero: force seek of start-of-file delimiter, discarding header
    blocksize: int (=128MB)
        Chunk size
    compression: string or None
        String like 'gzip' or 'xz'. Must support efficient random access.
    sample: bool, int
        Whether or not to return a sample from the first 10k bytes
    **kwargs: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.
    Examples
    --------
    >>> sample, blocks = read_bytes('2015-*-*.csv', delimiter=b'\\n')  # doctest: +SKIP
    >>> sample, blocks = read_bytes('s3://bucket/2015-*-*.csv', delimiter=b'\\n')  # doctest: +SKIP
    Returns
    -------
    10kB sample header and list of ``dask.Delayed`` objects or list of lists of
    delayed objects if ``fn`` is a globstring.
    """
    if compression is not None and compression not in compress_files:
        raise ValueError("Compression type %s not supported" % compression)
    storage_options = infer_storage_options(urlpath,
                                            inherit_storage_options=kwargs)
    protocol = storage_options.pop('protocol')
    ensure_protocol(protocol)
    try:
        # renamed from ``read_bytes`` to avoid shadowing this function's name
        backend_read = _read_bytes[protocol]
    except KeyError:
        raise NotImplementedError("Unknown protocol for reading %s (%s)" %
                                  (protocol, urlpath))
    path = storage_options.pop('path')
    return backend_read(path, delimiter=delimiter, not_zero=not_zero,
                        blocksize=blocksize, sample=sample,
                        compression=compression, **storage_options)
def open_files_by(open_files_backend, path, compression=None, **kwargs):
    """ Given open files backend and path return dask.delayed file-like objects
    NOTE: This is an internal helper function, please refer to
    :func:`open_files` documentation for more details.
    Parameters
    ----------
    path: string
        Filepath or globstring
    compression: string
        Compression to use. See ``dask.bytes.compression.files`` for options.
    **kwargs: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.
    Returns
    -------
    List of ``dask.delayed`` objects that compute to file-like objects
    """
    files = open_files_backend(path, **kwargs)
    if not compression:
        return files
    # look the decompressor up in both registries (seekable wins on clash)
    decompress = merge(seekable_files, compress_files)[compression]
    if PY2:
        # Python 2 file objects need the seekable wrapper before decompression
        files = [delayed(SeekableFile)(fo) for fo in files]
    return [delayed(decompress)(fo) for fo in files]
def open_files(urlpath, compression=None, **kwargs):
    """ Given path return dask.delayed file-like objects
    Parameters
    ----------
    urlpath: string
        Absolute or relative filepath, URL (may include protocols like
        ``s3://``), or globstring pointing to data.
    compression: string
        Compression to use. See ``dask.bytes.compression.files`` for options.
    **kwargs: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.
    Examples
    --------
    >>> files = open_files('2015-*-*.csv')  # doctest: +SKIP
    >>> files = open_files('s3://bucket/2015-*-*.csv.gz', compression='gzip')  # doctest: +SKIP
    Returns
    -------
    List of ``dask.delayed`` objects that compute to file-like objects
    """
    if compression is not None and compression not in compress_files:
        raise ValueError("Compression type %s not supported" % compression)
    storage_options = infer_storage_options(urlpath,
                                            inherit_storage_options=kwargs)
    protocol = storage_options.pop('protocol')
    path = storage_options.pop('path')
    ensure_protocol(protocol)
    if protocol not in _open_files:
        raise NotImplementedError("Unknown protocol %s (%s)" %
                                  (protocol, urlpath))
    return open_files_by(_open_files[protocol], path,
                         compression=compression, **storage_options)
def _expand_paths(path, name_function, num):
    """
    Expand a path template (or validate a sequence of paths) into ``num``
    concrete output paths.

    ``path`` may be a string containing at most one ``'*'`` (replaced by
    ``name_function(i)`` for each part), a directory (``'*.part'`` is
    appended), or a sequence of exactly ``num`` paths.
    """
    if isinstance(path, (str, unicode)):
        if path.count('*') > 1:
            raise ValueError("Output path spec must contain at most one '*'.")
        if name_function is None:
            name_function = build_name_function(num - 1)
        if '*' not in path:
            # treat a plain path as a directory of numbered parts
            path = os.path.join(path, '*.part')
        formatted_names = [name_function(i) for i in range(num)]
        if formatted_names != sorted(formatted_names):
            warn("In order to preserve order between partitions "
                 "name_function must preserve the order of its input")
        paths = [path.replace('*', name_function(i))
                 for i in range(num)]
    elif isinstance(path, (tuple, list, set)):
        # NOTE(review): ``assert`` is stripped under ``python -O``; kept so
        # callers that catch AssertionError keep working.
        assert len(path) == num
        paths = path
    else:
        # BUGFIX: the original message had a stray '"' after "either" and an
        # unterminated quote around 'foo/'.
        raise ValueError("""Path should be either
1. A list of paths -- ['foo.json', 'bar.json', ...]
2. A directory -- 'foo/'
3. A path with a * in it -- 'foo.*.json'""")
    return paths
def open_text_files(urlpath, encoding=system_encoding, errors='strict',
                    compression=None, **kwargs):
    """ Given path return dask.delayed file-like objects in text mode
    Parameters
    ----------
    urlpath: string
        Absolute or relative filepath, URL (may include protocols like
        ``s3://``), or globstring pointing to data.
    encoding: string
    errors: string
    compression: string
        Compression to use. See ``dask.bytes.compression.files`` for options.
    **kwargs: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.
    Examples
    --------
    >>> files = open_text_files('2015-*-*.csv', encoding='utf-8')  # doctest: +SKIP
    >>> files = open_text_files('s3://bucket/2015-*-*.csv')  # doctest: +SKIP
    Returns
    -------
    List of ``dask.delayed`` objects that compute to text file-like objects
    """
    if compression is not None and compression not in compress_files:
        raise ValueError("Compression type %s not supported" % compression)
    storage_options = infer_storage_options(urlpath,
                                            inherit_storage_options=kwargs)
    path = storage_options.pop('path')
    protocol = storage_options.pop('protocol')
    ensure_protocol(protocol)
    if protocol in _open_text_files and compression is None:
        # backend can open text-mode files natively
        text_opener = _open_text_files[protocol]
        return text_opener(path, encoding=encoding, errors=errors,
                           **storage_options)
    if protocol in _open_files:
        # fall back to binary files wrapped in TextIOWrapper
        binary_files = open_files_by(_open_files[protocol], path,
                                     compression=compression,
                                     **storage_options)
        if PY2:
            binary_files = [delayed(SeekableFile)(fo) for fo in binary_files]
        return [delayed(io.TextIOWrapper)(fo, encoding=encoding,
                                          errors=errors)
                for fo in binary_files]
    raise NotImplementedError("Unknown protocol %s (%s)" %
                              (protocol, urlpath))
def ensure_protocol(protocol):
    """
    Ensure the backend for ``protocol`` is importable/registered, raising
    ValueError for protocols that are neither registered nor known.
    """
    registered = (protocol in _read_bytes) or (protocol in _open_files)
    # s3/hdfs always get an import check; anything else just needs a registration
    if protocol not in ('s3', 'hdfs') and registered:
        return
    if protocol == 's3':
        import_required('s3fs',
                        "Need to install `s3fs` library for s3 support\n"
                        "    conda install s3fs -c conda-forge\n"
                        "    or\n"
                        "    pip install s3fs")
    elif protocol == 'hdfs':
        msg = ("Need to install `distributed` and `hdfs3` "
               "for HDFS support\n"
               "    conda install distributed hdfs3 -c conda-forge")
        import_required('distributed.hdfs', msg)
        import_required('hdfs3', msg)
    else:
        raise ValueError("Unknown protocol %s" % protocol)
| |
"""
Distributed evaluation of genomes.
About compute nodes:
The primary node (=the node which creates and mutates genomes) and the secondary
nodes (=the nodes which evaluate genomes) can execute the same script. The
role of a compute node is determined using the ``mode`` argument of the
DistributedEvaluator. If the mode is MODE_AUTO, the `host_is_local()` function
is used to check if the ``addr`` argument points to the localhost. If it does,
the compute node starts as a primary node, otherwise as a secondary node. If
``mode`` is MODE_PRIMARY, the compute node always starts as a primary node. If
``mode`` is MODE_SECONDARY, the compute node will always start as a secondary node.
There can only be one primary node per NEAT, but any number of secondary nodes.
The primary node will not evaluate any genomes, which means you will always need
at least two compute nodes.
You can run any number of compute nodes on the same physical machine (or VM).
However, if a machine has both a primary node and one or more secondary nodes,
MODE_AUTO cannot be used for those secondary nodes - MODE_SECONDARY will need to be
specified.
NOTE:
This module is in a **beta** state, and still *unstable* even in single-machine
testing. Reliability is likely to vary, including depending on the Python version
and implementation (e.g., cpython vs pypy) in use and the likelihoods of timeouts
(due to machine and/or network slowness). In particular, while the code can try
to reconnect between primary and secondary nodes, as noted in the
`multiprocessing` documentation this may not work due to data loss/corruption.
Note also that this module is *not* responsible for starting the script copies
on the different compute nodes, since this is very site/configuration-dependent.
Usage:
1. Import modules and define the evaluation logic (the eval_genome function).
(After this, check for ``if __name__ == '__main__'``, and put the rest of
the code inside the body of the statement.)
2. Load config and create a population - here, the variable ``p``.
3. If required, create and add reporters.
4. Create a ``DistributedEvaluator(addr_of_primary_node, b'some_password',
eval_function, mode=MODE_AUTO)`` - here, the variable ``de``.
5. Call ``de.start(exit_on_stop=True)``. The `start()` call will block on the
secondary nodes and call `sys.exit(0)` when the NEAT evolution finishes. This
means that the following code will only be executed on the primary node.
6. Start the evaluation using ``p.run(de.evaluate, number_of_generations)``.
7. Stop the secondary nodes using ``de.stop()``.
8. You are done. You may want to save the winning genome or show some statistics.
See ``examples/xor/evolve-feedforward-distributed.py`` for a complete example.
Utility functions:
``host_is_local(hostname, port=22)`` returns True if ``hostname`` points to
the local node/host. This can be used to check if a compute node will run as
a primary node or as a secondary node with MODE_AUTO.
``chunked(data, chunksize)``: splits data into a list of chunks with at most
``chunksize`` elements.
"""
from __future__ import print_function
import socket
import sys
import time
import warnings
# below still needed for queue.Empty
try:
# pylint: disable=import-error
import Queue as queue
except ImportError:
# pylint: disable=import-error
import queue
import multiprocessing
from multiprocessing import managers
from argparse import Namespace
# Some of this code is based on
# http://eli.thegreenplace.net/2012/01/24/distributed-computing-in-python-with-multiprocessing
# According to the website, the code is in the public domain
# ('public domain' links to unlicense.org).
# This means that we can use the code from this website.
# Thanks to Eli Bendersky for making his code open for use.
# modes to determine the role of a compute node
# the primary handles the evolution of the genomes
# the secondary handles the evaluation of the genomes
MODE_AUTO = 0 # auto-determine mode from the address (see host_is_local)
MODE_PRIMARY = MODE_MASTER = 1 # enforce primary mode (MODE_MASTER is a deprecated alias)
MODE_SECONDARY = MODE_SLAVE = 2 # enforce secondary mode (MODE_SLAVE is a deprecated alias)
# what a return from _check_exception means
_EXCEPTION_TYPE_OK = 1 # queue empty and similar; try again
_EXCEPTION_TYPE_UNCERTAIN = 0 # disconnected but may be able to reconnect
_EXCEPTION_TYPE_BAD = -1 # either raise it again or immediately return and exit with non-zero status code
class ModeError(RuntimeError):
    """
    Raised when a method that only makes sense for one node role is invoked
    on a node running in the other role - e.g. a primary-only method called
    on a secondary node, or vice versa.
    """
    pass
def host_is_local(hostname, port=22): # no port specified, just use the ssh port
    """
    Returns True if the hostname points to the localhost, otherwise False.
    """
    fqdn = socket.getfqdn(hostname)
    # well-known loopback names/addresses (IPv4 and IPv6 reverse forms)
    if fqdn in ("localhost", "0.0.0.0", "127.0.0.1", "1.0.0.127.in-addr.arpa",
                "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa"):
        return True
    our_name = socket.gethostname()
    if fqdn == our_name:
        return True
    # otherwise compare resolved addresses of this host vs. the target
    our_addrs = {info[4][0] for info in socket.getaddrinfo(our_name, port)}
    target_addrs = (info[4][0] for info in socket.getaddrinfo(fqdn, port))
    return any(addr in our_addrs for addr in target_addrs)
def _determine_mode(addr, mode):
"""
Returns the mode which should be used.
If mode is MODE_AUTO, this is determined by checking if 'addr' points to the
local host. If it does, return MODE_PRIMARY, else return MODE_SECONDARY.
If mode is either MODE_PRIMARY or MODE_SECONDARY,
return the 'mode' argument. Otherwise, a ValueError is raised.
"""
if isinstance(addr, tuple):
host = addr[0]
elif type(addr) == type(b"binary_string"):
host = addr
else:
raise TypeError("'addr' needs to be a tuple or an bytestring!")
if mode == MODE_AUTO:
if host_is_local(host):
return MODE_PRIMARY
return MODE_SECONDARY
elif mode in (MODE_SECONDARY, MODE_PRIMARY):
return mode
else:
raise ValueError("Invalid mode {!r}!".format(mode))
def chunked(data, chunksize):
    """
    Returns a list of chunks containing at most ``chunksize`` elements of data.
    """
    if chunksize < 1:
        raise ValueError("Chunksize must be at least 1!")
    if int(chunksize) != chunksize:
        raise ValueError("Chunksize needs to be an integer")
    chunks = []
    # start a fresh chunk whenever the element index hits a multiple of chunksize
    for index, element in enumerate(data):
        if index % chunksize == 0:
            chunks.append([])
        chunks[-1].append(element)
    return chunks
class _ExtendedManager(object):
    """A class for managing the multiprocessing.managers.SyncManager"""
    __safe_for_unpickling__ = True # this may not be safe for unpickling,
                                   # but this is required by pickle.
    def __init__(self, addr, authkey, mode, start=False):
        """
        ``addr``: address (tuple or bytestring) of the primary node's manager.
        ``authkey``: password shared by all participating nodes.
        ``mode``: MODE_AUTO/MODE_PRIMARY/MODE_SECONDARY; MODE_AUTO is resolved
        to a concrete role immediately.
        ``start``: when True, start (primary) or connect (secondary) right away.
        """
        self.addr = addr
        self.authkey = authkey
        # resolve MODE_AUTO now so the role survives pickling round-trips
        self.mode = _determine_mode(addr, mode)
        self.manager = None
        if start:
            self.start()
    def __reduce__(self): # pragma: no cover
        """
        This method is used by pickle to serialize instances of this class.
        """
        # recreate with the same parameters; a live manager is re-started
        # (or re-connected) on unpickling via the ``start`` flag.
        return (
            self.__class__,
            (self.addr, self.authkey, self.mode, bool(self.manager is not None)),
        )
    def start(self):
        """Starts or connects to the manager."""
        # idempotent: calling start() while a manager is alive does nothing
        if self.manager is None:
            if self.mode == MODE_PRIMARY:
                i = self._start()
            else:
                i = self._connect()
            self.manager = i
    def stop(self):
        """Stops the manager."""
        #self.manager.shutdown() # claims there isn't any such attribute ?!?
        # NOTE(review): only the reference is dropped here; the manager process
        # itself is left to garbage collection rather than shut down explicitly.
        self.manager = None
    @staticmethod
    def _get_manager_class(register_callables=False):
        """
        Returns a new 'Manager' subclass with registered methods.
        If 'register_callable' is True, defines the 'callable' arguments.
        (The primary node registers with callables so it serves the shared
        objects; secondary nodes register names only and receive proxies.)
        """
        class _EvaluatorSyncManager(managers.BaseManager):
            """
            A custom BaseManager.
            Please see the documentation of `multiprocessing` for more
            information.
            """
            pass
        inqueue = queue.Queue() # may need to be one from multiprocessing.managers.SyncManager
        outqueue = queue.Queue() # ditto
        namespace = Namespace() # ditto
        if register_callables:
            _EvaluatorSyncManager.register(
                "get_inqueue",
                callable=lambda: inqueue,
            )
            _EvaluatorSyncManager.register(
                "get_outqueue",
                callable=lambda: outqueue,
            )
            _EvaluatorSyncManager.register(
                "get_namespace",
                callable=lambda: namespace,
            )
        else:
            _EvaluatorSyncManager.register(
                "get_inqueue",
            )
            _EvaluatorSyncManager.register(
                "get_outqueue",
            )
            _EvaluatorSyncManager.register(
                "get_namespace",
            )
        return _EvaluatorSyncManager
    def _connect(self):
        """Connects to the manager (secondary-node path)."""
        cls = self._get_manager_class(register_callables=False)
        ins = cls(address=self.addr, authkey=self.authkey)
        ins.connect()
        return ins
    def _start(self):
        """Starts the manager (primary-node path)."""
        cls = self._get_manager_class(register_callables=True)
        ins = cls(address=self.addr, authkey=self.authkey)
        ins.start()
        return ins
    def get_inqueue(self):
        """Returns the inqueue (proxy); requires start() to have been called."""
        if self.manager is None:
            raise RuntimeError("Manager not started")
        return self.manager.get_inqueue()
    def get_outqueue(self):
        """Returns the outqueue (proxy); requires start() to have been called."""
        if self.manager is None:
            raise RuntimeError("Manager not started")
        return self.manager.get_outqueue()
    def get_namespace(self):
        """Returns the namespace (proxy); requires start() to have been called."""
        if self.manager is None:
            raise RuntimeError("Manager not started")
        return self.manager.get_namespace()
class DistributedEvaluator(object):
"""An evaluator working across multiple machines"""
    def __init__(
        self,
        addr,
        authkey,
        eval_function,
        secondary_chunksize=1,
        num_workers=None,
        worker_timeout=60,
        mode=MODE_AUTO,
    ):
        """
        ``addr`` should be a tuple of (hostname, port) pointing to the machine
        running the DistributedEvaluator in primary mode. If mode is MODE_AUTO,
        the mode is determined by checking whether the hostname points to this
        host or not.
        ``authkey`` is the password used to restrict access to the manager; see
        ``Authentication Keys`` in the `multiprocessing` manual for more information.
        All DistributedEvaluators need to use the same authkey. Note that this needs
        to be a `bytes` object for Python 3.X, and should be in 2.7 for compatibility
        (identical in 2.7 to a `str` object).
        ``eval_function`` should take two arguments (a genome object and the
        configuration) and return a single float (the genome's fitness).
        'secondary_chunksize' specifies the number of genomes that will be sent to
        a secondary at any one time.
        ``num_workers`` is the number of child processes to use if in secondary
        mode. It defaults to None, which means `multiprocessing.cpu_count()`
        is used to determine this value. If 1 in a secondary node, the process creating
        the DistributedEvaluator instance will also do the evaluations.
        ``worker_timeout`` specifies the timeout (in seconds) for a secondary node
        getting the results from a worker subprocess; if None, there is no timeout.
        ``mode`` specifies the mode to run in; it defaults to MODE_AUTO.
        """
        self.addr = addr
        self.authkey = authkey
        self.eval_function = eval_function
        self.secondary_chunksize = secondary_chunksize
        self.slave_chunksize = secondary_chunksize # backward compatibility
        if num_workers:
            self.num_workers = num_workers
        else:
            try:
                # default to one worker per CPU (but never fewer than one)
                self.num_workers = max(1,multiprocessing.cpu_count())
            except (RuntimeError, AttributeError): # pragma: no cover
                print("multiprocessing.cpu_count() gave an error; assuming 1",
                      file=sys.stderr)
                self.num_workers = 1
        self.worker_timeout = worker_timeout
        # resolve MODE_AUTO into MODE_PRIMARY/MODE_SECONDARY right away
        self.mode = _determine_mode(self.addr, mode)
        self.em = _ExtendedManager(self.addr, self.authkey, mode=self.mode, start=False)
        # shared queues/namespace are populated lazily by start()
        self.inqueue = None
        self.outqueue = None
        self.namespace = None
        self.started = False
        # shutdown/reconnect bookkeeping, configured by start()
        self.exit_string = None
        self.exit_on_stop = True
        self.reconnect = False
        self.reconnect_max_time = None
        self.n_tasks = None
    def __getstate__(self):
        """Required by the pickle protocol."""
        # We do not actually save any state, but we need __getstate__ to be
        # called so that __setstate__ runs on the receiving side.
        # The queues/manager proxies held by this object are not picklable.
        return True # return some nonzero value
    def __setstate__(self, state):
        """Called when instances of this class are unpickled."""
        # The pickled payload carries no real state (see __getstate__);
        # re-fetch the queue/namespace proxies from the manager instead.
        self._set_shared_instances()
    def is_primary(self):
        """Returns True if the caller is the primary node."""
        return (self.mode == MODE_PRIMARY)
    def is_master(self): # pragma: no cover
        """Deprecated alias for :meth:`is_primary`; kept for old callers."""
        warnings.warn("Use is_primary, not is_master", DeprecationWarning)
        return self.is_primary()
def _do_exit(self):
if self.exit_string is None:
sys.exit(0)
else:
sys.exit(self.exit_string)
def start(self, exit_on_stop=True, secondary_wait=0, reconnect=False, reconnect_max_time=None):
"""
If the DistributedEvaluator is in primary mode, starts the manager
process and returns. In this case, the ``exit_on_stop`` argument will
be ignored.
If the DistributedEvaluator is in secondary mode, it connects to the manager
and waits for tasks.
If in secondary mode and ``exit_on_stop`` is True, sys.exit() will be called
when the connection is lost.
``secondary_wait`` specifies the time (in seconds) to sleep before actually
starting when in secondary mode.
If 'reconnect' is True, the secondary nodes will try to reconnect when
the connection is lost. In this case, sys.exit() will only be called
when 'exit_on_stop' is True and the primary node send a forced shutdown
command.
"""
if self.started:
raise RuntimeError("DistributedEvaluator already started!")
self.started = True
self.exit_on_stop = exit_on_stop
self.reconnect = reconnect
if reconnect_max_time is None:
if reconnect:
reconnect_max_time = max((5*60),(15*max(5,self.worker_timeout)))
else:
reconnect_max_time = max(60,(5*max(1,self.worker_timeout)))
self.reconnect_max_time = max(0.3,reconnect_max_time)
if self.mode == MODE_PRIMARY:
self._start_primary()
elif self.mode == MODE_SECONDARY:
time.sleep(secondary_wait)
while True:
self._start_secondary()
self._secondary_loop(reconnect_max_time=reconnect_max_time)
if self.exit_on_stop:
self._do_exit()
else:
self.inqueue = self.outqueue = self.namespace = None
if self.reconnect:
self.em.stop()
else:
break
if exit_on_stop:
self._do_exit()
else:
raise ValueError("Invalid mode {!r}!".format(self.mode))
def stop(self, wait=1, shutdown=True, force_secondary_shutdown=False):
"""
Stops all secondaries.
'wait' specifies the time (in seconds) to wait before shutting down the
manager or returning.
If 'shutdown', shutdown the manager.
If 'force_secondary_shutdown', shutdown the secondary nodes even if
they are started with 'reconnect=True'.
"""
if self.mode != MODE_PRIMARY:
raise ModeError("Not in primary mode!")
if not self.started:
raise RuntimeError("Not yet started!")
start_time = time.time()
num_added = 0
if self.n_tasks is None: # pragma: no cover
self.n_tasks = max(1, wait, self.worker_timeout)*5
warnings.warn("Self.n_tasks is None; estimating at {:n}".format(self.n_tasks))
while (num_added < self.n_tasks) and ((time.time() - start_time) <
max(1,
self.reconnect_max_time,
wait,
self.worker_timeout)):
try:
if force_secondary_shutdown:
self.inqueue.put(0, block=True, timeout=0.2)
else:
self.inqueue.put(1, block=True, timeout=0.2)
except (EOFError, IOError, OSError, socket.gaierror, TypeError, queue.Full,
managers.RemoteError, multiprocessing.ProcessError) as e:
if ("timed" in repr(e).lower()) or ("timeout" in repr(e).lower()):
if (time.time() - start_time) < max(1, wait, self.worker_timeout):
num_added += 1
continue
else:
break
else:
break
else:
num_added += 1
time_passed = time.time() - start_time
if time_passed < wait:
time.sleep(wait - time_passed)
if shutdown:
self.em.stop()
self.started = False
self.outqueue = self.inqueue = self.namespace = None
    def _start_primary(self):
        """Start as the primary: launch the manager and grab its proxies."""
        self.em.start()
        self._set_shared_instances()
    def _start_secondary(self):
        """Start as a secondary: connect to the manager and grab its proxies."""
        # NOTE(review): identical to _start_primary; the connect-vs-serve
        # distinction presumably lives inside _ExtendedManager.start().
        self.em.start()
        self._set_shared_instances()
    def _set_shared_instances(self):
        """Sets attributes from the shared instances."""
        # Fetch fresh proxy objects for the task queue, result queue and
        # shared namespace from the (re)started manager.
        self.inqueue = self.em.get_inqueue()
        self.outqueue = self.em.get_outqueue()
        self.namespace = self.em.get_namespace()
    def _reset_em(self):
        """Resets self.em and the shared instances."""
        # Build a brand-new manager connection (start=True connects/starts
        # immediately); used by secondaries when reconnecting.
        self.em = _ExtendedManager(self.addr, self.authkey, mode=self.mode, start=True)
        self._set_shared_instances()
@staticmethod
def _check_exception(e):
string = repr(e).lower()
if ('timed' in string) or ('timeout' in string):
return _EXCEPTION_TYPE_OK
elif isinstance(e, (EOFError, TypeError, socket.gaierror)):
return _EXCEPTION_TYPE_UNCERTAIN
elif (('eoferror' in string) or ('typeerror' in string) or ('gaierror' in string)
or ('pipeerror' in string) or ('authenticationerror' in string)
or ('refused' in string) or ('file descriptor' in string)):
return _EXCEPTION_TYPE_UNCERTAIN
return _EXCEPTION_TYPE_BAD
def _secondary_loop(self, reconnect_max_time=(5*60)):
"""The worker loop for the secondary nodes."""
if self.num_workers > 1:
pool = multiprocessing.Pool(self.num_workers)
else:
pool = None
should_reconnect = True
if self.reconnect:
em_bad = False
else:
em_bad = True
while should_reconnect:
last_time_done = time.time() # so that if loops below, have a chance to check _reset_em
running = True
try:
self._reset_em()
except (EOFError, IOError, OSError, socket.gaierror, TypeError,
managers.RemoteError, multiprocessing.ProcessError) as e:
if (time.time() - last_time_done) >= reconnect_max_time:
should_reconnect = False
em_bad = True
if self._check_exception(e) == _EXCEPTION_TYPE_BAD: # pragma: no cover
self.exit_on_stop = True
self.exit_string = repr(e)
break
elif self._check_exception(e) == _EXCEPTION_TYPE_BAD: # pragma: no cover
raise
else:
continue
last_time_done = time.time() # being successful at reconnecting can be used as a keepalive
while running:
try:
tasks = self.inqueue.get(block=True, timeout=0.2)
except queue.Empty:
continue
except (EOFError, TypeError, socket.gaierror,
managers.RemoteError, multiprocessing.ProcessError, IOError, OSError) as e:
if ('empty' in repr(e).lower()):
continue
curr_status = self._check_exception(e)
if curr_status in (_EXCEPTION_TYPE_OK, _EXCEPTION_TYPE_UNCERTAIN):
if (time.time() - last_time_done) >= reconnect_max_time:
if em_bad:
should_reconnect = False
break
elif curr_status == _EXCEPTION_TYPE_OK:
continue
else:
break
elif (time.time() - last_time_done) >= reconnect_max_time: # pragma: no cover
self.exit_on_stop = True
self.exit_string = repr(e)
should_reconnect = False
break
else: # pragma: no cover
raise
if isinstance(tasks, int): # from primary
running = False
should_reconnect = False
if tasks and self.reconnect:
self.exit_on_stop = False
elif not tasks:
self.reconnect = False
break
last_time_done = time.time()
if pool is None:
res = []
for genome_id, genome, config in tasks:
fitness = self.eval_function(genome, config)
res.append((genome_id, fitness))
else:
genome_ids = []
jobs = []
for genome_id, genome, config in tasks:
genome_ids.append(genome_id)
jobs.append(
pool.apply_async(
self.eval_function, (genome, config)
)
)
results = [
job.get(timeout=self.worker_timeout) for job in jobs
]
res = zip(genome_ids, results)
last_time_done = time.time()
try:
self.outqueue.put(res)
except queue.Full: # pragma: no cover
continue
except (EOFError, TypeError, socket.gaierror,
managers.RemoteError, multiprocessing.ProcessError,
IOError, OSError) as e:
if ('full' in repr(e).lower()):
continue
curr_status = self._check_exception(e)
if curr_status in (_EXCEPTION_TYPE_OK, _EXCEPTION_TYPE_UNCERTAIN):
if (time.time() - last_time_done) >= reconnect_max_time:
if em_bad:
should_reconnect = False
break
elif curr_status == _EXCEPTION_TYPE_OK:
continue
else:
break
elif (time.time() - last_time_done) >= reconnect_max_time: # pragma: no cover
self.exit_on_stop = True
self.exit_string = repr(e)
should_reconnect = False
break
else: # pragma: no cover
raise
else:
last_time_done = time.time()
if ((time.time() - last_time_done) >= reconnect_max_time):
if em_bad:
should_reconnect = False
break
if pool is not None:
pool.terminate()
    def evaluate(self, genomes, config):
        """
        Evaluates the genomes.
        This method raises a ModeError if the
        DistributedEvaluator is not in primary mode.
        """
        if self.mode != MODE_PRIMARY:
            raise ModeError("Not in primary mode!")
        # One task tuple per genome, plus an id -> genome map so fitness can
        # be written back after the (unordered) results arrive.
        tasks = [(genome_id, genome, config) for genome_id, genome in genomes]
        id2genome = {genome_id: genome for genome_id, genome in genomes}
        # Split into chunks; each chunk is processed by one secondary at a time.
        tasks = chunked(tasks, self.secondary_chunksize)
        n_tasks = len(tasks)
        for task in tasks:
            self.inqueue.put(task) # should this be w/timeouts and checking for exceptions?
        tresults = []
        # Expect exactly one result batch per chunk; poll with a short
        # timeout so transient manager errors just retry.
        while len(tresults) < n_tasks:
            try:
                sr = self.outqueue.get(block=True, timeout=0.2)
            except (queue.Empty, managers.RemoteError): # more detailed check?
                continue
            tresults.append(sr)
        results = []
        for sr in tresults:
            results += sr
        for genome_id, fitness in results:
            genome = id2genome[genome_id]
            genome.fitness = fitness
        # Remember the chunk count; stop() uses it to know how many stop
        # sentinels to enqueue.
        self.n_tasks = n_tasks
| |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom neural network layers.
Low-level primitives such as custom convolution with custom initialization.
"""
import math
import numpy as np
import tensorflow as tf
# Tensor layout identifiers: channels-first vs. channels-last.
NCHW, NHWC = 'NCHW', 'NHWC'
# Maps Keras/tf.layers 'data_format' strings to the op-level format names.
DATA_FORMAT_ORDER = {
    'channels_first': NCHW,
    'channels_last': NHWC
}
def smart_shape(x):
    """Return the shape of x, preferring static dimensions.

    Each entry is the statically-known dimension when available, otherwise
    the corresponding dynamic tf.shape() scalar.
    """
    static, dynamic = x.shape, tf.shape(x)
    return [dynamic[i] if static[i].value is None else static[i]
            for i in range(len(static))]
def to_nchw(x):
    """Transpose a 4D tensor from NHWC to NCHW layout."""
    return tf.transpose(x, [0, 3, 1, 2])
def to_nhwc(x):
    """Transpose a 4D tensor from NCHW to NHWC layout."""
    return tf.transpose(x, [0, 2, 3, 1])
def torus_pad(x, w, order=NCHW):
    """Pad a 4D image tensor by wrapping it around (torus topology).

    Each spatial side is extended by ``w`` pixels taken from the opposite
    edge. Returns x unchanged when w < 1.
    """
    if w < 1:
        return x
    if order == NCHW:
        # Spatial dims are axes 2 (height) and 3 (width).
        wrapped = tf.concat([x[:, :, -w:], x, x[:, :, :w]], axis=2)
        wrapped = tf.concat([wrapped[:, :, :, -w:], wrapped, wrapped[:, :, :, :w]], axis=3)
    else:
        # Spatial dims are axes 1 (height) and 2 (width).
        wrapped = tf.concat([x[:, -w:], x, x[:, :w]], axis=1)
        wrapped = tf.concat([wrapped[:, :, -w:], wrapped, wrapped[:, :, :w]], axis=2)
    return wrapped
def downscale2d(x, n=2, order=NCHW):
    """Box downscaling.
    Args:
        x: 4D tensor.
        n: integer scale.
        order: NCHW or NHWC.
    Returns:
        4D tensor down scaled by a factor n.
    """
    if n <= 1:
        return x
    # Average-pool with an n-by-n window and matching stride.
    if order == NCHW:
        return tf.nn.avg_pool(x, [1, 1, n, n], [1, 1, n, n], 'VALID', 'NCHW')
    return tf.nn.avg_pool(x, [1, n, n, 1], [1, n, n, 1], 'VALID', 'NHWC')
def upscale2d(x, n=2, order=NCHW):
    """Box upscaling (also called nearest neighbors).
    Args:
        x: 4D tensor in NCHW or NHWC format (see order).
        n: integer scale (must be a power of 2).
        order: NCHW or NHWC.
    Returns:
        4D tensor up scaled by a factor n.
    """
    if n == 1:
        return x
    # Static shape (for the channel dim) and dynamic shape (for the rest).
    s, ts = x.shape, tf.shape(x)
    if order == NCHW:
        # Insert a singleton axis after each spatial dim, tile it n times,
        # then collapse: every pixel becomes an n x n block.
        x = tf.reshape(x, [-1, s[1], ts[2], 1, ts[3], 1])
        x = tf.tile(x, [1, 1, 1, n, 1, n])
        x = tf.reshape(x, [-1, s[1], ts[2] * n, ts[3] * n])
    else:
        x = tf.reshape(x, [-1, ts[1], 1, ts[2], 1, s[3]])
        x = tf.tile(x, [1, 1, n, 1, n, 1])
        x = tf.reshape(x, [-1, ts[1] * n, ts[2] * n, s[3]])
    return x
def remove_details2d(x, n=2):
    """Remove box details by upscaling a downscaled image.
    Args:
        x: 4D tensor in NCHW format.
        n: integer scale (must be a power of 2).
    Returns:
        4D tensor image with removed details of size nxn.
    """
    if n == 1:
        return x
    coarse = downscale2d(x, n)
    return upscale2d(coarse, n)
def bicubic_downscale2d(x, n=2, order=NCHW):
    """Downscale x by a factor of n, using dense bicubic weights.
    Args:
        x: 4D tensor in NCHW format.
        n: integer scale (must be a power of 2).
    Returns:
        4D tensor down scaled by a factor n.
    """
    def kernel_weight(x):
        """https://clouard.users.greyc.fr/Pantheon/experiments/rescaling/index-en.html#bicubic"""
        # Piecewise cubic with support [-2, 2] (a = -0.5 convention).
        x = abs(x)
        if x <= 1:
            return 1.5 * x ** 3 - 2.5 * x ** 2 + 1
        elif 1 < x < 2:
            return - 0.5 * x ** 3 + 2.5 * x ** 2 - 4 * x + 2
        else:
            return 0
    def kernel():
        # Sample the 1D kernel at 4n points, normalize, and take the outer
        # product to obtain a separable (4n x 4n) 2D filter.
        k1d = np.array([kernel_weight((x + 0.5) / n) for x in range(-2 * n, 2 * n)])
        k1d /= k1d.sum()
        k2d = np.outer(k1d, k1d.T).astype('f')
        return tf.constant(k2d.reshape((4 * n, 4 * n, 1, 1)))
    if order == NHWC:
        x = to_nchw(x)
    # Reflect-pad so the kernel support never leaves the image, then apply
    # the filter depthwise by folding channels into the batch dimension.
    y = tf.pad(x, [[0, 0], [0, 0], [2 * n - 1, 2 * n], [2 * n - 1, 2 * n]], mode='REFLECT')
    s, ts = y.shape, tf.shape(y)
    y = tf.reshape(y, [ts[0] * s[1], 1, ts[2], ts[3]])
    y = tf.nn.conv2d(y, filter=kernel(), strides=[1, 1, n, n], padding='VALID', data_format='NCHW')
    y = tf.reshape(y, [ts[0], s[1], tf.shape(y)[2], tf.shape(y)[3]])
    return y if order == NCHW else to_nhwc(y)
def space_to_channels(x, n=2, order=NCHW):
    """Reshape image tensor by moving space to channels.
    Args:
        x: 4D tensor in NCHW format.
        n: integer scale (must be a power of 2).
    Returns:
        Reshaped 4D tensor image of shape (N, C * n**2, H // n, W // n).
    """
    s, ts = x.shape, tf.shape(x)
    if order == NCHW:
        # Split each spatial dim into (blocks, n), move the two n-factors
        # next to the channel dim, and fold them into it.
        x = tf.reshape(x, [-1, s[1], ts[2] // n, n, ts[3] // n, n])
        x = tf.transpose(x, [0, 1, 3, 5, 2, 4])
        x = tf.reshape(x, [-1, s[1] * (n ** 2), ts[2] // n, ts[3] // n])
    else:
        x = tf.reshape(x, [-1, ts[1] // n, n, ts[2] // n, n, s[3]])
        x = tf.transpose(x, [0, 1, 3, 2, 4, 5])
        x = tf.reshape(x, [-1, ts[1] // n, ts[2] // n, s[3] * (n ** 2)])
    return x
def channels_to_space(x, n=2, order=NCHW):
    """Reshape image tensor by moving channels to space.
    Args:
        x: 4D tensor in NCHW format.
        n: integer scale (must be a power of 2).
    Returns:
        Reshaped 4D tensor image of shape (N, C // n**2, H * n, W * n).
    """
    # Inverse of space_to_channels: split n*n factors off the channel dim
    # and interleave them into the spatial dims.
    s, ts = x.shape, tf.shape(x)
    if order == NCHW:
        x = tf.reshape(x, [-1, s[1] // (n ** 2), n, n, ts[2], ts[3]])
        x = tf.transpose(x, [0, 1, 4, 2, 5, 3])
        x = tf.reshape(x, [-1, s[1] // (n ** 2), ts[2] * n, ts[3] * n])
    elif order == NHWC:
        x = tf.reshape(x, [-1, ts[1], ts[2], n, n, s[3] // (n ** 2)])
        x = tf.transpose(x, [0, 1, 3, 2, 4, 5])
        x = tf.reshape(x, [-1, ts[1] * n, ts[2] * n, s[3] // (n ** 2)])
    else:
        assert 0, 'Only supporting NCHW and NHWC.'
    return x
class HeNormalInitializer(tf.initializers.random_normal):
    """He-normal weight initializer parameterized by activation slope.

    Draws from N(0, std^2) with std = sqrt(2 / ((1 + slope^2) * fan_in)),
    where fan_in is the product of all kernel dims except the last.
    """
    def __init__(self, slope, dtype=tf.float32):
        # slope: negative slope of the following (leaky) ReLU; 0 for ReLU.
        self.slope = slope
        self.dtype = dtype
    def get_config(self):
        return dict(slope=self.slope, dtype=self.dtype.name)
    def __call__(self, shape, dtype=None, partition_info=None):
        del partition_info
        if dtype is None:
            dtype = self.dtype
        std = np.sqrt(2) * tf.rsqrt((1. + self.slope ** 2) *
                                    tf.cast(tf.reduce_prod(shape[:-1]),
                                            tf.float32))
        return tf.random_normal(shape, stddev=std, dtype=dtype)
def blend_resolution(lores, hires, alpha):
    """Blend two images.
    Args:
        lores: 4D tensor in NCHW, low resolution image.
        hires: 4D tensor in NCHW, high resolution image.
        alpha: scalar tensor in [0, 1], 0 produces the low resolution, 1 the high one.
    Returns:
        4D tensor in NCHW of blended images.
    """
    # Linear interpolation: lores + alpha * (hires - lores).
    delta = hires - lores
    return lores + alpha * delta
class SingleUpdate:
    """Registry ensuring at most one update op is created per variable."""
    COLLECTION = 'SINGLE_UPDATE'

    @classmethod
    def get_update(cls, variable):
        """Return the update op registered for `variable`, or None."""
        for var, update_op in tf.get_collection(cls.COLLECTION):
            if var == variable:
                return update_op
        return None

    @classmethod
    def register_update(cls, variable, update):
        """Register `update` as the unique update op for `variable`."""
        assert cls.get_update(variable) is None
        tf.add_to_collection(cls.COLLECTION, (variable, update))
        return update
class Conv2DSpectralNorm(tf.layers.Conv2D):
    """Conv2D whose kernel is divided by its spectral norm (power iteration)."""
    def build(self, input_shape):
        # Let the parent create the kernel, then add the persistent power
        # iteration vector `u` without tripping the built flag twice.
        was_built = self.built
        tf.layers.Conv2D.build(self, input_shape)
        self.built = was_built
        self.u = self.add_variable(name='u', shape=[1, shape[-1]], dtype=tf.float32,
                                   initializer=tf.truncated_normal_initializer(),
                                   trainable=False)
        self.built = True
    def call(self, inputs):
        shape = self.kernel.shape.as_list()
        kernel = self.kernel
        if self.data_format == 'channels_first':
            kernel = tf.transpose(kernel, [0, 2, 3, 1])
        # Flatten to (fan_in, out_channels) and run one power-iteration step
        # to estimate the largest singular value sigma.
        kernel = tf.reshape(kernel, [-1, shape[-1]])
        u = self.u
        v_ = tf.nn.l2_normalize(tf.matmul(u, kernel, transpose_b=True))
        u_ = tf.nn.l2_normalize(tf.matmul(v_, kernel))
        sigma = tf.squeeze(tf.matmul(tf.matmul(v_, kernel), u_, transpose_b=True))
        # Register the u update only once, even if call() runs multiple times.
        if SingleUpdate.get_update(u) is None:
            self.add_update(SingleUpdate.register_update(u, tf.assign(u, u_)))
        outputs = self._convolution_op(inputs, self.kernel / sigma)
        if self.use_bias:
            outputs = tf.nn.bias_add(outputs, self.bias, data_format=DATA_FORMAT_ORDER[self.data_format])
        if self.activation is None:
            return outputs
        return self.activation(outputs)
def conv2d_spectral_norm(x, filters, kernel_size, strides=1, padding='same',
                         activation=None, data_format='channels_last', **kwargs):
    """Functional wrapper: apply a Conv2DSpectralNorm layer to x."""
    return Conv2DSpectralNorm(filters, kernel_size, strides, padding,
                              activation=activation,
                              data_format=data_format, **kwargs).apply(x)
class DenseSpectralNorm(tf.layers.Dense):
    """Spectral Norm version of tf.layers.Dense."""
    def build(self, input_shape):
        # Persistent power-iteration vector, shape (1, units).
        self.u = self.add_variable(name='u', shape=[1, self.units], dtype=tf.float32,
                                   initializer=tf.truncated_normal_initializer(),
                                   trainable=False)
        return tf.layers.Dense.build(self, input_shape)
    def call(self, inputs):
        inputs = tf.convert_to_tensor(inputs, dtype=self.dtype)
        # One power-iteration step to estimate the kernel's top singular
        # value sigma, then apply the sigma-normalized kernel.
        u = self.u
        v_ = tf.nn.l2_normalize(tf.matmul(u, self.kernel, transpose_b=True))
        u_ = tf.nn.l2_normalize(tf.matmul(v_, self.kernel))
        sigma = tf.squeeze(tf.matmul(tf.matmul(v_, self.kernel), u_, transpose_b=True))
        if SingleUpdate.get_update(u) is None:
            self.add_update(SingleUpdate.register_update(u, tf.assign(u, u_)))
        outputs = tf.matmul(inputs, self.kernel / sigma)
        if self.use_bias:
            outputs = tf.nn.bias_add(outputs, self.bias)
        if self.activation is not None:
            return self.activation(outputs)  # pylint: disable=not-callable
        return outputs
def dense_spectral_norm(inputs, units, activation=None, **kwargs):
    """Spectral Norm version of tf.layers.dense."""
    return DenseSpectralNorm(units, activation, **kwargs).apply(inputs)
class DenseSpectralNormCustom(tf.layers.Dense):
    """Spectral Norm version of tf.layers.Dense."""
    def build(self, input_shape):
        # Persistent left (u) and right (v) power-iteration vectors.
        shape = [input_shape[-1], self.units]
        self.u = self.add_variable(name='u', shape=[1, shape[0]], dtype=tf.float32, trainable=False)
        self.v = self.add_variable(name='v', shape=[shape[1], 1], dtype=tf.float32, trainable=False)
        return tf.layers.Dense.build(self, input_shape)
    def call(self, inputs):
        inputs = tf.convert_to_tensor(inputs, dtype=self.dtype)
        # One power-iteration step; sigma = u * W * v estimates the top
        # singular value of the kernel.
        u, v = self.u, self.v
        v_ = tf.nn.l2_normalize(tf.reshape(tf.matmul(u, self.kernel), v.shape))
        u_ = tf.nn.l2_normalize(tf.reshape(tf.matmul(self.kernel, v), u.shape))
        sigma = tf.matmul(tf.matmul(u, self.kernel), v)[0, 0]
        if SingleUpdate.get_update(u) is None:
            self.add_update(SingleUpdate.register_update(u, tf.assign(u, u_)))
            self.add_update(SingleUpdate.register_update(v, tf.assign(v, v_)))
        outputs = tf.matmul(inputs, self.kernel / sigma)
        if self.use_bias:
            outputs = tf.nn.bias_add(outputs, self.bias)
        if self.activation is not None:
            return self.activation(outputs)  # pylint: disable=not-callable
        return outputs
def dense_spectral_norm_custom(inputs, units, activation=None, **kwargs):
    """Spectral Norm version of tf.layers.dense."""
    return DenseSpectralNormCustom(units, activation, **kwargs).apply(inputs)
def kaiming_scale(shape, activation):
    """Return the He/Kaiming runtime weight scale for a kernel shape.

    scale = sqrt(2 / ((1 + slope^2) * fan_in)), where fan_in is the product
    of all kernel dims except the last and slope is the activation's
    negative slope (0 for relu, 0.2 for leaky_relu, 1 otherwise).
    """
    slopes = {
        tf.nn.relu: 0,
        tf.nn.leaky_relu: 0.2
    }
    a = slopes.get(activation, 1)
    fan_in = np.prod(shape[:-1])
    return np.sqrt(2. / ((1 + a ** 2) * fan_in))
class DenseScaled(tf.layers.Dense):
    """Dense layer with runtime He/Kaiming weight scaling and optional gain."""

    def call(self, inputs):
        # Scale weights at call time instead of baking the scale into the
        # initializer (equalized learning rate).
        weight_scale = kaiming_scale(self.kernel.get_shape().as_list(),
                                     self.activation)
        if hasattr(self, 'gain'):
            weight_scale *= self.gain
        outputs = tf.matmul(inputs, self.kernel * weight_scale)
        if self.use_bias:
            outputs = tf.nn.bias_add(outputs, self.bias)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def set_gain(self, gain):
        """Set an extra multiplicative gain applied on top of the He scale."""
        self.gain = gain
class Conv2DScaled(tf.layers.Conv2D):
    """Conv2D layer with runtime He/Kaiming weight scaling and optional gain."""

    def call(self, inputs):
        weight_scale = kaiming_scale(self.kernel.get_shape().as_list(),
                                     self.activation)
        if hasattr(self, 'gain'):
            weight_scale *= self.gain
        outputs = self._convolution_op(inputs, self.kernel * weight_scale)
        # Only 2D convolutions are supported by the bias_add format below.
        assert self.rank == 2
        if self.use_bias:
            outputs = tf.nn.bias_add(outputs, self.bias, DATA_FORMAT_ORDER[self.data_format])
        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def set_gain(self, gain):
        """Set an extra multiplicative gain applied on top of the He scale."""
        self.gain = gain
def conv2d_scaled(x, filters, kernel_size, strides=1, padding='same',
                  activation=None, gain=1, data_format='channels_first', **kwargs):
    """Functional wrapper: apply a Conv2DScaled layer (unit-normal init)."""
    conv = Conv2DScaled(filters, kernel_size, strides, padding,
                        activation=activation,
                        data_format=data_format,
                        kernel_initializer=tf.initializers.random_normal(stddev=1.), **kwargs)
    conv.set_gain(gain)
    return conv.apply(x)
def dense_scaled(x, filters, activation=tf.nn.leaky_relu, gain=1, **kwargs):
    """Functional wrapper: apply a DenseScaled layer (unit-normal init)."""
    dense = DenseScaled(filters,
                        activation=activation,
                        kernel_initializer=tf.initializers.random_normal(stddev=1.),
                        **kwargs)
    dense.set_gain(gain)
    return dense.apply(x)
def channel_norm(x):
    """Channel normalization.
    Args:
        x: nD tensor with channels in dimension 1.
    Returns:
        nD tensor with normalized channels.
    """
    # Normalize each position to unit RMS across the channel dimension;
    # the epsilon guards against division by zero.
    mean_sq = tf.reduce_mean(tf.square(x), [1], keepdims=True)
    return x * tf.rsqrt(mean_sq + 1e-8)
def minibatch_mean_stddev(x):
    """Computes the standard deviation average.
    This is used by the discriminator as a form of batch discrimination.
    Args:
        x: nD tensor for which to compute standard deviation average.
    Returns:
        a scalar, the mean standard deviation of variable x.
    """
    # Per-feature stddev over the batch dimension (epsilon for stability),
    # then averaged over all features into a single scalar.
    mean = tf.reduce_mean(x, 0, keepdims=True)
    vals = tf.sqrt(tf.reduce_mean(tf.squared_difference(x, mean), 0) + 1e-8)
    vals = tf.reduce_mean(vals)
    return vals
def scalar_concat(x, scalar):
    """Concatenate a scalar to a 4D tensor as an extra channel.
    Args:
        x: 4D image tensor in NCHW format.
        scalar: a scalar to concatenate to the tensor.
    Returns:
        a 4D tensor with one extra channel containing the value scalar at
        every position.
    """
    s = tf.shape(x)
    # Broadcast the scalar into an (N, 1, H, W) plane and append it.
    return tf.concat([x, tf.ones([s[0], 1, s[2], s[3]]) * scalar], axis=1)
class ClassBiasScale(tf.layers.Layer):
    """For a class c, return x*gamma[c] + beta[c]"""
    def __init__(self, nclass, name=None, trainable=True, **kwargs):
        super(ClassBiasScale, self).__init__(
            name=name, trainable=trainable, **kwargs)
        # Number of classes; one (gamma, beta) row is learned per class.
        self.nclass = nclass
        self.gamma = None
        self.beta = None
    def build(self, input_shape):
        # Per-class, per-channel affine parameters (channels = input_shape[1]).
        self.beta = self.add_variable(name='beta', shape=[self.nclass, input_shape[1]], dtype=tf.float32,
                                      initializer=tf.initializers.zeros, trainable=True)
        self.gamma = self.add_variable(name='gamma', shape=[self.nclass, input_shape[1]], dtype=tf.float32,
                                       initializer=tf.initializers.zeros, trainable=True)
        self.built = True
    def call(self, inputs, labels):
        ndims = len(inputs.get_shape())
        with tf.colocate_with(self.beta):
            beta = tf.gather(self.beta, labels)
        with tf.colocate_with(self.gamma):
            gamma = tf.gather(self.gamma, labels)
        # Gamma is squashed through a sigmoid, so the scale lies in (0, 1).
        # NOTE(review): zero init means the initial scale is sigmoid(0)=0.5 —
        # confirm this is intended.
        gamma = tf.nn.sigmoid(gamma)
        # Reshape to (N, C, 1, ..., 1) so the affine broadcasts over space.
        reshape = [tf.shape(inputs)[0], inputs.shape[1]] + [1] * (ndims - 2)
        return inputs * tf.reshape(gamma, reshape) + tf.reshape(beta, reshape)
    def compute_output_shape(self, input_shape):
        return input_shape
def conv2d_mono(x, kernel, order=NCHW):
    """2D convolution using the same filter for every channel.
    :param x: 4D input tensor of the images.
    :param kernel: 2D input tensor of the convolution to apply.
    :param order: enum {NCHW, NHWC}, the format of the input tensor.
    :return: a 4D output tensor resulting from the convolution.
    """
    y = x if order == NCHW else tf.transpose(x, [0, 3, 1, 2])
    # Fold channels into the batch dim so one single-channel filter is
    # applied independently to every channel.
    s = smart_shape(y)
    y = tf.reshape(y, [s[0] * s[1], 1, s[2], s[3]])
    y = tf.nn.conv2d(y, kernel[:, :, None, None], [1] * 4, 'VALID', data_format=NCHW)
    t = smart_shape(y)
    y = tf.reshape(y, [s[0], s[1], t[2], t[3]])
    return y if order == NCHW else tf.transpose(y, [0, 2, 3, 1])
def class_bias_scale(inputs, labels, nclass):
    """For a class c, return x*gamma[c] + beta[c]"""
    return ClassBiasScale(nclass).apply(inputs, labels)
def blur_kernel_area(radius):
    """Compute an area blurring kernel.
    :param radius: float in [0, inf[, the ratio of the area.
    :return: a 2D convolution kernel.
    """
    radius = max(radius, 1e-8)
    cr = 1 + round(math.ceil(radius))
    # Build the non-negative quadrant; the outermost row/column only
    # partially overlaps the area, so it receives a fractional weight.
    quadrant = np.ones((cr, cr), 'f')
    edge = radius + 2 - cr
    quadrant[-1] *= edge
    quadrant[:, -1] *= edge
    # Mirror to the full odd-sized kernel and normalize to unit sum.
    full = np.concatenate([quadrant[::-1], quadrant[1:]], axis=0)
    full = np.concatenate([full[:, ::-1], full[:, 1:]], axis=1)
    return full / full.sum()
def blur_apply(x, kernel, order=NCHW):
    """Reflect-pad x so the output keeps its size, then convolve every
    channel with the same 2D blur kernel.

    :param x: 4D image tensor.
    :param kernel: 2D convolution kernel (e.g. from blur_kernel_area).
    :param order: enum {NCHW, NHWC}, the format of the input tensor.
    :return: blurred 4D tensor with the same spatial size as x.
    """
    h, w = kernel.shape[0], kernel.shape[1]
    if order == NCHW:
        x = tf.pad(x, [[0] * 2, [0] * 2, [h // 2] * 2, [w // 2] * 2], 'REFLECT')
    else:
        x = tf.pad(x, [[0] * 2, [h // 2] * 2, [w // 2] * 2, [0] * 2], 'REFLECT')
    return conv2d_mono(x, kernel, order)
| |
#!/usr/bin/python
#
# Copyright (c) 2011 The Lioncoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to sleep after a failed RPC call before retrying.
ERR_SLEEP = 15
# Initial upper bound of the nonce search space per getwork round.
MAX_NONCE = 1000000L
# Populated from the key=value config file in __main__.
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class LioncoinRPC:
	# Monotonically increasing JSON-RPC request id (class-level counter).
	OBJID = 1

	def __init__(self, host, port, username, password):
		# HTTP Basic auth header for the JSON-RPC endpoint.
		authpair = "%s:%s" % (username, password)
		self.authhdr = "Basic %s" % (base64.b64encode(authpair))
		# Plain-HTTP connection with a 30 second timeout (Python 2 httplib).
		self.conn = httplib.HTTPConnection(host, port, False, 30)
	def rpc(self, method, params=None):
		"""Issue a JSON-RPC 1.1 call; returns the result, the error object,
		or None on transport/decode failure."""
		self.OBJID += 1
		obj = { 'version' : '1.1',
			'method' : method,
			'id' : self.OBJID }
		if params is None:
			obj['params'] = []
		else:
			obj['params'] = params
		self.conn.request('POST', '/', json.dumps(obj),
			{ 'Authorization' : self.authhdr,
			  'Content-type' : 'application/json' })
		resp = self.conn.getresponse()
		if resp is None:
			print "JSON-RPC: no response"
			return None
		body = resp.read()
		resp_obj = json.loads(body)
		if resp_obj is None:
			print "JSON-RPC: cannot JSON-decode body"
			return None
		# NOTE(review): on RPC error the *error object* is returned, not
		# raised — callers must distinguish it from a normal result.
		if 'error' in resp_obj and resp_obj['error'] != None:
			return resp_obj['error']
		if 'result' not in resp_obj:
			print "JSON-RPC: no result in object"
			return None
		return resp_obj['result']
	def getblockcount(self):
		"""Return the current block height."""
		return self.rpc('getblockcount')
	def getwork(self, data=None):
		"""Fetch new work (data=None) or submit a solved block."""
		return self.rpc('getwork', data)
def uint32(x):
	# Mask to the low 32 bits (unsigned semantics via Python 2 long).
	return x & 0xffffffffL
def bytereverse(x):
	# Reverse the byte order of a 32-bit word (endianness swap).
	return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
			(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
	# Byte-swap each 32-bit word of the buffer in place (word order kept).
	# Assumes len(in_buf) is a multiple of 4.
	out_words = []
	for i in range(0, len(in_buf), 4):
		word = struct.unpack('@I', in_buf[i:i+4])[0]
		out_words.append(struct.pack('@I', bytereverse(word)))
	return ''.join(out_words)
def wordreverse(in_buf):
	# Reverse the order of the 32-bit words in the buffer (bytes within
	# each word are kept).
	out_words = []
	for i in range(0, len(in_buf), 4):
		out_words.append(in_buf[i:i+4])
	out_words.reverse()
	return ''.join(out_words)
class Miner:
	def __init__(self, id):
		# Worker id (used for hashmeter output) and adaptive nonce range.
		self.id = id
		self.max_nonce = MAX_NONCE

	def work(self, datastr, targetstr):
		"""Scan nonces for one getwork unit.

		Returns (hashes_done, nonce_bin) where nonce_bin is the packed
		winning nonce or None if no solution was found in range.
		"""
		# decode work data hex string to binary
		static_data = datastr.decode('hex')
		static_data = bufreverse(static_data)

		# the first 76b of 80b do not change
		blk_hdr = static_data[:76]

		# decode 256-bit target value
		targetbin = targetstr.decode('hex')
		targetbin = targetbin[::-1]	# byte-swap and dword-swap
		targetbin_str = targetbin.encode('hex')
		target = long(targetbin_str, 16)

		# pre-hash first 76b of block header
		static_hash = hashlib.sha256()
		static_hash.update(blk_hdr)

		for nonce in xrange(self.max_nonce):

			# encode 32-bit nonce value
			nonce_bin = struct.pack("<I", nonce)

			# hash final 4b, the nonce value
			hash1_o = static_hash.copy()
			hash1_o.update(nonce_bin)
			hash1 = hash1_o.digest()

			# sha256 hash of sha256 hash
			hash_o = hashlib.sha256()
			hash_o.update(hash1)
			hash = hash_o.digest()

			# quick test for winning solution: high 32 bits zero?
			if hash[-4:] != '\0\0\0\0':
				continue

			# convert binary hash to 256-bit Python long
			hash = bufreverse(hash)
			hash = wordreverse(hash)

			hash_str = hash.encode('hex')
			l = long(hash_str, 16)

			# proof-of-work test:  hash < target
			if l < target:
				print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
				return (nonce + 1, nonce_bin)
			else:
				print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)

		return (nonce + 1, None)

	def submit_work(self, rpc, original_data, nonce_bin):
		"""Splice the winning nonce into the original work and submit it."""
		nonce_bin = bufreverse(nonce_bin)
		nonce = nonce_bin.encode('hex')
		# The nonce occupies hex chars 152..160 of the 256-char data field.
		solution = original_data[:152] + nonce + original_data[160:256]
		param_arr = [ solution ]
		result = rpc.getwork(param_arr)
		print time.asctime(), "--> Upstream RPC result:", result

	def iterate(self, rpc):
		"""One getwork round: fetch work, scan nonces, maybe submit."""
		work = rpc.getwork()
		if work is None:
			time.sleep(ERR_SLEEP)
			return
		if 'data' not in work or 'target' not in work:
			time.sleep(ERR_SLEEP)
			return

		time_start = time.time()

		(hashes_done, nonce_bin) = self.work(work['data'],
						     work['target'])

		time_end = time.time()
		time_diff = time_end - time_start

		# Re-size the nonce range so one round lasts about 'scantime'
		# seconds, capped just below the 32-bit limit.
		self.max_nonce = long(
			(hashes_done * settings['scantime']) / time_diff)
		if self.max_nonce > 0xfffffffaL:
			self.max_nonce = 0xfffffffaL

		if settings['hashmeter']:
			print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
			      self.id, hashes_done,
			      (hashes_done / 1000.0) / time_diff)

		if nonce_bin is not None:
			self.submit_work(rpc, work['data'], nonce_bin)

	def loop(self):
		"""Mine forever against the configured RPC endpoint."""
		rpc = LioncoinRPC(settings['host'], settings['port'],
				 settings['rpcuser'], settings['rpcpass'])
		if rpc is None:
			return

		while True:
			self.iterate(rpc)
def loop(self):
rpc = LioncoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
	# Entry point for each worker process: run one Miner forever.
	miner = Miner(id)
	miner.loop()
if __name__ == '__main__':
	if len(sys.argv) != 2:
		print "Usage: pyminer.py CONFIG-FILE"
		sys.exit(1)

	# Parse the key=value config file, ignoring '#' comment lines.
	f = open(sys.argv[1])
	for line in f:
		# skip comment lines
		m = re.search('^\s*#', line)
		if m:
			continue

		# parse key=value lines
		m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()

	# Fill in defaults for optional settings.
	if 'host' not in settings:
		settings['host'] = '127.0.0.1'
	if 'port' not in settings:
		settings['port'] = 8332
	if 'threads' not in settings:
		settings['threads'] = 1
	if 'hashmeter' not in settings:
		settings['hashmeter'] = 0
	if 'scantime' not in settings:
		settings['scantime'] = 30L
	if 'rpcuser' not in settings or 'rpcpass' not in settings:
		print "Missing username and/or password in cfg file"
		sys.exit(1)

	# Config values arrive as strings; coerce the numeric ones.
	settings['port'] = int(settings['port'])
	settings['threads'] = int(settings['threads'])
	settings['hashmeter'] = int(settings['hashmeter'])
	settings['scantime'] = long(settings['scantime'])

	# Spawn one mining process per configured thread, staggered by 1s.
	thr_list = []
	for thr_id in range(settings['threads']):
		p = Process(target=miner_thread, args=(thr_id,))
		p.start()
		thr_list.append(p)
		time.sleep(1)			# stagger threads

	print settings['threads'], "mining threads started"

	print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
	try:
		for thr_proc in thr_list:
			thr_proc.join()
	except KeyboardInterrupt:
		pass
	print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| |
# Copyright (c) 2011 Bastian Venthur
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Python library for the AR.Drone.
This module was tested with Python 2.6.6 and AR.Drone vanilla firmware 1.5.1.
"""
import socket
import struct
import sys
import threading
import multiprocessing
import arnetwork
__author__ = "Bastian Venthur"

# UDP ports exposed by the AR.Drone firmware.
ARDRONE_NAVDATA_PORT = 5554
ARDRONE_VIDEO_PORT = 5555
ARDRONE_COMMAND_PORT = 5556
class ARDrone(object):
    """ARDrone Class.

    Instantiate this class to control your drone and receive decoded video
    and navdata.
    """

    def __init__(self):
        # Sequence number attached to every AT command; incremented in at().
        self.seq_nr = 1
        # Watchdog period in seconds: commwdg() fires whenever no other
        # command has been sent for this long.
        self.timer_t = 0.2
        self.com_watchdog_timer = threading.Timer(self.timer_t, self.commwdg)
        # Serialises seq_nr updates and watchdog-timer replacement (see at()).
        self.lock = threading.Lock()
        # Default movement speed, float in [0..1] (see set_speed()).
        self.speed = 0.2
        self.at(at_config, "general:navdata_demo", "TRUE")
        # Pipes to the network process: decoded video, navdata and control.
        self.video_pipe, video_pipe_other = multiprocessing.Pipe()
        self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
        self.com_pipe, com_pipe_other = multiprocessing.Pipe()
        self.network_process = arnetwork.ARDroneNetworkProcess(nav_pipe_other, video_pipe_other, com_pipe_other)
        self.network_process.start()
        self.ipc_thread = arnetwork.IPCThread(self)
        self.ipc_thread.start()
        # Latest frame/navdata; presumably filled in by the IPC thread --
        # TODO confirm against arnetwork.IPCThread.
        self.image = ""
        self.navdata = dict()
        self.time = 0

    def takeoff(self):
        """Make the drone takeoff."""
        self.at(at_ftrim)
        self.at(at_config, "control:altitude_max", "20000")
        self.at(at_ref, True)

    def land(self):
        """Make the drone land."""
        self.at(at_ref, False)

    def hover(self):
        """Make the drone hover."""
        # progressive=False puts the drone in hovering mode (see at_pcmd).
        self.at(at_pcmd, False, 0, 0, 0, 0)

    def move_left(self):
        """Make the drone move left."""
        self.at(at_pcmd, True, -self.speed, 0, 0, 0)

    def move_right(self):
        """Make the drone move right."""
        self.at(at_pcmd, True, self.speed, 0, 0, 0)

    def move_up(self):
        """Make the drone rise upwards."""
        self.at(at_pcmd, True, 0, 0, self.speed, 0)

    def move_down(self):
        """Make the drone descend downwards."""
        self.at(at_pcmd, True, 0, 0, -self.speed, 0)

    def move_forward(self):
        """Make the drone move forward."""
        # Negative front-back tilt means forwards (see at_pcmd).
        self.at(at_pcmd, True, 0, -self.speed, 0, 0)

    def move_backward(self):
        """Make the drone move backwards."""
        self.at(at_pcmd, True, 0, self.speed, 0, 0)

    def turn_left(self):
        """Make the drone rotate left."""
        self.at(at_pcmd, True, 0, 0, 0, -self.speed)

    def turn_right(self):
        """Make the drone rotate right."""
        self.at(at_pcmd, True, 0, 0, 0, self.speed)

    def perform_op(self, mlmr, mfmb, mumd, rlrr):
        """Perform a combined move (left/right, fwd/back, up/down, rotate).

        Positive values mean right, forward, up and rotate-right; note that
        mfmb is negated before sending, matching at_pcmd's sign convention.
        Speeds should be in the range of -1 to 1.
        """
        self.at(at_pcmd, True, mlmr, -mfmb, mumd, rlrr)

    def reset(self):
        """Toggle the drone's emergency state."""
        self.at(at_ref, False, True)
        self.at(at_ref, False, False)

    def trim(self):
        """Flat trim the drone."""
        self.at(at_ftrim)

    def set_speed(self, speed):
        """Set the drone's speed.

        Valid values are floats from [0..1]
        """
        self.speed = speed

    def at(self, cmd, *args, **kwargs):
        """Wrapper for the low level at commands.

        This method takes care that the sequence number is increased after
        each at command and the watchdog timer is started to make sure the
        drone receives a command at least every second.
        """
        self.lock.acquire()
        self.com_watchdog_timer.cancel()
        cmd(self.seq_nr, *args, **kwargs)
        self.seq_nr += 1
        # A threading.Timer can only be started once, so a fresh one is
        # created after every command sent.
        self.com_watchdog_timer = threading.Timer(self.timer_t, self.commwdg)
        self.com_watchdog_timer.start()
        self.lock.release()

    def commwdg(self):
        """Communication watchdog signal.

        This needs to be sent regularly to keep the communication w/ the
        drone alive.
        """
        self.at(at_comwdg)

    def halt(self):
        """Shutdown the drone.

        This method does not land or halt the actual drone, but the
        communication with the drone. You should call it at the end of your
        application to close all sockets, pipes, processes and threads
        related with this object.
        """
        self.lock.acquire()
        self.com_watchdog_timer.cancel()
        self.com_pipe.send('die!')
        self.network_process.terminate()
        self.network_process.join()
        self.ipc_thread.stop()
        self.ipc_thread.join()
        self.lock.release()
###############################################################################
### Low level AT Commands
###############################################################################
def at_ref(seq, takeoff, emergency=False):
    """Send a REF command: take-off/landing and emergency stop/reset.

    Parameters:
    seq -- sequence number
    takeoff -- True: Takeoff / False: Land
    emergency -- True: turn off the engines
    """
    # Base value with the always-set control bits; bit 9 requests take-off,
    # bit 8 toggles the emergency state.  Neither flag bit is set in the
    # base, so OR-ing is equivalent to the additions used historically.
    flags = 0b10001010101000000000000000000
    if takeoff:
        flags |= 0b1000000000
    if emergency:
        flags |= 0b0100000000
    at("REF", seq, [flags])
def at_pcmd(seq, progressive, lr, fb, vv, va):
    """Send a PCMD command, making the drone move (translate/rotate).

    Parameters:
    seq -- sequence number
    progressive -- True: enable progressive commands, False: disable (i.e.
                   enable hovering mode)
    lr -- left-right tilt: float [-1..1] negative: left, positive: right
    fb -- front-back tilt: float [-1..1] negative: forwards, positive:
          backwards
    vv -- vertical speed: float [-1..1] negative: go down, positive: rise
    va -- angular speed: float [-1..1] negative: spin left, positive: spin
          right

    The above float values are a percentage of the maximum speed.
    """
    mode = int(bool(progressive))
    at("PCMD", seq, [mode, float(lr), float(fb), float(vv), float(va)])
def at_ftrim(seq):
    """Send an FTRIM command, telling the drone it is lying horizontally.

    Parameters:
    seq -- sequence number
    """
    at("FTRIM", seq, [])
def at_zap(seq, stream):
    """Send a ZAP command, selecting the stream sent on the video UDP port.

    Parameters:
    seq -- sequence number
    stream -- Integer: video stream to broadcast
    """
    # FIXME: improve parameters to select the modes directly
    at("ZAP", seq, [stream])
def at_config(seq, option, value):
    """Send a CONFIG command, setting one configuration parameter."""
    params = [str(option), str(value)]
    at("CONFIG", seq, params)
def at_comwdg(seq):
    """Send a COMWDG command, resetting the communication watchdog."""
    # FIXME: the protocol attaches no sequence number to COMWDG
    at("COMWDG", seq, [])
def at_aflight(seq, flag):
    """Send an AFLIGHT command, starting or stopping autonomous flight.

    Parameters:
    seq -- sequence number
    flag -- Integer: 1: start flight, 0: stop flight
    """
    at("AFLIGHT", seq, [flag])
def at_pwm(seq, m1, m2, m3, m4):
    """
    Sends control values directly to the engines, overriding control loops.

    Parameters:
    seq -- sequence number
    m1 -- front left command
    m2 -- front right command
    m3 -- back right command
    m4 -- back left command
    """
    # FIXME: what type do mx have?
    # Not implemented yet: no AT*PWM command is actually sent.
    pass
def at_led(seq, anim, f, d):
    """
    Control the drones LED.

    Parameters:
    seq -- sequence number
    anim -- Integer: animation to play
    f -- ?: frequence in HZ of the animation
    d -- Integer: total duration in seconds of the animation
    """
    # Not implemented yet: no AT*LED command is actually sent.
    pass
def at_anim(seq, anim, d):
    """Send an ANIM command: play a predefined movement (animation).

    Parameters:
    seq -- sequence number
    anim -- Integer: animation to play
    d -- Integer: total duration in seconds of the animation
    """
    at("ANIM", seq, [anim, d])
def at(command, seq, params):
    """Format and send one AT command to the drone over UDP.

    Parameters:
    command -- the command name (without the "AT*" prefix)
    seq -- the sequence number
    params -- a list of elements which can be either int, float or string

    Ints are sent verbatim, floats as their IEEE-754 bit pattern interpreted
    as a signed integer (see f2i), strings double-quoted.
    """
    param_str = ''
    for p in params:
        # isinstance() instead of type() == comparisons.  Floats are tested
        # first; note that a bool (an int subclass) is now encoded as 0/1,
        # where the old type()-based check silently dropped it.
        if isinstance(p, float):
            param_str += ",%d" % f2i(p)
        elif isinstance(p, int):
            param_str += ",%d" % p
        elif isinstance(p, str):
            param_str += ',"' + p + '"'
    msg = "AT*%s=%i%s\r" % (command, seq, param_str)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.sendto(msg, ("192.168.1.1", ARDRONE_COMMAND_PORT))
    finally:
        # BUG FIX: the original leaked one socket per command sent.
        sock.close()
def f2i(f):
    """Interpret IEEE-754 floating-point value as signed integer.

    Arguments:
    f -- floating point value
    """
    packed = struct.pack('f', f)
    (as_int,) = struct.unpack('i', packed)
    return as_int
###############################################################################
### navdata
###############################################################################
def decode_navdata(packet):
    """Decode a navdata packet.

    Returns a dict with 'drone_state' (a dict of single-bit flags),
    'header', 'seq_nr' and 'vision_flag', plus one entry per navdata option
    keyed by its numeric option id.  The packet starts with a 16-byte
    "IIII" header followed by options, each prefixed by an "HH" (id, size)
    header.  NOTE(review): Python 2 code -- "".join over "c"-unpacked items
    assumes byte strings.
    """
    offset = 0
    # header word, drone state bit-field, sequence number, vision flag
    _ = struct.unpack_from("IIII", packet, offset)
    drone_state = dict()
    drone_state['fly_mask'] = _[1] & 1  # FLY MASK : (0) ardrone is landed, (1) ardrone is flying
    drone_state['video_mask'] = _[1] >> 1 & 1  # VIDEO MASK : (0) video disable, (1) video enable
    drone_state['vision_mask'] = _[1] >> 2 & 1  # VISION MASK : (0) vision disable, (1) vision enable */
    drone_state['control_mask'] = _[1] >> 3 & 1  # CONTROL ALGO (0) euler angles control, (1) angular speed control */
    drone_state['altitude_mask'] = _[1] >> 4 & 1  # ALTITUDE CONTROL ALGO : (0) altitude control inactive (1) altitude control active */
    drone_state['user_feedback_start'] = _[1] >> 5 & 1  # USER feedback : Start button state */
    drone_state['command_mask'] = _[1] >> 6 & 1  # Control command ACK : (0) None, (1) one received */
    drone_state['fw_file_mask'] = _[1] >> 7 & 1  # Firmware file is good (1) */
    drone_state['fw_ver_mask'] = _[1] >> 8 & 1  # Firmware update is newer (1) */
    drone_state['fw_upd_mask'] = _[1] >> 9 & 1  # Firmware update is ongoing (1) */
    drone_state['navdata_demo_mask'] = _[1] >> 10 & 1  # Navdata demo : (0) All navdata, (1) only navdata demo */
    drone_state['navdata_bootstrap'] = _[1] >> 11 & 1  # Navdata bootstrap : (0) options sent in all or demo mode, (1) no navdata options sent */
    drone_state['motors_mask'] = _[1] >> 12 & 1  # Motor status : (0) Ok, (1) Motors problem */
    drone_state['com_lost_mask'] = _[1] >> 13 & 1  # Communication lost : (1) com problem, (0) Com is ok */
    drone_state['vbat_low'] = _[1] >> 15 & 1  # VBat low : (1) too low, (0) Ok */
    drone_state['user_el'] = _[1] >> 16 & 1  # User Emergency Landing : (1) User EL is ON, (0) User EL is OFF*/
    drone_state['timer_elapsed'] = _[1] >> 17 & 1  # Timer elapsed : (1) elapsed, (0) not elapsed */
    drone_state['angles_out_of_range'] = _[1] >> 19 & 1  # Angles : (0) Ok, (1) out of range */
    drone_state['ultrasound_mask'] = _[1] >> 21 & 1  # Ultrasonic sensor : (0) Ok, (1) deaf */
    drone_state['cutout_mask'] = _[1] >> 22 & 1  # Cutout system detection : (0) Not detected, (1) detected */
    drone_state['pic_version_mask'] = _[1] >> 23 & 1  # PIC Version number OK : (0) a bad version number, (1) version number is OK */
    drone_state['atcodec_thread_on'] = _[1] >> 24 & 1  # ATCodec thread ON : (0) thread OFF (1) thread ON */
    drone_state['navdata_thread_on'] = _[1] >> 25 & 1  # Navdata thread ON : (0) thread OFF (1) thread ON */
    drone_state['video_thread_on'] = _[1] >> 26 & 1  # Video thread ON : (0) thread OFF (1) thread ON */
    drone_state['acq_thread_on'] = _[1] >> 27 & 1  # Acquisition thread ON : (0) thread OFF (1) thread ON */
    drone_state['ctrl_watchdog_mask'] = _[1] >> 28 & 1  # CTRL watchdog : (1) delay in control execution (> 5ms), (0) control is well scheduled */
    drone_state['adc_watchdog_mask'] = _[1] >> 29 & 1  # ADC Watchdog : (1) delay in uart2 dsr (> 5ms), (0) uart2 is good */
    drone_state['com_watchdog_mask'] = _[1] >> 30 & 1  # Communication Watchdog : (1) com problem, (0) Com is ok */
    drone_state['emergency_mask'] = _[1] >> 31 & 1  # Emergency landing : (0) no emergency, (1) emergency */
    data = dict()
    data['drone_state'] = drone_state
    data['header'] = _[0]
    data['seq_nr'] = _[2]
    data['vision_flag'] = _[3]
    offset += struct.calcsize("IIII")
    # Walk the option list until unpacking runs off the end of the packet.
    while 1:
        try:
            id_nr, size = struct.unpack_from("HH", packet, offset)
            offset += struct.calcsize("HH")
        except struct.error:
            break
        values = []
        # size appears to include the 4-byte option header, so the payload
        # is size-4 bytes -- TODO confirm against navdata-common.h.
        for i in range(size-struct.calcsize("HH")):
            values.append(struct.unpack_from("c", packet, offset)[0])
            offset += struct.calcsize("c")
        # navdata_tag_t in navdata-common.h
        if id_nr == 0:
            # Option 0 is the "demo" block: control state, battery,
            # attitude angles, altitude, speeds and frame counter.
            values = struct.unpack_from("IIfffIfffI", "".join(values))
            values = dict(zip(['ctrl_state', 'battery', 'theta', 'phi', 'psi', 'altitude', 'vx', 'vy', 'vz', 'num_frames'], values))
            # convert the millidegrees into degrees and round to int, as they
            # are not so precise anyways
            for i in 'theta', 'phi', 'psi':
                values[i] = int(values[i] / 1000)
                #values[i] /= 1000
        data[id_nr] = values
    return data
if __name__ == "__main__":
    import termios
    import fcntl
    import os

    # Put the terminal into non-canonical, no-echo, non-blocking mode so
    # single key presses can be read without waiting for Enter.
    fd = sys.stdin.fileno()
    oldterm = termios.tcgetattr(fd)
    newattr = termios.tcgetattr(fd)
    newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
    termios.tcsetattr(fd, termios.TCSANOW, newattr)
    oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
    drone = ARDrone()
    try:
        # Keyboard teleoperation loop (Python 2 print statements):
        # wasd = translate, q/e = rotate, 1/3 = up/down, 2/x = hover,
        # space = land, Enter = take off, t = reset, y = trim.
        while 1:
            try:
                c = sys.stdin.read(1)
                c = c.lower()
                print "Got character", c
                if c == 'a':
                    drone.move_left()
                if c == 'd':
                    drone.move_right()
                if c == 'w':
                    drone.move_forward()
                if c == 's':
                    drone.move_backward()
                if c == ' ':
                    drone.land()
                if c == '\n':
                    drone.takeoff()
                if c == 'q':
                    drone.turn_left()
                if c == 'e':
                    drone.turn_right()
                if c == '1':
                    drone.move_up()
                if c == '2':
                    drone.hover()
                if c == '3':
                    drone.move_down()
                if c == 't':
                    drone.reset()
                if c == 'x':
                    drone.hover()
                if c == 'y':
                    drone.trim()
            except IOError:
                # Non-blocking stdin raises IOError when no key is pending.
                pass
    finally:
        # Restore the terminal and shut down the drone communication.
        termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
        fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
        drone.halt()
| |
import numpy as np
import pandas as pd
import pyflux as pf
# Set up some data to use for the tests
noise = np.random.normal(0,1,400)
y = np.zeros(400)
x1 = np.random.normal(0,1,400)
x2 = np.random.normal(0,1,400)
# AR(1) series driven by two exogenous regressors:
# y_t = 0.9*y_{t-1} + e_t + 0.1*x1_t - 0.3*x2_t
for i in range(1,len(y)):
    y[i] = 0.9*y[i-1] + noise[i] + 0.1*x1[i] - 0.3*x2[i]
data = pd.DataFrame([y,x1,x2]).T
data.columns = ['y', 'x1', 'x2']
# Poisson count data with unrelated regressors (not used by the GASX tests
# below, but kept for parity with the other test modules).
countdata = np.random.poisson(3,300)
x1 = np.random.normal(0,1,300)
x2 = np.random.normal(0,1,300)
data2 = pd.DataFrame([countdata,x1,x2]).T
data2.columns = ['y', 'x1', 'x2']
# Out-of-sample frames handed to predict(oos_data=...)
y_oos = np.random.normal(0,1,30)
x1_oos = np.random.normal(0,1,30)
x2_oos = np.random.normal(0,1,30)
countdata_oos = np.random.poisson(3,30)
data_oos = pd.DataFrame([y_oos,x1_oos,x2_oos]).T
data_oos.columns = ['y', 'x1', 'x2']
data2_oos = pd.DataFrame([countdata_oos,x1_oos,x2_oos]).T
data2_oos.columns = ['y', 'x1', 'x2']
# Single-regressor GASX models of increasing AR/SC order, fitted once at
# import time and shared by the tests below.
model_1 = pf.GASX(formula="y ~ x1", data=data, ar=0, sc=0, family=pf.t())
x_1 = model_1.fit()
model_2 = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
x_2 = model_2.fit()
model_3 = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, integ=1, family=pf.t())
x_3 = model_3.fit()
model_4 = pf.GASX(formula="y ~ x1", data=data, ar=2, sc=2, family=pf.t())
x_4 = model_4.fit()
# Two-regressor variants; NOTE(review): these rebind x_1..x_4 from above,
# so only the model_* objects retain the single-regressor results.
model_b_1 = pf.GASX(formula="y ~ x1 + x2", data=data, ar=0, sc=0, family=pf.t())
x_1 = model_b_1.fit()
model_b_2 = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
x_2 = model_b_2.fit()
model_b_3 = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, integ=1, family=pf.t())
x_3 = model_b_3.fit()
model_b_4 = pf.GASX(formula="y ~ x1 + x2", data=data, ar=2, sc=2, family=pf.t())
x_4 = model_b_4.fit()
def test_no_terms():
    """GASX with no AR/SC terms: four latent variables, none of them NaN."""
    z_list = model_1.latent_variables.z_list
    assert len(z_list) == 4
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test_couple_terms():
    """GASX with 1 AR and 1 SC term: six latent variables, none NaN."""
    z_list = model_2.latent_variables.z_list
    assert len(z_list) == 6
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test_couple_terms_integ():
    """GASX(1,1) integrated once: six latent variables, none NaN."""
    z_list = model_3.latent_variables.z_list
    assert len(z_list) == 6
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test_bbvi():
    """BBVI fit of GASX(1,1): six latent variables, none NaN."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('BBVI', iterations=100)
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 6
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test_bbvi_mini_batch():
    """Mini-batch BBVI fit of GASX(1,1): six latent variables, none NaN."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('BBVI', iterations=500, mini_batch=32)
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 6
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test_bbvi_elbo():
    """The recorded ELBO should improve over the course of BBVI."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    result = gasx.fit('BBVI', iterations=500, record_elbo=True, map_start=False)
    assert result.elbo_records[-1] > result.elbo_records[0]
def test_bbvi_mini_batch_elbo():
    """The recorded ELBO should improve under mini-batch BBVI too."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    result = gasx.fit('BBVI', iterations=500, mini_batch=32, record_elbo=True, map_start=False)
    assert result.elbo_records[-1] > result.elbo_records[0]
def test_mh():
    """Metropolis-Hastings fit of GASX(1,1): six latent variables, none NaN."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('M-H', nsims=300)
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 6
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test_laplace():
    """Laplace-approximation fit of GASX(1,1): six latent variables, none NaN."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('Laplace')
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 6
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test_pml():
    """PML fit of GASX(1,1): six latent variables, none NaN."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('PML')
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 6
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test_predict_length():
    """predict() must return exactly h rows."""
    forecast = model_4.predict(h=5, oos_data=data_oos)
    assert forecast.shape[0] == 5
def test_predict_is_length():
    """predict_is() must return exactly h rows."""
    in_sample = model_4.predict_is(h=5)
    assert in_sample.shape[0] == 5
def test_predict_nans():
    """Out-of-sample predictions must contain no NaNs."""
    forecast = model_4.predict(h=5, oos_data=data_oos)
    assert not np.isnan(forecast.values).any()
def test_predict_is_nans():
    """In-sample predictions must contain no NaNs."""
    in_sample = model_4.predict_is(h=5)
    assert not np.isnan(in_sample.values).any()
def test_predict_nonconstant():
    """Forecasts should vary over the horizon; a constant series would
    indicate the predict loop is not iterating forward."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit()
    forecast = gasx.predict(h=10, oos_data=data_oos, intervals=False)
    assert not np.all(forecast.values == forecast.values[0])
def test_predict_is_nonconstant():
    """In-sample forecasts should vary over the horizon (catches a predict
    loop that fails to iterate forward)."""
    forecast = model_2.predict_is(h=10, intervals=False)
    assert not np.all(forecast.values == forecast.values[0])
def test_predict_intervals():
    """
    Tests prediction intervals are ordered correctly
    """
    predictions = model_1.predict(h=10, oos_data=data_oos, intervals=True)
    # BUG FIX: the original indexed with `model.data_name`, but no global
    # `model` exists at module level -- the predictions come from model_1,
    # so use its data_name (a NameError at test runtime before this fix).
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model_1.data_name].values))
    assert(np.all(predictions[model_1.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_is_intervals():
    """
    Tests prediction intervals are ordered correctly
    """
    predictions = model_1.predict_is(h=10, intervals=True)
    # BUG FIX: the original indexed with `model.data_name`, but no global
    # `model` exists at module level -- use model_1, whose predictions are
    # being checked (a NameError at test runtime before this fix).
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model_1.data_name].values))
    assert(np.all(predictions[model_1.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_intervals_bbvi():
    """Interval columns of a BBVI out-of-sample forecast must be ordered."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('BBVI', iterations=100)
    preds = gasx.predict(h=10, oos_data=data_oos, intervals=True)
    point = preds[gasx.data_name].values
    assert np.all(preds['99% Prediction Interval'].values > preds['95% Prediction Interval'].values)
    assert np.all(preds['95% Prediction Interval'].values > point)
    assert np.all(point > preds['5% Prediction Interval'].values)
    assert np.all(preds['5% Prediction Interval'].values > preds['1% Prediction Interval'].values)
def test_predict_is_intervals_bbvi():
    """Interval columns of a BBVI in-sample forecast must be ordered."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('BBVI', iterations=100)
    preds = gasx.predict_is(h=10, intervals=True)
    point = preds[gasx.data_name].values
    assert np.all(preds['99% Prediction Interval'].values > preds['95% Prediction Interval'].values)
    assert np.all(preds['95% Prediction Interval'].values > point)
    assert np.all(point > preds['5% Prediction Interval'].values)
    assert np.all(preds['5% Prediction Interval'].values > preds['1% Prediction Interval'].values)
def test_predict_intervals_mh():
    """Interval columns of an M-H out-of-sample forecast must be ordered."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('M-H', nsims=400)
    preds = gasx.predict(h=10, oos_data=data_oos, intervals=True)
    point = preds[gasx.data_name].values
    assert np.all(preds['99% Prediction Interval'].values > preds['95% Prediction Interval'].values)
    assert np.all(preds['95% Prediction Interval'].values > point)
    assert np.all(point > preds['5% Prediction Interval'].values)
    assert np.all(preds['5% Prediction Interval'].values > preds['1% Prediction Interval'].values)
def test_predict_is_intervals_mh():
    """Interval columns of an M-H in-sample forecast must be ordered."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('M-H', nsims=400)
    preds = gasx.predict_is(h=10, intervals=True)
    point = preds[gasx.data_name].values
    assert np.all(preds['99% Prediction Interval'].values > preds['95% Prediction Interval'].values)
    assert np.all(preds['95% Prediction Interval'].values > point)
    assert np.all(point > preds['5% Prediction Interval'].values)
    assert np.all(preds['5% Prediction Interval'].values > preds['1% Prediction Interval'].values)
def test_sample_model():
    """sample() returns nsims draws spanning the whole (lagged) sample."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('BBVI', iterations=100)
    draws = gasx.sample(nsims=100)
    assert draws.shape[0] == 100
    assert draws.shape[1] == len(data) - 1
def test_ppc():
    """The posterior predictive p-value must lie in [0, 1]."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('BBVI', iterations=100)
    p_value = gasx.ppc()
    assert 0.0 <= p_value <= 1.0
## Try more than one predictor
def test2_no_terms():
    """Two-regressor GASX with no AR/SC terms: five latent variables, none NaN."""
    z_list = model_b_1.latent_variables.z_list
    assert len(z_list) == 5
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test2_couple_terms():
    """Two-regressor GASX(1,1): seven latent variables, none NaN."""
    z_list = model_b_2.latent_variables.z_list
    assert len(z_list) == 7
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test2_bbvi():
    """BBVI fit of two-regressor GASX(1,1): seven latent variables, none NaN."""
    gasx = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('BBVI', iterations=500)
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 7
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test2_bbvi_mini_batch():
    """Mini-batch BBVI, two regressors: seven latent variables, none NaN."""
    gasx = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('BBVI', iterations=500, mini_batch=32)
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 7
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test2_bbvi_elbo():
    """ELBO should improve over BBVI iterations (two regressors)."""
    gasx = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
    result = gasx.fit('BBVI', iterations=500, record_elbo=True, map_start=False)
    assert result.elbo_records[-1] > result.elbo_records[0]
def test2_bbvi_mini_batch_elbo():
    """ELBO should improve under mini-batch BBVI (two regressors)."""
    gasx = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
    result = gasx.fit('BBVI', iterations=500, mini_batch=32, record_elbo=True, map_start=False)
    assert result.elbo_records[-1] > result.elbo_records[0]
def test2_mh():
    """M-H fit of two-regressor GASX(1,1): seven latent variables, none NaN."""
    gasx = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('M-H', nsims=300)
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 7
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test2_laplace():
    """Laplace fit of two-regressor GASX(1,1): seven latent variables, none NaN."""
    gasx = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('Laplace')
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 7
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test2_pml():
    """PML fit of two-regressor GASX(1,1): seven latent variables, none NaN."""
    gasx = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('PML')
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 7
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test2_predict_length():
    """predict() must return exactly h rows (two regressors)."""
    forecast = model_b_2.predict(h=5, oos_data=data_oos)
    assert forecast.shape[0] == 5
def test2_predict_is_length():
    """predict_is() must return exactly h rows (two regressors)."""
    in_sample = model_b_2.predict_is(h=5)
    assert in_sample.shape[0] == 5
def test2_predict_nans():
    """Out-of-sample predictions must contain no NaNs (two regressors)."""
    forecast = model_b_2.predict(h=5, oos_data=data_oos)
    assert not np.isnan(forecast.values).any()
def test2_predict_is_nans():
    """In-sample predictions must contain no NaNs (two regressors)."""
    in_sample = model_b_2.predict_is(h=5)
    assert not np.isnan(in_sample.values).any()
def test2_predict_nonconstant():
    """Two-regressor forecasts should vary over the horizon (catches a
    predict loop that fails to iterate forward)."""
    forecast = model_b_2.predict(h=10, oos_data=data_oos, intervals=False)
    assert not np.all(forecast.values == forecast.values[0])
def test2_predict_is_nonconstant():
    """Two-regressor in-sample forecasts should vary over the horizon."""
    forecast = model_b_2.predict_is(h=10, intervals=False)
    assert not np.all(forecast.values == forecast.values[0])
def test2_predict_intervals():
    """
    Tests prediction intervals are ordered correctly
    """
    predictions = model_b_2.predict(h=10, oos_data=data_oos, intervals=True)
    # BUG FIX: the original indexed with `model.data_name`, but no global
    # `model` exists at module level -- the predictions come from model_b_2
    # (a NameError at test runtime before this fix).
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model_b_2.data_name].values))
    assert(np.all(predictions[model_b_2.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_is_intervals():
    """
    Tests prediction intervals are ordered correctly
    """
    predictions = model_b_2.predict_is(h=10, intervals=True)
    # BUG FIX: the original indexed with `model.data_name`, but no global
    # `model` exists at module level -- use model_b_2, whose predictions are
    # being checked (a NameError at test runtime before this fix).
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model_b_2.data_name].values))
    assert(np.all(predictions[model_b_2.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_intervals_bbvi():
    """Interval columns of a BBVI forecast must be ordered (two regressors)."""
    gasx = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('BBVI', iterations=100)
    preds = gasx.predict(h=10, oos_data=data_oos, intervals=True)
    point = preds[gasx.data_name].values
    assert np.all(preds['99% Prediction Interval'].values > preds['95% Prediction Interval'].values)
    assert np.all(preds['95% Prediction Interval'].values > point)
    assert np.all(point > preds['5% Prediction Interval'].values)
    assert np.all(preds['5% Prediction Interval'].values > preds['1% Prediction Interval'].values)
def test2_predict_is_intervals_bbvi():
    """Interval columns of a BBVI in-sample forecast must be ordered (two regressors)."""
    gasx = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('BBVI', iterations=100)
    preds = gasx.predict_is(h=10, intervals=True)
    point = preds[gasx.data_name].values
    assert np.all(preds['99% Prediction Interval'].values > preds['95% Prediction Interval'].values)
    assert np.all(preds['95% Prediction Interval'].values > point)
    assert np.all(point > preds['5% Prediction Interval'].values)
    assert np.all(preds['5% Prediction Interval'].values > preds['1% Prediction Interval'].values)
def test2_predict_intervals_mh():
    """Interval columns of an M-H forecast must be ordered (two regressors)."""
    gasx = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('M-H', nsims=400)
    preds = gasx.predict(h=10, oos_data=data_oos, intervals=True)
    point = preds[gasx.data_name].values
    assert np.all(preds['99% Prediction Interval'].values > preds['95% Prediction Interval'].values)
    assert np.all(preds['95% Prediction Interval'].values > point)
    assert np.all(point > preds['5% Prediction Interval'].values)
    assert np.all(preds['5% Prediction Interval'].values > preds['1% Prediction Interval'].values)
def test2_predict_is_intervals_mh():
    """Interval columns of an M-H in-sample forecast must be ordered (two regressors)."""
    gasx = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('M-H', nsims=400)
    preds = gasx.predict_is(h=10, intervals=True)
    point = preds[gasx.data_name].values
    assert np.all(preds['99% Prediction Interval'].values > preds['95% Prediction Interval'].values)
    assert np.all(preds['95% Prediction Interval'].values > point)
    assert np.all(point > preds['5% Prediction Interval'].values)
    assert np.all(preds['5% Prediction Interval'].values > preds['1% Prediction Interval'].values)
def test2_sample_model():
    """sample() returns nsims draws spanning the whole sample (two regressors)."""
    gasx = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
    gasx.fit('BBVI', iterations=100)
    draws = gasx.sample(nsims=100)
    assert draws.shape[0] == 100
    assert draws.shape[1] == len(data) - 1
def test2_ppc():
    """Check that the posterior predictive p-value lies in [0, 1]."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.t())
    fit_result = model.fit('BBVI', iterations=100)
    p_value = model.ppc()
    assert 0.0 <= p_value <= 1.0
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import normalization
from tensorflow.python.keras.layers import normalization_v2
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class BatchNormalizationTest(keras_parameterized.TestCase):
  """Tests for the Keras BatchNormalization layer."""

  @keras_parameterized.run_all_keras_modes
  def test_basic_batchnorm(self):
    """Smoke-tests layer construction/serialization for several kwarg sets."""
    testing_utils.layer_test(
        keras.layers.BatchNormalization,
        kwargs={
            'momentum': 0.9,
            'epsilon': 0.1,
            'gamma_regularizer': keras.regularizers.l2(0.01),
            'beta_regularizer': keras.regularizers.l2(0.01)
        },
        input_shape=(3, 4, 2))
    testing_utils.layer_test(
        keras.layers.BatchNormalization,
        kwargs={
            'gamma_initializer': 'ones',
            'beta_initializer': 'ones',
            'moving_mean_initializer': 'zeros',
            'moving_variance_initializer': 'ones'
        },
        input_shape=(3, 4, 2))
    testing_utils.layer_test(
        keras.layers.BatchNormalization,
        kwargs={'scale': False,
                'center': False},
        input_shape=(3, 3))
    testing_utils.layer_test(
        keras.layers.BatchNormalization,
        kwargs={
            'momentum': 0.9,
            'epsilon': 0.1,
            'gamma_regularizer': keras.regularizers.l2(0.01),
            'beta_regularizer': keras.regularizers.l2(0.01),
            'dtype': 'float64'
        },
        input_shape=(3, 2, 2, 2), input_dtype='float64')

  @tf_test_util.run_in_graph_and_eager_modes
  def test_batchnorm_weights(self):
    """Checks trainable/total weight counts with and without scale/center."""
    # scale=False, center=False: no gamma/beta, only the two moving stats.
    layer = keras.layers.BatchNormalization(scale=False, center=False)
    layer.build((None, 3, 4))
    self.assertEqual(len(layer.trainable_weights), 0)
    self.assertEqual(len(layer.weights), 2)
    # Default layer: gamma + beta trainable, plus moving mean/variance.
    layer = keras.layers.BatchNormalization()
    layer.build((None, 3, 4))
    self.assertEqual(len(layer.trainable_weights), 2)
    self.assertEqual(len(layer.weights), 4)

  @tf_test_util.run_in_graph_and_eager_modes
  def test_batchnorm_regularization(self):
    """Checks that regularizers produce losses and constraints are stored."""
    layer = keras.layers.BatchNormalization(
        gamma_regularizer='l1', beta_regularizer='l1')
    layer.build((None, 3, 4))
    self.assertEqual(len(layer.losses), 2)
    max_norm = keras.constraints.max_norm
    layer = keras.layers.BatchNormalization(
        gamma_constraint=max_norm, beta_constraint=max_norm)
    layer.build((None, 3, 4))
    self.assertEqual(layer.gamma.constraint, max_norm)
    self.assertEqual(layer.beta.constraint, max_norm)

  @keras_parameterized.run_all_keras_modes
  def test_batchnorm_convnet(self):
    """Trains BN on NCHW data (GPU only) and checks the output is normalized."""
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True):
        model = keras.models.Sequential()
        norm = keras.layers.BatchNormalization(
            axis=1, input_shape=(3, 4, 4), momentum=0.8)
        model.add(norm)
        model.compile(
            loss='mse',
            optimizer=gradient_descent.GradientDescentOptimizer(0.01),
            run_eagerly=testing_utils.should_run_eagerly())
        # centered on 5.0, variance 10.0
        x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
        model.fit(x, x, epochs=4, verbose=0)
        out = model.predict(x)
        # Undo beta/gamma so the remaining signal should be ~N(0, 1).
        out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
        out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))
        np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
        np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)

  @keras_parameterized.run_all_keras_modes
  def test_batchnorm_convnet_channel_last(self):
    """Trains BN on NHWC data and checks the output is normalized."""
    model = keras.models.Sequential()
    norm = keras.layers.BatchNormalization(
        axis=-1, input_shape=(4, 4, 3), momentum=0.8)
    model.add(norm)
    model.compile(
        loss='mse',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01),
        run_eagerly=testing_utils.should_run_eagerly())
    # centered on 5.0, variance 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
    model.fit(x, x, epochs=4, verbose=0)
    out = model.predict(x)
    # Undo beta/gamma so the remaining signal should be ~N(0, 1).
    out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
    out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
    np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
    np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)

  @keras_parameterized.run_all_keras_modes
  def test_batchnorm_correctness(self):
    """Runs the float32 correctness check for both V1 and V2 layers."""
    _run_batchnorm_correctness_test(
        normalization.BatchNormalization, dtype='float32')
    _run_batchnorm_correctness_test(
        normalization_v2.BatchNormalization, dtype='float32')

  @keras_parameterized.run_all_keras_modes
  def test_batchnorm_mixed_precision(self):
    """Runs the float16 correctness check for both V1 and V2 layers."""
    _run_batchnorm_correctness_test(
        normalization.BatchNormalization, dtype='float16')
    _run_batchnorm_correctness_test(
        normalization_v2.BatchNormalization, dtype='float16')

  @tf_test_util.run_in_graph_and_eager_modes
  @testing_utils.enable_v2_dtype_behavior
  def test_batchnorm_policy(self):
    """Under mixed_float16 policy: float16 outputs, float32 variables."""
    norm = keras.layers.BatchNormalization(
        axis=-1,
        input_shape=(4, 4, 3),
        momentum=0.8,
        dtype=policy.Policy('mixed_float16'))
    x = np.random.normal(size=(10, 4, 4, 3))
    y = norm(x)
    self.assertEqual(y.dtype, 'float16')
    self.assertEqual(norm.beta.dtype.base_dtype, 'float32')
    self.assertEqual(norm.gamma.dtype.base_dtype, 'float32')

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_batchnorm_non_trainable_with_fit(self):
    """After trainable=False, training loss matches inference-mode loss."""
    inputs = keras.Input((3,))
    bn = normalization_v2.BatchNormalization()
    outputs = bn(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'rmsprop',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.fit(np.random.random((100, 3)), np.random.random((100, 3)))
    test_data = np.random.random((10, 3))
    test_targets = np.random.random((10, 3))
    test_loss = model.evaluate(test_data, test_targets)
    # Frozen BN should use moving statistics, making train == eval.
    bn.trainable = False
    model.compile(
        'rmsprop',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    train_loss = model.train_on_batch(test_data, test_targets)
    self.assertAlmostEqual(test_loss, train_loss)

  @tf_test_util.run_in_graph_and_eager_modes
  def test_batchnorm_non_trainable_with_tf_function(self):
    """Same freeze check as above, but with custom tf.function train loops."""
    inputs = keras.Input((3,))
    bn = normalization_v2.BatchNormalization()
    outputs = bn(inputs)
    model = keras.Model(inputs, outputs)
    loss_fn = keras.losses.MeanSquaredError()
    optimizer = rmsprop_v2.RMSprop()

    @def_function.function()
    def train_step(x, y):
      with backprop.GradientTape() as tape:
        y_pred = model(x, training=True)
        loss = loss_fn(y, y_pred)
      grads = tape.gradient(loss, model.trainable_weights)
      optimizer.apply_gradients(zip(grads, model.trainable_weights))
      return loss

    @def_function.function()
    def test_step(x, y):
      y_pred = model(x, training=False)
      loss = loss_fn(y, y_pred)
      return loss

    train_step(np.random.random((100, 3)), np.random.random((100, 3)))
    test_data = np.random.random((10, 3))
    test_targets = np.random.random((10, 3))
    test_loss = test_step(test_data, test_targets)
    bn.trainable = False
    train_loss = train_step(test_data, test_targets)
    if context.executing_eagerly():
      self.assertAlmostEqual(test_loss.numpy(), train_loss.numpy())

  def test_eager_batchnorm_in_custom_model_call_with_tf_function(self):
    """BN moving stats must update inside a tf.function model call."""

    class MyModel(keras.Model):

      def __init__(self):
        super(MyModel, self).__init__()
        self.bn = keras.layers.BatchNormalization()

      @def_function.function()
      def call(self, x, training):
        return self.bn(x, training=training)

    with context.eager_mode():
      model = MyModel()

      for _ in range(10):
        x = constant_op.constant(0.5, shape=[1, 1])
        model(x, training=True)

      # Make sure the moving mean and variance have been updated
      self.assertAllClose(model.bn.moving_mean.numpy(), [0.047], atol=3e-3)
      self.assertAllClose(model.bn.moving_variance.numpy(), [0.9], atol=3e-2)
class BatchNormalizationV1Test(test.TestCase):
  """Tests specific to the V1 BatchNormalization layer."""

  @tf_test_util.run_in_graph_and_eager_modes
  def test_v1_fused_attribute(self):
    """Checks how the `fused` attribute resolves after building the layer."""
    # Default: fuses for 4D inputs.
    norm = normalization.BatchNormalization()
    inp = keras.layers.Input((4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, True)

    # Explicit fused=False stays False after build.
    norm = normalization.BatchNormalization(fused=False)
    self.assertEqual(norm.fused, False)
    inp = keras.layers.Input(shape=(4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, False)

    # virtual_batch_size is incompatible with the fused kernel, so the
    # attribute flips from True to False once the layer is built.
    norm = normalization.BatchNormalization(virtual_batch_size=2)
    self.assertEqual(norm.fused, True)
    inp = keras.layers.Input(shape=(2, 2, 2))
    norm(inp)
    self.assertEqual(norm.fused, False)
class BatchNormalizationV2Test(keras_parameterized.TestCase):
  """Tests specific to the V2 BatchNormalization layer."""

  @keras_parameterized.run_all_keras_modes
  def test_basic_batchnorm_v2(self):
    """Smoke-tests the V2 layer with explicit and default `fused` values."""
    testing_utils.layer_test(
        normalization_v2.BatchNormalization,
        kwargs={'fused': True},
        input_shape=(3, 3, 3, 3))
    testing_utils.layer_test(
        normalization_v2.BatchNormalization,
        kwargs={'fused': None},
        input_shape=(3, 3, 3))

  @tf_test_util.run_in_graph_and_eager_modes
  def test_v2_fused_attribute(self):
    """Checks `fused` resolution rules and invalid fused combinations."""
    # fused=None resolves to True for 4D inputs at build time.
    norm = normalization_v2.BatchNormalization()
    self.assertEqual(norm.fused, None)
    inp = keras.layers.Input(shape=(4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, True)

    # fused=None resolves to False for non-4D inputs.
    norm = normalization_v2.BatchNormalization()
    self.assertEqual(norm.fused, None)
    inp = keras.layers.Input(shape=(4, 4))
    norm(inp)
    self.assertEqual(norm.fused, False)

    # virtual_batch_size forces non-fused from construction time in V2.
    norm = normalization_v2.BatchNormalization(virtual_batch_size=2)
    self.assertEqual(norm.fused, False)
    inp = keras.layers.Input(shape=(4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, False)

    norm = normalization_v2.BatchNormalization(fused=False)
    self.assertEqual(norm.fused, False)
    inp = keras.layers.Input(shape=(4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, False)

    # axis=[3] (single channel axis) is compatible with the fused kernel.
    norm = normalization_v2.BatchNormalization(fused=True, axis=[3])
    self.assertEqual(norm.fused, True)
    inp = keras.layers.Input(shape=(4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, True)

    # Combinations that must be rejected at construction time.
    with self.assertRaisesRegexp(ValueError, 'fused.*renorm'):
      normalization_v2.BatchNormalization(fused=True, renorm=True)

    with self.assertRaisesRegexp(ValueError, 'fused.*when axis is 1 or 3'):
      normalization_v2.BatchNormalization(fused=True, axis=2)

    with self.assertRaisesRegexp(ValueError, 'fused.*when axis is 1 or 3'):
      normalization_v2.BatchNormalization(fused=True, axis=[1, 3])

    with self.assertRaisesRegexp(ValueError, 'fused.*virtual_batch_size'):
      normalization_v2.BatchNormalization(fused=True, virtual_batch_size=2)

    with self.assertRaisesRegexp(ValueError, 'fused.*adjustment'):
      normalization_v2.BatchNormalization(fused=True,
                                          adjustment=lambda _: (1, 0))

    # fused=True with a non-4D input must fail at build time.
    norm = normalization_v2.BatchNormalization(fused=True)
    self.assertEqual(norm.fused, True)
    inp = keras.layers.Input(shape=(4, 4))
    with self.assertRaisesRegexp(ValueError, '4D input tensors'):
      norm(inp)

  def test_updates_in_wrap_function(self):
    """BN moving-stat updates must be tracked inside a wrap_function."""
    with context.eager_mode():
      layer = keras.layers.BatchNormalization()

      def my_func():
        x = array_ops.ones((10, 1))
        return layer(x, training=True)

      wrapped_fn = wrap_function.wrap_function(my_func, [])
      wrapped_fn()

      # Updates should be tracked in a `wrap_function`.
      self.assertLen(layer.updates, 2)
def _run_batchnorm_correctness_test(layer, dtype='float32', fused=False):
  """Trains a single-BN model on N(5, 10) data and checks normalization.

  Args:
    layer: BatchNormalization class (V1 or V2) to instantiate.
    dtype: input dtype; for 'float16' the output is cast back to float32
      before the loss, since Keras models require float32 losses.
    fused: passed through to the layer constructor.
  """
  model = keras.models.Sequential()
  model.add(keras.Input(shape=(2, 2, 2), dtype=dtype))
  norm = layer(momentum=0.8, fused=fused)
  model.add(norm)
  if dtype == 'float16':
    # Keras models require float32 losses.
    model.add(keras.layers.Lambda(lambda x: keras.backend.cast(x, 'float32')))

  model.compile(
      loss='mse',
      optimizer=gradient_descent.GradientDescentOptimizer(0.01),
      run_eagerly=testing_utils.should_run_eagerly())

  # centered on 5.0, variance 10.0
  x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2))
       .astype(dtype))
  model.fit(x, x, epochs=4, verbose=0)
  out = model.predict(x)
  # Remove beta/gamma so the residual signal should be ~N(0, 1).
  out -= keras.backend.eval(norm.beta)
  out /= keras.backend.eval(norm.gamma)

  np.testing.assert_allclose(out.mean(), 0.0, atol=2e-1)
  np.testing.assert_allclose(out.std(), 1.0, atol=2e-1)
@parameterized.parameters(
    [normalization.BatchNormalization, normalization_v2.BatchNormalization])
class NormalizationLayersGraphModeOnlyTest(
    test.TestCase, parameterized.TestCase):
  """Graph-mode-only tests, parameterized over V1 and V2 BN layers."""

  def test_shared_batchnorm(self, layer):
    """Test that a BN layer can be shared across different data streams."""
    with self.cached_session():
      # Test single layer reuse
      bn = layer()
      x1 = keras.layers.Input(shape=(10,))
      _ = bn(x1)

      x2 = keras.layers.Input(shape=(10,))
      y2 = bn(x2)

      x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
      model = keras.models.Model(x2, y2)

      model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
      model.train_on_batch(x, x)

      # Two moving-stat updates per call site; bn was called twice.
      self.assertLen(bn.updates, 4)
      self.assertLen(bn.get_updates_for(x1), 2)
      self.assertLen(model.get_updates_for(x2), 2)

      # Test model-level reuse
      x3 = keras.layers.Input(shape=(10,))
      y3 = model(x3)
      new_model = keras.models.Model(x3, y3, name='new_model')

      self.assertLen(new_model.updates, 6)
      self.assertLen(model.updates, 6)
      self.assertLen(new_model.get_updates_for(x3), 2)
      new_model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
      new_model.train_on_batch(x, x)

  def test_that_trainable_disables_updates(self, layer):
    """model.trainable / layer.trainable must gate BN moving-stat updates."""
    with self.cached_session():
      val_a = np.random.random((10, 4))
      val_out = np.random.random((10, 4))

      a = keras.layers.Input(shape=(4,))
      layer = layer(input_shape=(4,))
      b = layer(a)
      model = keras.models.Model(a, b)

      model.trainable = False
      assert not model.updates

      model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
      assert not model.updates

      # With updates disabled, training must not change predictions.
      x1 = model.predict(val_a)
      model.train_on_batch(val_a, val_out)
      x2 = model.predict(val_a)
      self.assertAllClose(x1, x2, atol=1e-7)

      model.trainable = True
      model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
      assert model.updates

      # With updates enabled, training must change predictions.
      model.train_on_batch(val_a, val_out)
      x2 = model.predict(val_a)
      assert np.abs(np.sum(x1 - x2)) > 1e-5

      # Freezing just the layer has the same effect as freezing the model.
      layer.trainable = False
      model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
      assert not model.updates

      x1 = model.predict(val_a)
      model.train_on_batch(val_a, val_out)
      x2 = model.predict(val_a)
      self.assertAllClose(x1, x2, atol=1e-7)

  def test_batchnorm_trainable(self, layer):
    """Tests that batchnorm layer is trainable when learning phase is enabled.

    Computes mean and std for current inputs then
    applies batch normalization using them.

    Args:
      layer: Either V1 or V2 of BatchNormalization layer.
    """
    # TODO(fchollet): enable in all execution modes when issue with
    # learning phase setting is resolved.
    with ops.Graph().as_default(), self.cached_session():
      bn_mean = 0.5
      bn_std = 10.
      val_a = np.expand_dims(np.arange(10.), axis=1)

      def get_model(bn_mean, bn_std):
        inp = keras.layers.Input(shape=(1,))
        x = layer()(inp)
        model1 = keras.models.Model(inp, x)
        # Weights order: gamma, beta, moving_mean, moving_variance.
        model1.set_weights([
            np.array([1.]),
            np.array([0.]),
            np.array([bn_mean]),
            np.array([bn_std**2])
        ])
        return model1

      # Simulates training-mode with trainable layer.
      # Should use mini-batch statistics.
      with keras.backend.learning_phase_scope(1):
        model = get_model(bn_mean, bn_std)
        model.compile(loss='mse', optimizer='rmsprop')
        out = model.predict(val_a)
        self.assertAllClose(
            (val_a - np.mean(val_a)) / np.std(val_a), out, atol=1e-3)
def _run_layernorm_correctness_test(layer, dtype='float32'):
  """Trains a single-LayerNormalization model and checks normalized stats.

  Args:
    layer: LayerNormalization class to instantiate.
    dtype: dtype of the layer under test.
  """
  model = keras.models.Sequential()
  # NOTE(review): this casts the input to float16 unconditionally, even when
  # `dtype` is 'float32' — unlike _run_batchnorm_correctness_test, which only
  # adds a cast for the float16 case. Confirm this is intentional.
  model.add(keras.layers.Lambda(lambda x: math_ops.cast(x, dtype='float16')))
  norm = layer(input_shape=(2, 2, 2), dtype=dtype)
  model.add(norm)
  model.compile(
      loss='mse',
      optimizer=gradient_descent.GradientDescentOptimizer(0.01),
      run_eagerly=testing_utils.should_run_eagerly())

  # centered on 5.0, variance 10.0
  x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2))
       .astype(dtype))
  model.fit(x, x, epochs=4, verbose=0)
  out = model.predict(x)
  # Remove beta/gamma so the residual signal should be ~N(0, 1).
  out -= keras.backend.eval(norm.beta)
  out /= keras.backend.eval(norm.gamma)

  np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
  np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
class LayerNormalizationTest(keras_parameterized.TestCase):
  """Tests for the Keras LayerNormalization layer."""

  @keras_parameterized.run_all_keras_modes
  def test_basic_layernorm(self):
    """Smoke-tests layer construction/serialization for several kwarg sets."""
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={
            'gamma_regularizer': keras.regularizers.l2(0.01),
            'beta_regularizer': keras.regularizers.l2(0.01)
        },
        input_shape=(3, 4, 2))
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={
            'gamma_initializer': 'ones',
            'beta_initializer': 'ones',
        },
        input_shape=(3, 4, 2))
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={'scale': False,
                'center': False},
        input_shape=(3, 3))
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={'axis': (-3, -2, -1)},
        input_shape=(2, 8, 8, 3))

  @keras_parameterized.run_all_keras_modes
  def test_non_fused_layernorm(self):
    """Axis choices that cannot use the fused implementation."""
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={'axis': -2},
        input_shape=(3, 4, 2))
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={'axis': (-3, -2)},
        input_shape=(2, 8, 8, 3))
    testing_utils.layer_test(
        keras.layers.LayerNormalization,
        kwargs={'axis': (-3, -1)},
        input_shape=(2, 8, 8, 3))

  @tf_test_util.run_in_graph_and_eager_modes
  def test_layernorm_weights(self):
    """Checks trainable/total weight counts with and without scale/center."""
    # LayerNorm has no moving statistics, so scale=False, center=False
    # leaves the layer with no weights at all.
    layer = keras.layers.LayerNormalization(scale=False, center=False)
    layer.build((None, 3, 4))
    self.assertEqual(len(layer.trainable_weights), 0)
    self.assertEqual(len(layer.weights), 0)

    layer = keras.layers.LayerNormalization()
    layer.build((None, 3, 4))
    self.assertEqual(len(layer.trainable_weights), 2)
    self.assertEqual(len(layer.weights), 2)

  @tf_test_util.run_in_graph_and_eager_modes
  def test_layernorm_regularization(self):
    """Checks that regularizers produce losses and constraints are stored."""
    layer = keras.layers.LayerNormalization(
        gamma_regularizer='l1', beta_regularizer='l1')
    layer.build((None, 3, 4))
    self.assertEqual(len(layer.losses), 2)

    max_norm = keras.constraints.max_norm
    layer = keras.layers.LayerNormalization(
        gamma_constraint=max_norm, beta_constraint=max_norm)
    layer.build((None, 3, 4))
    self.assertEqual(layer.gamma.constraint, max_norm)
    self.assertEqual(layer.beta.constraint, max_norm)

  @keras_parameterized.run_all_keras_modes
  def test_layernorm_convnet_channel_last(self):
    """Trains LayerNorm on NHWC data and checks the output is normalized."""
    model = keras.models.Sequential()
    norm = keras.layers.LayerNormalization(input_shape=(4, 4, 3))
    model.add(norm)
    model.compile(
        loss='mse',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01),
        run_eagerly=testing_utils.should_run_eagerly())

    # centered on 5.0, variance 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
    model.fit(x, x, epochs=4, verbose=0)
    out = model.predict(x)
    # Undo beta/gamma so the remaining signal should be ~N(0, 1).
    out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
    out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))

    np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
    np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)

  @keras_parameterized.run_all_keras_modes
  def test_layernorm_correctness(self):
    """Runs the float32 correctness helper."""
    _run_layernorm_correctness_test(
        normalization.LayerNormalization, dtype='float32')

  @keras_parameterized.run_all_keras_modes
  def test_layernorm_mixed_precision(self):
    """Runs the float16 correctness helper."""
    _run_layernorm_correctness_test(
        normalization.LayerNormalization, dtype='float16')

  @tf_test_util.run_in_graph_and_eager_modes
  def testIncorrectAxisType(self):
    """A non-int/list/tuple axis must raise TypeError."""
    with self.assertRaisesRegexp(
        TypeError, r'Expected an int or a list/tuple of ints'):
      _ = normalization.LayerNormalization(axis={'axis': -1})

  @tf_test_util.run_in_graph_and_eager_modes
  def testInvalidAxis(self):
    """An out-of-range axis must raise ValueError at build time."""
    with self.assertRaisesRegexp(ValueError, r'Invalid axis: 3'):
      layer_norm = normalization.LayerNormalization(axis=3)
      layer_norm.build(input_shape=(2, 2, 2))

  @tf_test_util.run_in_graph_and_eager_modes
  def testDuplicateAxis(self):
    """Repeated axes must raise ValueError at build time."""
    with self.assertRaisesRegexp(ValueError, r'Duplicate axis:'):
      layer_norm = normalization.LayerNormalization(axis=[-1, -1])
      layer_norm.build(input_shape=(2, 2, 2))

  @tf_test_util.run_in_graph_and_eager_modes
  def testFusedAttr(self):
    """Trailing contiguous axes should enable the fused implementation."""
    layer_norm = normalization.LayerNormalization(axis=[-2, -1])
    layer_norm.build(input_shape=(2, 2, 2))
    self.assertEqual(layer_norm._fused, True)
class LayerNormalizationNumericsTest(keras_parameterized.TestCase):
  """Tests LayerNormalization has correct and numerically stable outputs."""

  def _expected_layer_norm(self, x, beta, gamma, batch_input_shape, axis,
                           epsilon):
    """Returns the layer norm, which is computed using NumPy."""
    # Broadcast shape keeps size 1 on non-normalized dimensions so beta and
    # gamma line up with the reduced axes of x.
    broadcast_shape = [batch_input_shape[i] if i in axis else 1
                       for i in range(len(batch_input_shape))]
    mean = np.mean(x, axis=axis, keepdims=True)
    var = np.var(x, axis=axis, keepdims=True)
    expected = (x - mean) / np.sqrt(var + epsilon)
    expected *= np.reshape(gamma, broadcast_shape)
    expected += np.reshape(beta, broadcast_shape)
    return expected

  def _test_forward_pass(self, batch_input_shape, axis, fp64_tol=1e-14,
                         fp32_tol=1e-6, fp16_tol=1e-2):
    """Tests the forward pass of layer normalization.

    Args:
      batch_input_shape: The input shape that will be used to test, including
        the batch dimension.
      axis: A list of axises to normalize. Will be passed to the `axis` argument
        of LayerNormalization.
      fp64_tol: The relative and absolute tolerance for float64.
      fp32_tol: The relative and absolute tolerance for float32.
      fp16_tol: The relative and absolute tolerance for float16.
    """
    # beta/gamma are shaped like the normalized axes and filled with
    # distinct values so scaling/offset errors are detectable.
    param_shape = [batch_input_shape[i] for i in axis]
    param_elems = 1
    for dim in param_shape:
      param_elems *= dim
    beta = np.arange(param_elems, dtype='float64').reshape(param_shape)
    gamma = np.arange(1, param_elems + 1, dtype='float64').reshape(param_shape)
    x = np.random.normal(size=batch_input_shape)

    for epsilon in 1e-12, 1e-3:
      expected = self._expected_layer_norm(x, beta, gamma, batch_input_shape,
                                           axis, epsilon)
      for dtype in 'float64', 'float32', 'float16':
        norm = normalization.LayerNormalization(
            axis=axis, dtype=dtype, batch_input_shape=batch_input_shape,
            epsilon=epsilon, beta_initializer=keras.initializers.constant(beta),
            gamma_initializer=keras.initializers.constant(gamma))
        y = norm(keras.backend.cast(x, dtype))
        actual = keras.backend.eval(y)

        if dtype == 'float64':
          tol = fp64_tol
        elif dtype == 'float32':
          tol = fp32_tol
        else:
          assert dtype == 'float16'
          tol = fp16_tol

        # We use absolute tolerances in addition to relative tolerances, because
        # some of the values are very close to zero.
        self.assertAllClose(expected, actual, rtol=tol, atol=tol)

  @tf_test_util.run_in_graph_and_eager_modes
  def test_forward(self):
    # For numeric stability, we ensure the axis's dimension(s) have at least 4
    # elements.
    self._test_forward_pass((4, 3), (0,))
    self._test_forward_pass((3, 4), (1,))
    self._test_forward_pass((4, 3, 2), (0,))
    self._test_forward_pass((2, 4, 2), (1,))
    self._test_forward_pass((2, 3, 4), (2,), fp16_tol=5e-2)
    self._test_forward_pass((2, 3, 2), (0, 2))
    self._test_forward_pass((2, 2, 2, 2), (1, 3))
    self._test_forward_pass((2, 2, 2, 2), (2, 3))
    self._test_forward_pass((2, 3, 4, 5), (3,))

  def _test_backward_pass(self, batch_input_shape, axis, fp64_tol=1e-5,
                          fp32_tol=1e-5, fp16_tol=2e-2):
    """Tests the backwards pass of layer normalization.

    Args:
      batch_input_shape: The input shape that will be used to test, including
        the batch dimension.
      axis: A list of axises to normalize. Will be passed to the `axis` argument
        of LayerNormalization.
      fp64_tol: The relative and absolute tolerance for float64.
      fp32_tol: The relative and absolute tolerance for float32.
      fp16_tol: The relative and absolute tolerance for float16.
    """
    param_shape = [batch_input_shape[i] for i in axis]
    param_elems = 1
    for dim in param_shape:
      param_elems *= dim
    beta = np.arange(param_elems, dtype='float64').reshape(param_shape)
    gamma = np.arange(1, param_elems + 1, dtype='float64').reshape(param_shape)
    x = np.random.normal(size=batch_input_shape)

    for epsilon in 1e-12, 1e-3:
      # Float64 must come first in this list, as we use the float64 numerical
      # gradients to compare to the float32 and float16 symbolic gradients as
      # well. Computing float32/float16 numerical gradients is too numerically
      # unstable.
      for dtype in 'float64', 'float32', 'float16':
        norm = normalization.LayerNormalization(
            axis=axis, dtype=dtype, batch_input_shape=batch_input_shape,
            epsilon=epsilon, beta_initializer=keras.initializers.constant(beta),
            gamma_initializer=keras.initializers.constant(gamma))
        norm.build(x.shape)

        # pylint: disable=cell-var-from-loop
        def forward_fn(x, beta, gamma):
          # We must monkey-patch the attributes of `norm` with the function
          # arguments, so that the gradient checker will properly compute their
          # gradients. The gradient checker computes gradients with respect to
          # the input arguments of `f`.
          with test.mock.patch.object(norm, 'beta', beta):
            with test.mock.patch.object(norm, 'gamma', gamma):
              return norm(x)
        # pylint: enable=cell-var-from-loop
        results = gradient_checker_v2.compute_gradient(
            forward_fn, [keras.backend.cast(x, dtype), norm.beta, norm.gamma])
        ([x_grad_t, beta_grad_t, gamma_grad_t],
         [x_grad_n, beta_grad_n, gamma_grad_n]) = results

        if dtype == 'float64':
          # We use the float64 numeric gradients as the reference, to compare
          # against the symbolic gradients for all dtypes.
          x_grad_ref = x_grad_n
          beta_grad_ref = beta_grad_n
          gamma_grad_ref = gamma_grad_n
          tol = fp64_tol
        elif dtype == 'float32':
          tol = fp32_tol
        else:
          assert dtype == 'float16'
          tol = fp16_tol

        # We use absolute tolerances in addition to relative tolerances, because
        # some of the values are very close to zero.
        self.assertAllClose(x_grad_t, x_grad_ref, rtol=tol, atol=tol)
        self.assertAllClose(beta_grad_t, beta_grad_ref, rtol=tol, atol=tol)
        self.assertAllClose(gamma_grad_t, gamma_grad_ref, rtol=tol, atol=tol)

  # The gradient_checker_v2 does not work properly with LayerNorm in graph mode.
  @tf_test_util.run_v2_only
  def test_backward(self):
    # For numeric stability, we ensure the axis's dimension(s) have at least 4
    # elements.
    self._test_backward_pass((4, 3), (0,))
    self._test_backward_pass((2, 4, 2), (1,))
    self._test_backward_pass((2, 3, 4), (2,))
    self._test_backward_pass((2, 3, 2), (0, 2), fp64_tol=5e-4, fp32_tol=5e-4)
    self._test_backward_pass((2, 2, 2, 2), (1, 3))
    self._test_backward_pass((2, 2, 2, 2), (2, 3))
# Standard TensorFlow test entry point: runs all test cases in this module.
if __name__ == '__main__':
  test.main()
| |
import json
from collections import OrderedDict

import numpy as np
from pandas import DataFrame, concat

from utils.cu_data import *
from utils.cu_io import IO
__author__ = 'Stone'
# Module 'listings' -- common listing data model manager
"""manager for common listing data manipulation
importing this listings class directly, you can manipulate
the listing data
"""
# class as a manager of the listing data
class Listings:
cols = ['title', 'manufacturer', 'currency', 'price']
    def __init__(self, data):
        """
        Initialize the manager from an existing data frame
        :param data: a pandas DataFrame holding the listing records
        :return:
        """
        self.data = data
    @classmethod
    def from_json(cls, filename, *path):
        """
        class method to initialize data from JSON-lines raw data
        :param filename: the filename with name and extension
                         e.g. "listings.txt"
        :param path: path components as positional arguments, joined in order
                     e.g. "..", "data"
        :return: a new Listings instance wrapping the loaded data frame
        """
        # IO().read_from_json_line presumably yields one dict per JSON line;
        # verify against utils.cu_io.
        data = DataFrame.from_records(IO().read_from_json_line(filename, *path))
        return cls(data)
def __get__(self, obj):
"""
get series with a specific column
:param obj: will throw a error if the obj is None or not found in column
:return: yields a series
"""
if obj is None:
return None
if obj not in self.data:
raise AttributeError("unreadable attribute")
return self.data[obj]
def __getitem__(self, obj):
"""
get series with a specific column
:param obj: will throw a error if the obj is None or not found in column
:return: yields a series
"""
return self.__get__(obj)
    def __set__(self, obj, value):
        """
        Set (or create) the column ``obj`` with ``value``.

        Note: despite the descriptor-protocol name, this is invoked directly
        (and via __setitem__), not through attribute access.
        :param obj: column label to assign
        :param value: scalar or sequence assigned to the whole column
        :return:
        """
        self.data.loc[:, obj] = value
    def __setitem__(self, obj, value):
        """
        Bracket assignment for a column; delegates to __set__.
        :param obj: column label to assign
        :param value: scalar or sequence assigned to the whole column
        :return:
        """
        self.__set__(obj, value)
def __delete__(self, obj):
"""
drop series with a obj
:param obj: will throw a error if the obj is None or not found in column
:return:
"""
if obj not in self.data:
raise AttributeError("can't delete column")
self.data.drop([obj], axis=1, errors='ignore')
    def get_by_index(self, i):
        """
        get a row by the index
        :param i: index label addressing the row
        :return: a one-row DataFrame (the label is wrapped in a list, so a
                 frame — not a Series — is returned)
        """
        return self.data.loc[[i]]
def find_by_index_set(self, idx):
"""
get a listings by a list of index
:param idx: list of index
:return: listings instance
"""
return Listings(self.data.ix[idx])
def index_to_list(self):
"""
:return: listing of index
"""
return list(self.data.index)
def get_usd_price(self, rates):
"""
get the usd price with a current exchange rate
:param rates: the current exchange rates table
:return: the series of the usd price
"""
return self.data.apply(lambda row: rates.convert_to_usd(row.currency.upper(), row.price), axis=1)
    def convert_to_usd_price(self, rates):
        """
        Add a 'USD' column computed from the current exchange rates.
        :param rates: the current exchange rates table
        :return: self instance (allows method chaining)
        """
        self.data['USD'] = self.get_usd_price(rates)
        return self
def add_lower_case(self, *cols):
"""
convert the every series to lower case
:param cols: the series to be converted, if None use default cols
:return: self instance
"""
if len(cols) < 1:
cols = Listings.cols
for col in cols:
self.data[col + "_lower"] = self.data[col].str.lower()
return self
def size(self):
"""
:return: the number of rows the data contains
"""
return self.data.shape[0]
def is_empty(self):
"""
:return: bool of the data if it is empty
"""
return self.size() < 1
    def head(self, n=5):
        """
        :param n: number of rows to be returned (default 5)
        :return: the first ``n`` rows of the data
        """
        return self.data.head(n)
    def all(self):
        """
        :return: a copy of the data frame (callers may mutate it freely)
        """
        return self.data.copy()
    def index(self):
        """
        :return: the index labels of the data frame as a numpy array
                 (``Index.values``, not a Python list)
        """
        return self.data.index.values
    def describe(self):
        """
        :return: summary statistics of the data frame (pandas describe())
        """
        return self.data.describe()
def exclude(self, listings):
"""
advanced set operation on data frame
:param listings: the listings to be excluded from current set
based list of its indexes
:return: self instance
"""
self.data = self.data.loc[~self.data.index.isin(listings.index())]
return self
def append(self, listings):
"""
advanced set operation on data frame
:param listings: the listings to be appended to current set
:return:
"""
self.data = self.data.append(listings.all())
def to_json(self, orient="records"):
"""
Convert the object to a JSON string in the cols
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
:param orient: The format of the JSON string
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
- columns : dict like {column -> {index -> value}}
- values : just the values array
:return: json string
"""
return self.data.ix[:, self.cols].to_json(orient=orient)
    def to_ordered_dict(self):
        """
        Parse the JSON produced by to_json() while preserving key order,
        since the order matters when you verify the data.
        :return: the parsed JSON with OrderedDict objects (a list of them
                 for the default "records" orient)
        """
        return json.loads(self.to_json(), object_pairs_hook=OrderedDict)
def find_records_with_pms(self, manufacturers):
"""
find all records with a valid manufacturer either in manufacturer col or title col
greedy search
:param manufacturers: list of manufacturer
:return: a copy of the listings instance
"""
return Listings(self.data[(self.data.manufacturer_lower.apply(
lambda x: contains_in_str(x, manufacturers)
) | self.data.title.apply(
lambda x: search_in_title(x, manufacturers)
)) & self.data.price.apply(
lambda x: isfloat(x)
)])
def find_records_with_pm(self, pm):
"""
find all records with a valid manufacturer either in manufacturer col or title col starts with it
restrict search
:param pm: manufacturer
:return: a copy of data frame
"""
pm_reg = compile_man_reg(pm)
return self.data[self.data.manufacturer.apply(
lambda manufacturer: bool(pm_reg.search(manufacturer.lower()))
) | self.data.title.apply(
lambda title: title.lower().startswith(pm)
)]
def filtered_by_pm(self, pm):
"""
filters out the records do not contain a valid manufacturer either in manufacturer col or title col
greedy search
:param pm: manufacturer
:return: a copy of the listings instance
"""
return Listings(self.data[self.data.manufacturer_lower.apply(
lambda x: partial_match(pm, x) == 100
) | self.data.title_lower.apply(
lambda x: partial_match(pm, x) == 100
)])
def filtered_by_model(self, model):
"""
filters out the records do not contain the given model in the title
restrict search to find the model in title cols
:param model: model str
:return: a copy of the listings instance
"""
reg = compile_model_reg(model)
return Listings(self.data[self.data.title_lower.apply(lambda x: bool(reg.search(x)))])
def filtered_by_family(self, family):
"""
filters out the records do not contain the given family in the title
restrict search to find the model in title cols
they partial ratio default as 85, but might be configurable in the config file
:param family: family str
:return: a copy of the listings instance
"""
return Listings(self.data[self.data.title_lower.apply(
lambda x: (partial_match(family, x) > 85))])
    def filtered_by_price(self, pm, family=None):
        """
        filters out the records based on the given manufacturer and the mean of prices
        restrict search to find a listing of a valid price according to the Normal distribution
        (https://en.wikipedia.org/wiki/Normal_distribution)
        according to the fact known as the 68-95-99.7 (empirical) rule
        the probability of a price within the corresponding thresholds is:
        68%, 95%, 99%
        lo, lo2, lo3 = round(mean - std, 2), round(mean - std * 2, 2), round(mean - std * 3, 2)
        hi, hi2, hi3 = round(mean + std, 2), round(mean + std * 2, 2), round(mean + std * 3, 2)
        we consider that the manufacturer only found in title col using the first threshold
        the manufacturer also found in manufacturer col and at the beginning of title using the second threshold
        the overall probability will be within 68 < x < 95; for this case it should be 90%+ accurate
        :param pm: manufacturer str
        :param family: family str
        :return: a copy of the listings instance
        """
        # Nothing to filter on an empty set.
        if self.is_empty():
            return self
        if 'USD' not in self.data:
            raise ValueError("column USD not exists!")
        matched = self
        if family is not None:  # narrow by family first when one is given
            matched = matched.filtered_by_family(family)
        # find_records_with_pm returns a plain DataFrame (records whose
        # manufacturer matches pm or whose title starts with it).
        matched = matched.find_records_with_pm(pm)
        if matched.shape[0] > 0:
            mean, std = matched.USD.mean(), matched.USD.std()
            if np.isnan(std):  # std is NaN for a single-row sample
                std = mean / 2
            # Rows outside the strict match but within 1 std of its mean (68%).
            remain = self.data.loc[(~self.data.index.isin(matched.index.values)) &
                                   (abs(mean - self.data.USD) <= std)]
            # Strictly matched rows kept within 2 std (95%), plus the remainder.
            # NOTE(review): the boolean mask is indexed by self.data while
            # `matched` is a subset — modern pandas raises on unalignable
            # boolean Series; also DataFrame.append was removed in pandas 2.0.
            # Confirm the targeted pandas version.
            matched = matched.loc[(abs(mean - self.data.USD) <= std * 2)].append(remain)
        return Listings(matched)
def get_invalid_manufacturers(self, manufacturers, pm_mapping):
"""
find all invalid pms with a valid manufacturers and a pm mapping
greedy search
:param pm_mapping: a mapping from official name to different abbreviation or unofficial name
:param manufacturers: list of manufacturer
:return: a copy of the listings instance
"""
ims, vms = [], []
for x in manufacturers:
vms.append(x)
if x in pm_mapping:
vms.extend(pm_mapping[x]) # get all valid unofficial name
for im in self.data[self.data.USD < 40].manufacturer_lower.unique():
matched = False
for vm in vms:
if partial_match(im, vm) == 100: # partial match will be ok, since fuji is equivalent to fujifilm
matched = True
break
if not matched:
ims.append(im)
return ims
    def get_pm_dict(self, manufacturers, pm_mapping):
        """
        build a dict of listings by pm using normal distribution
        :param manufacturers: unique pm found in products
        :param pm_mapping: a mapping from official name to different abbreviation or unofficial name
        :return: a dict of listings keyed by manufacturer name
        """
        # Names that matched no valid manufacturer/alias; rows carrying them
        # are excluded from the per-pm statistics below.
        ims = self.get_invalid_manufacturers(manufacturers, pm_mapping)
        pm_dict = {}
        for pm in manufacturers:
            # All valid records that contain this pm (exact manufacturer match
            # or title prefix), skipping known-invalid manufacturer names.
            matched = self.data[self.data.apply(
                lambda row: (row.manufacturer_lower not in ims) and (
                    partial_match(pm, row.manufacturer_lower) == 100 or row.title_lower.startswith(pm)), axis=1)]
            mean, std = matched.USD.mean(), matched.USD.std()
            if np.isnan(std):  # std is NaN for a single-row sample
                std = mean / 2
            # Unique manufacturer spellings whose price is within 1 std.
            # NOTE(review): the boolean mask is built from self.data.USD but
            # applied to the `matched` subset — confirm index alignment on the
            # targeted pandas version.
            matched = matched.loc[
                (abs(mean - self.data.USD) <= std)].manufacturer_lower.unique()
            # Re-select from the full data: rows with one of those spellings
            # that also fully match pm in manufacturer or title.
            matched = self.data[
                self.data.manufacturer_lower.isin(matched) & (self.data.manufacturer_lower.apply(
                    lambda x: partial_match(pm, x) == 100
                ) | self.data.title_lower.apply(
                    lambda x: partial_match(pm, x) == 100
                ))]
            pm_dict[pm] = Listings(matched)
        return pm_dict
| |
# Copyright (c) 2014, Yuta Okamoto <okapies@gmail.com>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import boolean, integer, mutually_exclusive
class Source(AWSProperty):
    # Repository descriptor for application or cookbook code; referenced by
    # App.AppSource and Stack.CustomCookbooksSource below. Each props entry
    # maps a CloudFormation property name to (type/validator, required?).
    props = {
        'Password': (basestring, False),
        'Revision': (basestring, False),
        'SshKey': (basestring, False),
        'Type': (basestring, False),
        'Url': (basestring, False),
        'Username': (basestring, False),
    }
class SslConfiguration(AWSProperty):
    # SSL certificate material for an App (see App.SslConfiguration below).
    props = {
        'Certificate': (basestring, True),
        'Chain': (basestring, False),
        'PrivateKey': (basestring, True),
    }
class ChefConfiguration(AWSProperty):
    # Chef/Berkshelf settings for a Stack (see Stack.ChefConfiguration below).
    props = {
        'BerkshelfVersion': (basestring, False),
        'ManageBerkshelf': (boolean, False),
    }
class Recipes(AWSProperty):
    # Per-lifecycle-event lists of custom Chef recipes for a Layer
    # (see Layer.CustomRecipes below).
    props = {
        'Configure': ([basestring], False),
        'Deploy': ([basestring], False),
        'Setup': ([basestring], False),
        'Shutdown': ([basestring], False),
        'Undeploy': ([basestring], False),
    }
def validate_volume_type(volume_type):
    """Accept only the supported EBS volume types; return the value unchanged.

    :raises ValueError: when *volume_type* is not one of the allowed names
    """
    allowed = ('standard', 'io1', 'gp2')
    if volume_type in allowed:
        return volume_type
    raise ValueError("VolumeType (given: %s) must be one of: %s" % (
        volume_type, ', '.join(allowed)))
class VolumeConfiguration(AWSProperty):
    # EBS volume layout attached to a Layer (see Layer.VolumeConfigurations).
    props = {
        'Encrypted': (boolean, False),
        'Iops': (integer, False),
        'MountPoint': (basestring, True),
        'NumberOfDisks': (integer, True),
        'RaidLevel': (integer, False),
        'Size': (integer, True),
        'VolumeType': (validate_volume_type, False)
    }

    def validate(self):
        """Iops is mandatory for 'io1' volumes and forbidden otherwise."""
        vol_type = self.properties.get('VolumeType')
        iops = self.properties.get('Iops')
        is_io1 = vol_type == 'io1'
        if is_io1 and not iops:
            raise ValueError("Must specify Iops if VolumeType is 'io1'.")
        if iops and not is_io1:
            raise ValueError("Cannot specify Iops if VolumeType is not 'io1'.")
class StackConfigurationManager(AWSProperty):
    # Configuration-manager name/version pair for a Stack
    # (see Stack.ConfigurationManager below).
    props = {
        'Name': (basestring, False),
        'Version': (basestring, False),
    }
class TimeBasedAutoScaling(AWSProperty):
    # Per-weekday schedule dicts for an Instance's time-based auto scaling
    # (see Instance.TimeBasedAutoScaling below).
    props = {
        'Monday': (dict, False),
        'Tuesday': (dict, False),
        'Wednesday': (dict, False),
        'Thursday': (dict, False),
        'Friday': (dict, False),
        'Saturday': (dict, False),
        'Sunday': (dict, False),
    }
class AutoScalingThresholds(AWSProperty):
    # Metric thresholds used by LoadBasedAutoScaling's up/down scaling rules.
    props = {
        'CpuThreshold': (float, False),
        'IgnoreMetricsTime': (integer, False),
        'InstanceCount': (integer, False),
        'LoadThreshold': (float, False),
        'MemoryThreshold': (float, False),
        'ThresholdsWaitTime': (integer, False),
    }
class Environment(AWSProperty):
    # One app environment variable (see App.Environment below).
    # NOTE(review): 'Secure' uses the plain `bool` type while sibling classes
    # use the `boolean` validator — confirm this asymmetry is intentional.
    props = {
        'Key': (basestring, True),
        'Secure': (bool, False),
        'Value': (basestring, True),
    }
class LoadBasedAutoScaling(AWSProperty):
    # Load-based auto-scaling rules for a Layer
    # (see Layer.LoadBasedAutoScaling below).
    props = {
        'DownScaling': (AutoScalingThresholds, False),
        'Enable': (bool, False),
        'UpScaling': (AutoScalingThresholds, False),
    }
def validate_data_source_type(data_source_type):
    """Accept only the supported data-source kinds; return the value unchanged.

    :raises ValueError: when *data_source_type* is not one of the allowed names
    """
    allowed = (
        'AutoSelectOpsworksMysqlInstance',
        'OpsworksMysqlInstance',
        'RdsDbInstance'
    )
    if data_source_type in allowed:
        return data_source_type
    raise ValueError("Type (given: %s) must be one of: %s" % (
        data_source_type, ', '.join(allowed)))
class DataSource(AWSProperty):
    # Database an App attaches to (see App.DataSources below).
    props = {
        'Arn': (basestring, False),
        'DatabaseName': (basestring, False),
        'Type': (validate_data_source_type, False)
    }
class App(AWSObject):
    # CloudFormation AWS::OpsWorks::App resource.
    resource_type = "AWS::OpsWorks::App"
    props = {
        'AppSource': (Source, False),
        'Attributes': (dict, False),
        'DataSources': ([DataSource], False),
        'Description': (basestring, False),
        'Domains': ([basestring], False),
        'EnableSsl': (boolean, False),
        'Environment': ([Environment], False),
        'Name': (basestring, True),
        'Shortname': (basestring, False),
        'SslConfiguration': (SslConfiguration, False),
        'StackId': (basestring, True),
        'Type': (basestring, True),
    }
class ElasticLoadBalancerAttachment(AWSObject):
    # CloudFormation AWS::OpsWorks::ElasticLoadBalancerAttachment resource:
    # binds a classic ELB to a layer.
    resource_type = "AWS::OpsWorks::ElasticLoadBalancerAttachment"
    props = {
        'ElasticLoadBalancerName': (basestring, True),
        'LayerId': (basestring, True),
        # NOTE(review): Tags is unusual on this resource type — confirm it is
        # actually supported by CloudFormation here.
        'Tags': ((Tags, list), False),
    }
class EbsBlockDevice(AWSProperty):
    # EBS volume settings inside a BlockDeviceMapping (see below).
    props = {
        'DeleteOnTermination': (boolean, False),
        'Iops': (integer, False),
        'SnapshotId': (basestring, False),
        'VolumeSize': (integer, False),
        'VolumeType': (basestring, False),
    }
class BlockDeviceMapping(AWSProperty):
    # One device mapping for an Instance: either an EBS volume or an
    # ephemeral (virtual) device, never both.
    props = {
        'DeviceName': (basestring, False),
        'Ebs': (EbsBlockDevice, False),
        'NoDevice': (basestring, False),
        'VirtualName': (basestring, False),
    }

    def validate(self):
        """Reject mappings that set both 'Ebs' and 'VirtualName'."""
        mutually_exclusive(
            self.__class__.__name__, self.properties, ['Ebs', 'VirtualName'])
class Instance(AWSObject):
    # CloudFormation AWS::OpsWorks::Instance resource.
    resource_type = "AWS::OpsWorks::Instance"
    props = {
        'AgentVersion': (basestring, False),
        'AmiId': (basestring, False),
        'Architecture': (basestring, False),
        'AutoScalingType': (basestring, False),
        'AvailabilityZone': (basestring, False),
        'BlockDeviceMappings': ([BlockDeviceMapping], False),
        'EbsOptimized': (boolean, False),
        'ElasticIps': ([basestring], False),
        'Hostname': (basestring, False),
        'InstallUpdatesOnBoot': (boolean, False),
        'InstanceType': (basestring, True),
        'LayerIds': ([basestring], True),
        'Os': (basestring, False),
        'RootDeviceType': (basestring, False),
        'SshKeyName': (basestring, False),
        'StackId': (basestring, True),
        'SubnetId': (basestring, False),
        'Tenancy': (basestring, False),
        'TimeBasedAutoScaling': (TimeBasedAutoScaling, False),
        'VirtualizationType': (basestring, False),
        'Volumes': ([basestring], False),
    }
class ShutdownEventConfiguration(AWSProperty):
    # Shutdown lifecycle-event tuning (see LifeCycleConfiguration below).
    props = {
        'DelayUntilElbConnectionsDrained': (boolean, False),
        'ExecutionTimeout': (integer, False),
    }
class LifeCycleConfiguration(AWSProperty):
    # Lifecycle-event configuration for a Layer
    # (see Layer.LifecycleEventConfiguration below).
    props = {
        'ShutdownEventConfiguration': (ShutdownEventConfiguration, False),
    }
class Layer(AWSObject):
    # CloudFormation AWS::OpsWorks::Layer resource.
    resource_type = "AWS::OpsWorks::Layer"
    props = {
        'Attributes': (dict, False),
        'AutoAssignElasticIps': (boolean, True),
        'AutoAssignPublicIps': (boolean, True),
        'CustomInstanceProfileArn': (basestring, False),
        'CustomJson': ((basestring, dict), False),
        'CustomRecipes': (Recipes, False),
        'CustomSecurityGroupIds': ([basestring], False),
        'EnableAutoHealing': (boolean, True),
        'InstallUpdatesOnBoot': (boolean, False),
        'LifecycleEventConfiguration': (LifeCycleConfiguration, False),
        'LoadBasedAutoScaling': (LoadBasedAutoScaling, False),
        'Name': (basestring, True),
        'Packages': ([basestring], False),
        'Shortname': (basestring, True),
        'StackId': (basestring, True),
        'Type': (basestring, True),
        'VolumeConfigurations': ([VolumeConfiguration], False),
    }
class RdsDbInstance(AWSProperty):
    # RDS instance registration for a Stack (see Stack.RdsDbInstances below).
    props = {
        'DbPassword': (basestring, True),
        'DbUser': (basestring, True),
        'RdsDbInstanceArn': (basestring, True)
    }
class ElasticIp(AWSProperty):
    # Elastic IP registration for a Stack (see Stack.ElasticIps below).
    props = {
        'Ip': (basestring, True),
        'Name': (basestring, False),
    }
class Stack(AWSObject):
    # CloudFormation AWS::OpsWorks::Stack resource.
    resource_type = "AWS::OpsWorks::Stack"
    props = {
        'AgentVersion': (basestring, False),
        'Attributes': (dict, False),
        'ChefConfiguration': (ChefConfiguration, False),
        'CloneAppIds': ([basestring], False),
        'ClonePermissions': (boolean, False),
        'ConfigurationManager': (StackConfigurationManager, False),
        'CustomCookbooksSource': (Source, False),
        'CustomJson': ((basestring, dict), False),
        'DefaultAvailabilityZone': (basestring, False),
        'DefaultInstanceProfileArn': (basestring, True),
        'DefaultOs': (basestring, False),
        'DefaultRootDeviceType': (basestring, False),
        'DefaultSshKeyName': (basestring, False),
        'DefaultSubnetId': (basestring, False),
        'EcsClusterArn': (basestring, False),
        'ElasticIps': ([ElasticIp], False),
        'HostnameTheme': (basestring, False),
        'Name': (basestring, True),
        'RdsDbInstances': ([RdsDbInstance], False),
        'ServiceRoleArn': (basestring, True),
        'SourceStackId': (basestring, False),
        'Tags': ((Tags, list), False),
        'UseCustomCookbooks': (boolean, False),
        'UseOpsworksSecurityGroups': (boolean, False),
        'VpcId': (basestring, False),
    }
    def validate(self):
        """A stack placed in a VPC must also name a default subnet.

        :raises ValueError: if VpcId is set without DefaultSubnetId
        :return: True when the properties are consistent
        """
        if 'VpcId' in self.properties and \
                'DefaultSubnetId' not in self.properties:
            # Fix: the adjacent string literals previously concatenated to
            # "...DefaultSubnetId to bespecified" (missing space).
            raise ValueError('Using VpcId requires DefaultSubnetId to be '
                             'specified')
        return True
class UserProfile(AWSObject):
    # CloudFormation AWS::OpsWorks::UserProfile resource.
    resource_type = "AWS::OpsWorks::UserProfile"
    props = {
        'AllowSelfManagement': (boolean, False),
        'IamUserArn': (basestring, True),
        'SshPublicKey': (basestring, False),
        'SshUsername': (basestring, False),
    }
class Volume(AWSObject):
    # CloudFormation AWS::OpsWorks::Volume resource: registers an existing
    # EC2 volume with a stack.
    resource_type = "AWS::OpsWorks::Volume"
    props = {
        'Ec2VolumeId': (basestring, True),
        'MountPoint': (basestring, False),
        'Name': (basestring, False),
        'StackId': (basestring, True),
    }
class EngineAttribute(AWSProperty):
    # Name/value pair for Server.EngineAttributes below.
    props = {
        'Name': (basestring, False),
        'Value': (basestring, False),
    }
class Server(AWSObject):
    # CloudFormation AWS::OpsWorksCM::Server resource (OpsWorks for Chef
    # Automate / Puppet Enterprise — note the distinct OpsWorksCM namespace).
    resource_type = "AWS::OpsWorksCM::Server"
    props = {
        'AssociatePublicIpAddress': (boolean, False),
        'BackupId': (basestring, False),
        'BackupRetentionCount': (integer, False),
        'CustomCertificate': (basestring, False),
        'CustomDomain': (basestring, False),
        'CustomPrivateKey': (basestring, False),
        'DisableAutomatedBackup': (boolean, False),
        'Engine': (basestring, False),
        'EngineAttributes': ([EngineAttribute], False),
        'EngineModel': (basestring, False),
        'EngineVersion': (basestring, False),
        'InstanceProfileArn': (basestring, True),
        'InstanceType': (basestring, True),
        'KeyPair': (basestring, False),
        'PreferredBackupWindow': (basestring, False),
        'PreferredMaintenanceWindow': (basestring, False),
        'SecurityGroupIds': ([basestring], False),
        'ServerName': (basestring, False),
        'ServiceRoleArn': (basestring, True),
        'SubnetIds': ([basestring], False),
        'Tags': ((Tags, list), False),
    }
| |
# Copyright (c) 2019 Mycroft AI, Inc. and Matthew Scholefield
#
# This file is part of Mycroft Light
# (see https://github.com/MatthewScholefield/mycroft-light).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from inspect import isclass
from typing import NamedTuple
from typing import Union, Any, Callable, Dict
from mycroft.intent_match import IntentMatch
from mycroft.plugin.util import Empty
from mycroft.util.misc import warn_once, recursive_merge
class BoolAttr:
    """A callable boolean flag: truth-testing reads it, calling it sets it."""

    def __init__(self, default=False):
        self.value = default

    def __call__(self, value=True):
        # Calling the attribute (optionally with an explicit value) sets it.
        self.value = value

    def __bool__(self):
        return self.value

    def __repr__(self):
        return 'BoolAttr({})'.format(self.value)
class Package:
    """
    Object to store skill interaction data
    Example Usage:
    >>> p = Package({
    ...     'faceplate': {
    ...         'mouth': {
    ...             'text': str
    ...         },
    ...         'eyes': {
    ...             'color': (int, int, int),
    ...         }
    ...     }
    ... })
    ...
    >>> p.faceplate.mouth.text = 'hello'
    >>> p.faceplate.eyes.color = (0, 255, 100)
    >>> print(p)
    faceplate:
        mouth:
            text: 'hello'
        eyes:
            color: (0, 255, 100)
    >>> p.execute({
    ...     'faceplate': {
    ...         'mouth': {
    ...             'text': lambda x: print('Setting the faceplate text to:', x)
    ...         },
    ...         'eyes': {
    ...             'color': lambda colors: print('Setting the eye color to:', colors)
    ...         }
    ...     }
    ... })
    ...
    Setting the faceplate text to: hello
    Setting the eye color to: (0, 255, 100)
    """
    def __init__(self, struct: dict = None):
        """Create a package whose attribute tree mirrors *struct*."""
        self._struct = struct or {}
        self._load_struct(self._struct)
    def add(self, **kwargs):
        """Assign each keyword as an attribute (type-checked) and return self."""
        for k, v in kwargs.items():
            setattr(self, k, v)
        return self
    def __type_hinting__(self):
        # Never called at runtime; exists only so IDEs/type checkers know the
        # attribute names that _load_struct creates dynamically.
        self.action = ''  # type: str
        self.skip_activation = ''  # type: bool
        FaceplateType = NamedTuple('Faceplate', [
            ('eyes', NamedTuple('Eyes', [
                ('blink', Callable), ('reset', Callable), ('color', tuple)
            ])),
            ('mouth', NamedTuple('Mouth', [
                ('text', str), ('reset', Callable), ('listen', Callable)
            ]))
        ])
        self.faceplate: FaceplateType = ''
        self.data = ''  # type: dict
        self.skill = ''  # type: str
        self.lang = ''  # type: str
        self.match = ''  # type: IntentMatch
        self.confidence = ''  # type: float
    def _load_struct(self, struct: dict):
        """Create a slot per struct key: dicts become nested Packages, an
        empty tuple becomes a BoolAttr, anything else starts as None."""
        if not isinstance(struct, dict):
            raise ValueError('Invalid struct: ' + str(struct))
        for key, value in struct.items():
            if key in self.__dict__:
                # Slot already exists (struct merged twice): just re-check type.
                self._verify_assignment(key, self.__dict__[key])
                continue
            if isinstance(value, dict):
                self.__dict__[key] = Package(value)
            elif value == ():
                self.__dict__[key] = BoolAttr()
            else:
                self.__dict__[key] = None
    def add_struct(self, struct: dict):
        """Merge an additional structure description into this package."""
        self._load_struct(struct)
        self._struct = dict(recursive_merge(self._struct, struct))
    @classmethod
    def get_type(cls, obj):
        """Return obj's type; for containers, a same-shaped container of
        the element types (mirrors the struct's type descriptions)."""
        if isinstance(obj, (list, set, tuple)):
            return type(obj)(map(cls.get_type, obj))
        return type(obj)
    def _verify_assignment(self, key, value):
        """Checks types according to values defined in the package structure"""
        if value is None:
            return
        if key not in self._struct:
            # Unknown attribute: warn (once) instead of failing.
            message = 'Setting nonexistent attribute, ' + key + ', to ' + str(value)
            warn_once(type(self).__name__ + key, message, stack_offset=2)
            return
        desc = self._struct[key]
        if isinstance(desc, dict):
            raise AttributeError(key + ' must be followed by one of: ' + str(list(desc)))
        if desc == ():
            raise AttributeError('This should be called like: ' + key + '()')
        if isinstance(desc, (list, set, tuple)) and len(desc) > 0 and not isclass(list(desc)[0]):
            # Non-class containers enumerate the allowed literal values.
            if value not in desc:
                raise TypeError(value + ' must be one of the following values: ' + str(list(desc)))
        else:
            value_typ = self.get_type(value)
            if desc != value_typ:
                raise TypeError('Cannot assign value {!r} of type {} to type {}'.format(
                    value, value_typ, desc
                ))
    def __setattr__(self, key, value):
        # Private names and the rt/config injection points bypass verification.
        if key.startswith('_') or key in ('rt', 'config'):
            return object.__setattr__(self, key, value)
        self._verify_assignment(key, value)
        self.__dict__[key] = value
    def __getattr__(self, item):
        # Missing public attributes warn once and return an Empty placeholder
        # instead of raising, so skill code degrades gracefully.
        try:
            return self.__dict__[item]
        except KeyError:
            if item.startswith('_'):
                raise AttributeError(item)
            warn_once((type(self).__name__, item), 'package.' + item + ' attribute not found',
                      stack_offset=1)
            return Empty()
    @staticmethod
    def _to_str(cls, obj, indent=4, indent_level=0):
        """Show visual tree of attributes"""
        # NOTE: declared @staticmethod but takes `cls` explicitly; callers
        # pass the container class (Package or dict) to recurse on.
        if not isinstance(obj, cls):
            # Leaf value: render with a per-type formatter.
            def format_iter(x):
                return ', '.join(formatters.get(type(i), repr)(i) for i in x)
            formatters = {
                tuple: lambda x: '(' + format_iter(x) + ')',
                set: lambda x: 'set(' + format_iter(x) + ')',
                list: lambda x: '[' + format_iter(x) + ']',
                type: lambda x: x.__name__,
                BoolAttr: lambda x: 'True' if x else '',
                type(None): lambda _: ''
            }
            return formatters.get(type(obj), repr)(obj) + '\n'
        s = '\n'
        # Sort scalar attributes first; nested containers ('zzz'-prefixed) last.
        for key, value in sorted(obj.items(),
                                 key=lambda k_v: ('zzz' + k_v[0]) if isinstance(k_v[1], cls) else
                                 k_v[0]):
            if key.startswith('_') or not value:
                continue
            value_str = Package._to_str(cls, value, indent, indent_level + 1)
            s += ' ' * indent * indent_level + str(key) + ': ' + value_str
        return s
    def render_structure(self):
        """Render the declared structure (types) rather than assigned values."""
        return self._to_str(dict, self._struct)
    def __repr__(self):
        return self._to_str(type(self), self)
    def items(self):
        """Yield (key, value) for set public attributes; nested Packages as dicts."""
        for key, value in self.__dict__.items():
            if key.startswith('_') or not value:
                continue
            if isinstance(value, Package):
                yield key, dict(value.items())
            else:
                yield key, value
    def __bool__(self):
        # Truthy when any public attribute holds a truthy value.
        for key, value in self.__dict__.items():
            if key.startswith('_') or not value:
                continue
            if value:
                return True
        return False
    @classmethod
    def execute_data(cls, data: Union[Dict, Any], handlers: Union[Dict, Callable]):
        """
        Pairs a dict of handlers with the package's data.
        For example usage see the constructor for this class
        """
        if callable(handlers):
            return handlers(data)
        results = {}
        for key, value in data.items():
            if key not in handlers or not value:
                continue
            results[key] = cls.execute_data(value, handlers[key])
        return results
    def execute(self, handlers: Dict):
        """Run *handlers* against this package's attribute data."""
        return self.execute_data(self, handlers)
| |
# coding: utf-8
# Copyright (c) 2013 Jorge Javier Araya Navarro <jorgean@lavabit.org>
#
# This file is free software: you may copy, redistribute and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from . import summanode
import pyglet
from pyglet import gl
from . import euclid
import math
import copy
import ctypes
# GLSL fragment shader source: keeps the current color where the quadric
# x^2 - y < 0 holds and outputs transparent pixels elsewhere.  Presumably
# meant for anti-aliased caps/joins via the (currently commented-out)
# `cuadric = FragmentShader(cuadric_t)` below — confirm before enabling.
cuadric_t = '''
void main() {
vec2 pos = gl_TexCoord[0].st;
float res = pos.x*pos.x - pos.y;
if (res<0.0) {
gl_FragColor = gl_Color;
} else {
gl_FragColor = vec4(0.0,0.0,0.0,0.0);
}
}
'''
class Shader(object):
    # Thin wrapper that compiles one shader of `self.shader_type` (supplied by
    # the VertexShader/FragmentShader subclasses) and links it into its own
    # GL program object.
    def __init__(self, source):
        self.source = source
        self.shader_no = gl.glCreateShader(self.shader_type)
        if not self.shader_no:
            raise Exception("could not create shader")
        # NOTE(review): `source + chr(0)` assumes a byte string (Python 2);
        # under Python 3 ctypes.c_char_p needs bytes — confirm target version.
        prog = (ctypes.c_char_p * 1)(source + chr(0))
        length = (ctypes.c_int * 1)(0)
        # Null length pointer: the source is treated as NUL-terminated.
        gl.glShaderSource(self.shader_no, 1,
                          ctypes.cast(prog, ctypes.POINTER(ctypes.POINTER(ctypes.c_char))),
                          ctypes.cast(0, ctypes.POINTER(ctypes.c_int)))
        gl.glCompileShader(self.shader_no)
        self.program_no = gl.glCreateProgram()
        if not self.program_no:
            raise Exception("could not create program")
        gl.glAttachShader(self.program_no, self.shader_no)
        gl.glLinkProgram(self.program_no)
    def begin(self):
        # Make this program current for subsequent draw calls.
        gl.glUseProgram(self.program_no)
    def end(self):
        # Restore the fixed-function pipeline.
        gl.glUseProgram(0)
class VertexShader(Shader):
    # Shader specialized to the GL vertex stage.
    shader_type = gl.GL_VERTEX_SHADER
class FragmentShader(Shader):
    # Shader specialized to the GL fragment stage.
    shader_type = gl.GL_FRAGMENT_SHADER
#cuadric = FragmentShader(cuadric_t)
# Module-level counter handing each parameter() property a unique slot name.
__parameter_count = 0
def parameter(default=None):
    # Property factory: every call allocates a fresh "_<n>" backing attribute,
    # so each declared parameter stores its value independently.  Setting a
    # parameter also flips the owner's _dirty flag, which makes Canvas.draw
    # rebuild its geometry on the next frame.
    global __parameter_count
    name = str(__parameter_count)
    __parameter_count += 1
    def setter(self, value):
        self._dirty = True
        setattr(self, "_" + name, value)
    def getter(self):
        return getattr(self, "_" + name, default)
    return property(getter, setter)
# Line end-cap and join styles accepted by Canvas.set_endcap / Canvas.set_join.
ROUND_CAP, SQUARE_CAP, BUTT_CAP = range(3)
MITER_JOIN, BEVEL_JOIN, ROUND_JOIN = range(3)
class Context(object):
    """Current drawing state: color, stroke width, cap/join style, transform."""

    def __init__(self):
        self.color = 255, 255, 255, 255
        self.stroke_width = 2
        self.cap = ROUND_CAP
        self.join = ROUND_JOIN
        self.transform = euclid.Matrix3()

    def set_state(self):
        # Save the current color/line attributes, then install ours.
        gl.glPushAttrib(gl.GL_CURRENT_BIT | gl.GL_LINE_BIT)
        gl.glColor4ub(*self.color)
        gl.glLineWidth(self.stroke_width)

    def unset_state(self):
        # Restore whatever set_state saved.
        gl.glPopAttrib()

    def copy(self):
        """Return an independent deep copy of this drawing state."""
        return copy.deepcopy(self)
def flatten(*args):
    """Concatenate the elements of every iterable argument into one flat list."""
    return [item for seq in args for item in seq]
class Segment:
    """One straight stroke of a line of the given width.

    Exposes the four corner points of the stroked quad (tl/tr/bl/br); the
    joiner in Canvas.build_vbo may override corners via _tl/_tr/_bl/_br so
    adjacent segments meet cleanly.
    """
    def __init__(self, start, end, width):
        self.start = euclid.Point2(*start)
        # Fix: was `euclind.Point2` — a NameError on first instantiation.
        self.end = euclid.Point2(*end)
        self.width = width
        # Corner overrides (None until a join computes an intersection).
        self._tl = None
        self._bl = None
        self._tr = None
        self._br = None
    @property
    def direction(self):
        """Unit vector pointing from start to end."""
        return euclid.Vector2(*(self.end - self.start)).normalized()
    @property
    def line_width(self):
        """Half-width offset vector, perpendicular (90° rotation) to the segment."""
        return (
            euclid.Matrix3.new_rotate(math.radians(90)) * self.direction *
            (self.width / 2.0))
    # NOTE(review): the truthiness tests below mean a corner override that is
    # exactly the origin (0, 0) would be ignored — confirm intended.
    @property
    def tl(self):
        if self._tl: return self._tl
        return self.end + self.line_width
    @property
    def tr(self):
        if self._tr: return self._tr
        return self.end - self.line_width
    @property
    def bl(self):
        if self._bl: return self._bl
        return self.start + self.line_width
    @property
    def br(self):
        if self._br: return self._br
        return self.start - self.line_width
    @property
    def left(self):
        """The left edge of the stroked quad, bottom-left to top-left."""
        return euclid.LineSegment2(euclid.Point2(*self.bl),
                                   euclid.Point2(*self.tl))
    @property
    def right(self):
        """The right edge of the stroked quad, bottom-right to top-right."""
        return euclid.LineSegment2(euclid.Point2(*self.br),
                                   euclid.Point2(*self.tr))
    @property
    def points(self):
        """The quad as two triangles: six vertices, flattened to coordinates."""
        return flatten(self.bl, self.br, self.tr, self.bl, self.tr, self.tl)
    def reversed(self):
        """Return the same stroke traversed in the opposite direction."""
        return Segment(self.end, self.start, self.width)
class Canvas(summanode.SummaNode):
    # Scene node that records vector drawing commands (move_to/line_to under a
    # mutable Context) and renders them as textured GL triangles.  Geometry is
    # rebuilt lazily: any parameter change sets _dirty and draw() re-renders.
    # NOTE(review): the integer divisions (len(...)/6, /2) are Python-2 style;
    # under Python 3 they yield floats and break range()/vertex_list — this
    # module appears to target Python 2.
    def __init__(self):
        super(Canvas, self).__init__()
        self._dirty = True
        self._color = 255,255,255,255
        self._stroke_width = 1
        # List of (Context, parts) pairs; each parts is a list of polylines.
        self._parts = []
        self._vertex_list = None
        self._context = Context()
        self._context_stack = []
        self._texture = image = pyglet.resource.image('draw_texture.png').get_texture()
        self._context_change = True
        self._position = 0,0
    def draw(self):
        """Render the canvas, rebuilding the VBO first if anything changed."""
        if self._dirty:
            self._context = Context()
            self._parts = []
            self.free()
            self.render()
            self.build_vbo()
            self._dirty = False
        # set: bind texture and enable alpha blending
        gl.glEnable(self._texture.target)
        gl.glBindTexture(self._texture.target, self._texture.id)
        gl.glPushAttrib(gl.GL_COLOR_BUFFER_BIT)
        gl.glEnable(gl.GL_BLEND)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
        gl.glPushMatrix()
        self.transform()
        #cuadric.begin()
        self._vertex_list.draw(gl.GL_TRIANGLES)
        #cuadric.end()
        # unset: restore matrix, blend state and texture target
        gl.glPopMatrix()
        gl.glPopAttrib()
        gl.glDisable(self._texture.target)
    def endcap(self, line, cap_type):
        """Return (vertices, texcoords) closing the open start of *line*."""
        strip = []
        texcoord = []
        if cap_type == ROUND_CAP:
            # Half-disc built from three triangles; the curved edge comes from
            # the texture coordinates.
            s = Segment( line.start,
                         line.start + (-line.direction) * line.width / 2,
                         line.width)
            strip.extend([int(x) for x in flatten(
                s.bl, s.br, s.end,
                s.br, s.tr, s.end,
                s.bl, s.tl, s.end
            )])
            texcoord.extend([
                0.1, 0.9, 0.1, 0.5, 0.5, 0.9,
                0, 0, 0.5, 0, 1, 1,
                0, 0, 0.5, 0, 1, 1,])
        elif cap_type == SQUARE_CAP:
            # Square cap: extend the stroke past its end by half a width.
            segment = Segment( line.start,
                               line.start + (-line.direction) * line.width / 2,
                               line.width
                               )
            strip.extend([int(x) for x in segment.points])
            texcoord.extend( flatten(*[(0.1, 0.9, 0.1, 0.5, 0.5, 0.9)
                                       for x in range(len(segment.points) / 6)]
                                     ))
        return strip, texcoord
    def build_vbo(self):
        """Convert the recorded parts into one triangle vertex list."""
        strip = []
        colors = []
        texcoord = []
        for ctx, parts in self._parts:
            start_len = len(strip)
            for line in parts:
                # build the line segments
                last = line[0]
                segments = []
                for next in line[1:]:
                    segments.append( Segment( last, next, ctx.stroke_width ) )
                    last = next
                # a path whose endpoints coincide is closed and needs no caps
                if line[0] == line[-1]:
                    closed_path = True
                else:
                    closed_path = False
                # add caps at both open ends
                if not closed_path:
                    vertex, tex = self.endcap(segments[0], ctx.cap)
                    strip += vertex
                    texcoord += tex
                    vertex, tex = self.endcap(segments[-1].reversed(), ctx.cap)
                    strip += vertex
                    texcoord += tex
                # update middle points: make consecutive segments share corners
                prev = None
                for i, current in enumerate(segments):
                    # skip the very first segment (nothing to join yet)
                    if ( prev ):
                        # try a left-side intersection first (left turn)...
                        inter = prev.left.intersect( current.left )
                        if inter:
                            prev._tl = inter
                            current._bl = inter
                            bottom = prev.tr
                            top = current.br
                        else:
                            # ...otherwise the outer edge is on the right
                            inter = prev.right.intersect( current.right )
                            if inter:
                                prev._tr = inter
                                current._br = inter
                                bottom = prev.tl
                                top = current.bl
                    # add the elbow filling the wedge on the outer side
                    if ( prev and inter ):
                        if ctx.join == BEVEL_JOIN:
                            strip.extend( [ int(x) for x in
                                list(inter) + list(bottom) + list(top)
                                ])
                            texcoord += [ 0.1,0.9,0.1,0.5,0.5,0.9 ]
                        elif ctx.join in (MITER_JOIN, ROUND_JOIN):
                            # far point: where the outer edges would meet
                            if bottom == top:
                                far = euclid.Point2(*bottom)
                            else:
                                far = euclid.Ray2(
                                    euclid.Point2(*bottom), prev.direction
                                ).intersect(euclid.Ray2(
                                    euclid.Point2(*top), -current.direction
                                ))
                            strip.extend( [ int(x) for x in
                                list(inter) + list(bottom) + list(top) +
                                list(bottom) + list(top) + list(far)
                                ])
                            if ctx.join == ROUND_JOIN:
                                texcoord += [ 0.1,0.9,0.1,0.5,0.5,0.9, 0,0,1,1,0.5,0]
                            elif ctx.join == MITER_JOIN:
                                texcoord += [ 0.1,0.9,0.1,0.5,0.5,0.9,0.1,0.9,0.1,0.5,0.5,0.9 ]
                    # rotate values
                    prev = current
                # add the quads for the segment bodies themselves
                for s in segments:
                    strip.extend( [ int(x) for x in s.points ] )
                    texcoord += flatten(*[ (0.1,0.9,0.1,0.5,0.5,0.9)
                        for x in range( len(s.points)/6)
                        ])
            # one RGBA color per vertex emitted under this context
            colors.extend( list(ctx.color)*((len(strip)-start_len)/2) )
        vertex_list = pyglet.graphics.vertex_list(len(strip)/2,
            ('v2i', strip),
            ('c4B', colors ),
            ('t2f', texcoord),
            )
        self._vertex_list = vertex_list
    def on_exit(self):
        # Release GL resources when the node leaves the scene.
        self.free()
        super(Canvas, self).on_exit()
    def free(self):
        """Delete the vertex list (if any) and mark the canvas dirty."""
        self._dirty = True
        if self._vertex_list:
            self._vertex_list.delete()
            self._vertex_list = None
    def set_color(self, color):
        self._context.color = color
        self._context_change = True
    def set_stroke_width(self, stroke_width):
        self._context.stroke_width = stroke_width
        self._context_change = True
    def set_endcap(self, cap):
        self._context.cap = cap
        self._context_change = True
    def set_join(self, join):
        self._context.join = join
        self._context_change = True
    def rotate(self, radians):
        self._context.transform.rotate( radians )
    def translate(self, vect):
        self._context.transform.translate( *vect )
    def move_to(self, position):
        # Pen-up move: just relocate the current position (transformed).
        self._position = self._context.transform * euclid.Point2(*position)
    def line_to(self, end):
        """Extend the current polyline (or start a new part after a context change)."""
        if self._context_change:
            # Snapshot the context: later context mutations must not affect
            # the geometry already recorded under it.
            context, parts = self._context, [[self._position]]
            self._parts.append((context, parts))
            self._context = context.copy()
            self._context_change = False
        else:
            context, parts = self._parts[-1]
        end = self._context.transform * euclid.Point2(*end)
        if parts[-1][-1] == self._position:
            # continuous stroke: extend the last polyline
            parts[-1].append( end )
        else:
            # pen was moved: start a new polyline within the same part
            parts.append( [self._position, end] )
        self._position = end
    def push(self):
        # Save the current drawing context on the stack.
        self._context_stack.append( self._context.copy() )
    def pop(self):
        # Restore the most recently pushed drawing context.
        self._context = self._context_stack.pop()
class Line(Canvas):
    # Declarative straight-line node: the parameter() descriptors mark the
    # canvas dirty whenever one changes, and render() replays them as canvas
    # drawing commands on the next draw().
    start = parameter()
    end = parameter()
    stroke_width = parameter()
    color = parameter()
    def __init__(self, start, end, color, stroke_width=1):
        super(Line, self).__init__()
        self.start = start
        self.end = end
        self.color = color
        self.stroke_width = stroke_width
    def render(self):
        # Called by Canvas.draw when dirty: emit the line with current params.
        self.set_color( self.color )
        self.set_stroke_width( self.stroke_width )
        self.move_to( self.start )
        self.line_to( self.end )
| |
"""Facility to use the Expat parser to load a minidom instance
from a string or file.
This avoids all the overhead of SAX and pulldom to gain performance.
"""
# Warning!
#
# This module is tightly bound to the implementation details of the
# minidom DOM and can't be used with other DOM implementations. This
# is due, in part, to a lack of appropriate methods in the DOM (there is
# no way to create Entity and Notation nodes via the DOM Level 2
# interface), and for performance. The latter is the cause of some fairly
# cryptic code.
#
# Performance hacks:
#
# - .character_data_handler() has an extra case in which continuing
# data is appended to an existing Text node; this can be a
# speedup since pyexpat can break up character data into multiple
# callbacks even though we set the buffer_text attribute on the
# parser. This also gives us the advantage that we don't need a
# separate normalization pass.
#
# - Determining that a node exists is done using an identity comparison
# with None rather than a truth test; this avoids searching for and
# calling any methods on the node object if it exists. (A rather
# nice speedup is achieved this way as well!)
from xml.dom import xmlbuilder, minidom, Node
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE
from xml.parsers import expat
from xml.dom.minidom import _append_child, _set_attribute_node
from xml.dom.NodeFilter import NodeFilter
# Frequently used node-type codes, bound locally for faster lookups.
TEXT_NODE = Node.TEXT_NODE
CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE
DOCUMENT_NODE = Node.DOCUMENT_NODE
# Filter verdict codes, re-exported from xmlbuilder for convenience.
FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT
FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP
FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT
# Shared DOMImplementation used to create new Document nodes.
theDOMImplementation = minidom.getDOMImplementation()
# Expat typename -> TypeInfo
_typeinfo_map = {
    "CDATA": minidom.TypeInfo(None, "cdata"),
    "ENUM": minidom.TypeInfo(None, "enumeration"),
    "ENTITY": minidom.TypeInfo(None, "entity"),
    "ENTITIES": minidom.TypeInfo(None, "entities"),
    "ID": minidom.TypeInfo(None, "id"),
    "IDREF": minidom.TypeInfo(None, "idref"),
    "IDREFS": minidom.TypeInfo(None, "idrefs"),
    "NMTOKEN": minidom.TypeInfo(None, "nmtoken"),
    "NMTOKENS": minidom.TypeInfo(None, "nmtokens"),
}
class ElementInfo(object):
    """DTD-derived information about a single element type.

    Records the attribute declarations and the content model reported
    by Expat, so attribute types and ignorable whitespace can be
    resolved after parsing the DTD.
    """
    __slots__ = '_attr_info', '_model', 'tagName'

    def __init__(self, tagName, model=None):
        self.tagName = tagName
        self._attr_info = []
        self._model = model

    def __getstate__(self):
        return self._attr_info, self._model, self.tagName

    def __setstate__(self, state):
        self._attr_info, self._model, self.tagName = state

    def getAttributeType(self, aname):
        """Return the TypeInfo declared for *aname* (no-type if unknown)."""
        for record in self._attr_info:
            if record[1] != aname:
                continue
            decl = record[-2]
            # A parenthesized declaration is an enumerated type.
            if decl[0] == "(":
                return _typeinfo_map["ENUM"]
            return _typeinfo_map[decl]
        return minidom._no_type

    def getAttributeTypeNS(self, namespaceURI, localName):
        return minidom._no_type

    def isElementContent(self):
        """True when the content model allows only element children."""
        if not self._model:
            return False
        return self._model[0] not in (expat.model.XML_CTYPE_ANY,
                                      expat.model.XML_CTYPE_MIXED)

    def isEmpty(self):
        """True when the element was declared EMPTY."""
        if not self._model:
            return False
        return self._model[0] == expat.model.XML_CTYPE_EMPTY

    def isId(self, aname):
        """True when attribute *aname* was declared with type ID."""
        for record in self._attr_info:
            if record[1] == aname:
                return record[-2] == "ID"
        return False

    def isIdNS(self, euri, ename, auri, aname):
        # not sure this is meaningful
        return self.isId((auri, aname))
def _intern(builder, s):
    # Intern *s* in the builder's string pool so repeated names share a
    # single object (fast identity comparisons, less memory).
    return builder._intern_setdefault(s, s)
def _parse_ns_name(builder, name):
assert ' ' in name
parts = name.split(' ')
intern = builder._intern_setdefault
if len(parts) == 3:
uri, localname, prefix = parts
prefix = intern(prefix, prefix)
qname = "%s:%s" % (prefix, localname)
qname = intern(qname, qname)
localname = intern(localname, localname)
else:
uri, localname = parts
prefix = EMPTY_PREFIX
qname = localname = intern(localname, localname)
return intern(uri, uri), localname, prefix, qname
class ExpatBuilder:
    """Document builder that uses Expat to build a ParsedXML.DOM document
    instance."""

    def __init__(self, options=None):
        """Initialize the builder.

        options -- an xmlbuilder.Options instance controlling entity,
        comment and CDATA handling plus filtering; a default instance
        is created when omitted.
        """
        if options is None:
            options = xmlbuilder.Options()
        self._options = options
        if self._options.filter is not None:
            self._filter = FilterVisibilityController(self._options.filter)
        else:
            self._filter = None
            # This *really* doesn't do anything in this case, so
            # override it with something fast & minimal.
            self._finish_start_element = id
        self._parser = None
        self.reset()

    def createParser(self):
        """Create a new parser object."""
        return expat.ParserCreate()

    def getParser(self):
        """Return the parser object, creating a new one if needed."""
        if not self._parser:
            self._parser = self.createParser()
            self._intern_setdefault = self._parser.intern.setdefault
            self._parser.buffer_text = True
            # Attributes arrive as a flat [name, value, ...] list.
            self._parser.ordered_attributes = True
            self._parser.specified_attributes = True
            self.install(self._parser)
        return self._parser

    def reset(self):
        """Free all data structures used during DOM construction."""
        self.document = theDOMImplementation.createDocument(
            EMPTY_NAMESPACE, None, None)
        self.curNode = self.document
        self._elem_info = self.document._elem_info
        self._cdata = False

    def install(self, parser):
        """Install the callbacks needed to build the DOM into the parser."""
        # This creates circular references!
        parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
        parser.StartElementHandler = self.first_element_handler
        parser.EndElementHandler = self.end_element_handler
        parser.ProcessingInstructionHandler = self.pi_handler
        if self._options.entities:
            parser.EntityDeclHandler = self.entity_decl_handler
            parser.NotationDeclHandler = self.notation_decl_handler
        if self._options.comments:
            parser.CommentHandler = self.comment_handler
        if self._options.cdata_sections:
            parser.StartCdataSectionHandler = self.start_cdata_section_handler
            parser.EndCdataSectionHandler = self.end_cdata_section_handler
            parser.CharacterDataHandler = self.character_data_handler_cdata
        else:
            parser.CharacterDataHandler = self.character_data_handler
        parser.ExternalEntityRefHandler = self.external_entity_ref_handler
        parser.XmlDeclHandler = self.xml_decl_handler
        parser.ElementDeclHandler = self.element_decl_handler
        parser.AttlistDeclHandler = self.attlist_decl_handler

    def parseFile(self, file):
        """Parse a document from a file object, returning the document
        node."""
        parser = self.getParser()
        first_buffer = True
        try:
            while 1:
                buffer = file.read(16*1024)
                if not buffer:
                    break
                parser.Parse(buffer, 0)
                # Once the document element is seen, the internal subset
                # (if any) must be in the first buffer.
                if first_buffer and self.document.documentElement:
                    self._setup_subset(buffer)
                    first_buffer = False
            parser.Parse("", True)
        except ParseEscape:
            pass
        doc = self.document
        self.reset()
        self._parser = None
        return doc

    def parseString(self, string):
        """Parse a document from a string, returning the document node."""
        parser = self.getParser()
        try:
            parser.Parse(string, True)
            self._setup_subset(string)
        except ParseEscape:
            pass
        doc = self.document
        self.reset()
        self._parser = None
        return doc

    def _setup_subset(self, buffer):
        """Load the internal subset if there might be one."""
        if self.document.doctype:
            extractor = InternalSubsetExtractor()
            extractor.parseString(buffer)
            subset = extractor.getSubset()
            self.document.doctype.internalSubset = subset

    def start_doctype_decl_handler(self, doctypeName, systemId, publicId,
                                   has_internal_subset):
        # Note: Expat reports (name, systemId, publicId) while
        # createDocumentType() takes (name, publicId, systemId).
        doctype = self.document.implementation.createDocumentType(
            doctypeName, publicId, systemId)
        doctype.ownerDocument = self.document
        _append_child(self.document, doctype)
        self.document.doctype = doctype
        if self._filter and self._filter.acceptNode(doctype) == FILTER_REJECT:
            self.document.doctype = None
            del self.document.childNodes[-1]
            doctype = None
            self._parser.EntityDeclHandler = None
            self._parser.NotationDeclHandler = None
        if has_internal_subset:
            if doctype is not None:
                doctype.entities._seq = []
                doctype.notations._seq = []
            self._parser.CommentHandler = None
            self._parser.ProcessingInstructionHandler = None
            self._parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler

    def end_doctype_decl_handler(self):
        if self._options.comments:
            self._parser.CommentHandler = self.comment_handler
        self._parser.ProcessingInstructionHandler = self.pi_handler
        if not (self._elem_info or self._filter):
            # No DTD info and no filter: per-element finish work is a
            # no-op, so replace it with a cheap builtin.
            self._finish_end_element = id

    def pi_handler(self, target, data):
        node = self.document.createProcessingInstruction(target, data)
        _append_child(self.curNode, node)
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            self.curNode.removeChild(node)

    def character_data_handler_cdata(self, data):
        # Character-data callback used when CDATA sections are preserved.
        childNodes = self.curNode.childNodes
        if self._cdata:
            # Expat may deliver one CDATA section in several pieces.
            if ( self._cdata_continue
                and childNodes[-1].nodeType == CDATA_SECTION_NODE):
                childNodes[-1].appendData(data)
                return
            node = self.document.createCDATASection(data)
            self._cdata_continue = True
        elif childNodes and childNodes[-1].nodeType == TEXT_NODE:
            # Merge adjacent text so no separate normalization pass is
            # needed (see the performance notes at the top of the file).
            node = childNodes[-1]
            value = node.data + data
            node.data = value
            return
        else:
            node = minidom.Text()
            node.data = data
            node.ownerDocument = self.document
        _append_child(self.curNode, node)

    def character_data_handler(self, data):
        # Character-data callback used when CDATA sections are merged
        # into ordinary text.
        childNodes = self.curNode.childNodes
        if childNodes and childNodes[-1].nodeType == TEXT_NODE:
            node = childNodes[-1]
            node.data = node.data + data
            return
        node = minidom.Text()
        # Simplified from ``node.data = node.data + data``: a fresh Text
        # node always starts with empty data.
        node.data = data
        node.ownerDocument = self.document
        _append_child(self.curNode, node)

    def entity_decl_handler(self, entityName, is_parameter_entity, value,
                            base, systemId, publicId, notationName):
        if is_parameter_entity:
            # we don't care about parameter entities for the DOM
            return
        if not self._options.entities:
            return
        node = self.document._create_entity(entityName, publicId,
                                            systemId, notationName)
        if value is not None:
            # internal entity
            # node *should* be readonly, but we'll cheat
            child = self.document.createTextNode(value)
            node.childNodes.append(child)
        self.document.doctype.entities._seq.append(node)
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            del self.document.doctype.entities._seq[-1]

    def notation_decl_handler(self, notationName, base, systemId, publicId):
        node = self.document._create_notation(notationName, publicId, systemId)
        self.document.doctype.notations._seq.append(node)
        # Bug fix: drop the notation only when the filter REJECTS it.
        # The previous comparison against FILTER_ACCEPT removed accepted
        # notations and kept rejected ones -- the opposite of the
        # handling in entity_decl_handler().
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            del self.document.doctype.notations._seq[-1]

    def comment_handler(self, data):
        node = self.document.createComment(data)
        _append_child(self.curNode, node)
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            self.curNode.removeChild(node)

    def start_cdata_section_handler(self):
        self._cdata = True
        self._cdata_continue = False

    def end_cdata_section_handler(self):
        self._cdata = False
        self._cdata_continue = False

    def external_entity_ref_handler(self, context, base, systemId, publicId):
        # External entities are not loaded; tell Expat to carry on.
        return 1

    def first_element_handler(self, name, attributes):
        # Runs only for the first (document) element: decide once whether
        # the per-element finish work can be skipped, then switch to the
        # regular handler for the rest of the document.
        if self._filter is None and not self._elem_info:
            self._finish_end_element = id
        self.getParser().StartElementHandler = self.start_element_handler
        self.start_element_handler(name, attributes)

    def start_element_handler(self, name, attributes):
        node = self.document.createElement(name)
        _append_child(self.curNode, node)
        self.curNode = node
        if attributes:
            # ordered_attributes=True delivers a flat [name, value, ...]
            # list, so step through it in pairs.
            for i in range(0, len(attributes), 2):
                a = minidom.Attr(attributes[i], EMPTY_NAMESPACE,
                                 None, EMPTY_PREFIX)
                value = attributes[i+1]
                a.value = value
                a.ownerDocument = self.document
                _set_attribute_node(node, a)
        if node is not self.document.documentElement:
            self._finish_start_element(node)

    def _finish_start_element(self, node):
        if self._filter:
            # To be general, we'd have to call isSameNode(), but this
            # is sufficient for minidom:
            if node is self.document.documentElement:
                return
            filt = self._filter.startContainer(node)
            if filt == FILTER_REJECT:
                # ignore this node & all descendants
                Rejecter(self)
            elif filt == FILTER_SKIP:
                # ignore this node, but make its children become
                # children of the parent node
                Skipper(self)
            else:
                return
            self.curNode = node.parentNode
            node.parentNode.removeChild(node)
            node.unlink()

    # If this ever changes, Namespaces.end_element_handler() needs to
    # be changed to match.
    #
    def end_element_handler(self, name):
        curNode = self.curNode
        self.curNode = curNode.parentNode
        self._finish_end_element(curNode)

    def _finish_end_element(self, curNode):
        info = self._elem_info.get(curNode.tagName)
        if info:
            self._handle_white_text_nodes(curNode, info)
        if self._filter:
            if curNode is self.document.documentElement:
                return
            if self._filter.acceptNode(curNode) == FILTER_REJECT:
                self.curNode.removeChild(curNode)
                curNode.unlink()

    def _handle_white_text_nodes(self, node, info):
        if (self._options.whitespace_in_element_content
            or not info.isElementContent()):
            return
        # We have element type information and should remove ignorable
        # whitespace; identify for text nodes which contain only
        # whitespace.
        L = []
        for child in node.childNodes:
            if child.nodeType == TEXT_NODE and not child.data.strip():
                L.append(child)
        # Remove ignorable whitespace from the tree.
        for child in L:
            node.removeChild(child)

    def element_decl_handler(self, name, model):
        info = self._elem_info.get(name)
        if info is None:
            self._elem_info[name] = ElementInfo(name, model)
        else:
            # An ATTLIST declaration was seen first; attach the model.
            assert info._model is None
            info._model = model

    def attlist_decl_handler(self, elem, name, type, default, required):
        info = self._elem_info.get(elem)
        if info is None:
            info = ElementInfo(elem)
            self._elem_info[elem] = info
        info._attr_info.append(
            [None, name, None, None, default, 0, type, required])

    def xml_decl_handler(self, version, encoding, standalone):
        self.document.version = version
        self.document.encoding = encoding
        # This is still a little ugly, thanks to the pyexpat API. ;-(
        # standalone is -1 when the declaration omitted the attribute.
        if standalone >= 0:
            if standalone:
                self.document.standalone = True
            else:
                self.document.standalone = False
# Legal DOMBuilderFilter return codes for startContainer()/acceptNode().
# Don't include FILTER_INTERRUPT, since that's checked separately
# where allowed.
_ALLOWED_FILTER_RETURNS = (FILTER_ACCEPT, FILTER_REJECT, FILTER_SKIP)
class FilterVisibilityController(object):
    """Wrapper around a DOMBuilderFilter which implements the checks
    to make the whatToShow filter attribute work."""
    __slots__ = 'filter',

    def __init__(self, filter):
        self.filter = filter

    def startContainer(self, node):
        """Ask the filter about entering *node*; ACCEPT when masked out."""
        if not (self.filter.whatToShow & self._nodetype_mask[node.nodeType]):
            return FILTER_ACCEPT
        decision = self.filter.startContainer(node)
        if decision == FILTER_INTERRUPT:
            raise ParseEscape
        if decision not in _ALLOWED_FILTER_RETURNS:
            raise ValueError(
                "startContainer() returned illegal value: " + repr(decision))
        return decision

    def acceptNode(self, node):
        """Ask the filter about keeping *node*; ACCEPT when masked out."""
        if not (self.filter.whatToShow & self._nodetype_mask[node.nodeType]):
            return FILTER_ACCEPT
        decision = self.filter.acceptNode(node)
        if decision == FILTER_INTERRUPT:
            raise ParseEscape
        if decision == FILTER_SKIP:
            # Splice the children into the parent; the caller removes
            # the skipped node itself.
            parent = node.parentNode
            for child in node.childNodes[:]:
                parent.appendChild(child)
            # node is handled by the caller
            return FILTER_REJECT
        if decision not in _ALLOWED_FILTER_RETURNS:
            raise ValueError(
                "acceptNode() returned illegal value: " + repr(decision))
        return decision

    _nodetype_mask = {
        Node.ELEMENT_NODE: NodeFilter.SHOW_ELEMENT,
        Node.ATTRIBUTE_NODE: NodeFilter.SHOW_ATTRIBUTE,
        Node.TEXT_NODE: NodeFilter.SHOW_TEXT,
        Node.CDATA_SECTION_NODE: NodeFilter.SHOW_CDATA_SECTION,
        Node.ENTITY_REFERENCE_NODE: NodeFilter.SHOW_ENTITY_REFERENCE,
        Node.ENTITY_NODE: NodeFilter.SHOW_ENTITY,
        Node.PROCESSING_INSTRUCTION_NODE: NodeFilter.SHOW_PROCESSING_INSTRUCTION,
        Node.COMMENT_NODE: NodeFilter.SHOW_COMMENT,
        Node.DOCUMENT_NODE: NodeFilter.SHOW_DOCUMENT,
        Node.DOCUMENT_TYPE_NODE: NodeFilter.SHOW_DOCUMENT_TYPE,
        Node.DOCUMENT_FRAGMENT_NODE: NodeFilter.SHOW_DOCUMENT_FRAGMENT,
        Node.NOTATION_NODE: NodeFilter.SHOW_NOTATION,
    }
class FilterCrutch(object):
    """Base helper that temporarily hijacks a builder's element handlers.

    Subclasses (Rejecter, Skipper) supply start/end element handlers;
    _level tracks the element depth below the node being filtered.
    """
    __slots__ = '_builder', '_level', '_old_start', '_old_end'
    def __init__(self, builder):
        self._level = 0
        self._builder = builder
        parser = builder._parser
        # Save the real handlers so they can be restored when done.
        self._old_start = parser.StartElementHandler
        self._old_end = parser.EndElementHandler
        # Subclasses must define these two methods.
        parser.StartElementHandler = self.start_element_handler
        parser.EndElementHandler = self.end_element_handler
class Rejecter(FilterCrutch):
    """Discards the current element and its entire subtree.

    Installed when a filter returns FILTER_REJECT from startContainer();
    silences all content callbacks until the matching end tag is seen,
    then reinstates the builder's normal handlers.
    """
    __slots__ = ()

    def __init__(self, builder):
        FilterCrutch.__init__(self, builder)
        parser = builder._parser
        # Nothing inside the rejected subtree should reach the DOM.
        for name in ("ProcessingInstructionHandler",
                     "CommentHandler",
                     "CharacterDataHandler",
                     "StartCdataSectionHandler",
                     "EndCdataSectionHandler",
                     "ExternalEntityRefHandler",
                     ):
            setattr(parser, name, None)

    def start_element_handler(self, *args):
        self._level += 1

    def end_element_handler(self, *args):
        if self._level:
            self._level -= 1
        else:
            # Back at the rejected element's end tag: restore everything.
            parser = self._builder._parser
            self._builder.install(parser)
            parser.StartElementHandler = self._old_start
            parser.EndElementHandler = self._old_end
class Skipper(FilterCrutch):
    """Drops the current element but keeps its children.

    Installed when a filter returns FILTER_SKIP from startContainer();
    children of the skipped node are built as children of its parent.
    """
    __slots__ = ()

    def start_element_handler(self, *args):
        node = self._builder.curNode
        self._old_start(*args)
        # The real handler pushed a new element: we are one level deeper.
        if self._builder.curNode is not node:
            self._level += 1

    def end_element_handler(self, *args):
        if self._level:
            self._level -= 1
        else:
            # We're popping back out of the node we're skipping, so we
            # shouldn't need to do anything but reset the handlers.
            self._builder._parser.StartElementHandler = self._old_start
            self._builder._parser.EndElementHandler = self._old_end
            self._builder = None
        self._old_end(*args)
# framework document used by the fragment builder.
# Takes a string for the doctype, subset string, and namespace attrs string.
_FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID = \
"http://xml.python.org/entities/fragment-builder/internal"
_FRAGMENT_BUILDER_TEMPLATE = (
'''\
<!DOCTYPE wrapper
%%s [
<!ENTITY fragment-builder-internal
SYSTEM "%s">
%%s
]>
<wrapper %%s
>&fragment-builder-internal;</wrapper>'''
% _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID)
class FragmentBuilder(ExpatBuilder):
    """Builder which constructs document fragments given XML source
    text and a context node.
    The context node is expected to provide information about the
    namespace declarations which are in scope at the start of the
    fragment.
    """
    def __init__(self, context, options=None):
        # The context may be a Document (used directly) or any other
        # node (its ownerDocument is used).
        if context.nodeType == DOCUMENT_NODE:
            self.originalDocument = context
            self.context = context
        else:
            self.originalDocument = context.ownerDocument
            self.context = context
        ExpatBuilder.__init__(self, options)
    def reset(self):
        ExpatBuilder.reset(self)
        self.fragment = None
    def parseFile(self, file):
        """Parse a document fragment from a file object, returning the
        fragment node."""
        return self.parseString(file.read())
    def parseString(self, string):
        """Parse a document fragment from a string, returning the
        fragment node."""
        # The fragment is parsed by wrapping it in a synthetic document
        # (see _FRAGMENT_BUILDER_TEMPLATE) whose internal entity is
        # redirected to *string* in external_entity_ref_handler().
        self._source = string
        parser = self.getParser()
        doctype = self.originalDocument.doctype
        ident = ""
        if doctype:
            subset = doctype.internalSubset or self._getDeclarations()
            if doctype.publicId:
                ident = ('PUBLIC "%s" "%s"'
                         % (doctype.publicId, doctype.systemId))
            elif doctype.systemId:
                ident = 'SYSTEM "%s"' % doctype.systemId
        else:
            subset = ""
        nsattrs = self._getNSattrs() # get ns decls from node's ancestors
        document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs)
        try:
            parser.Parse(document, 1)
        except:
            # Reset before re-raising so no state leaks between parses.
            self.reset()
            raise
        fragment = self.fragment
        self.reset()
        ## self._parser = None
        return fragment
    def _getDeclarations(self):
        """Re-create the internal subset from the DocumentType node.
        This is only needed if we don't already have the
        internalSubset as a string.
        """
        doctype = self.context.ownerDocument.doctype
        s = ""
        if doctype:
            # Rebuild NOTATION declarations, then ENTITY declarations.
            for i in range(doctype.notations.length):
                notation = doctype.notations.item(i)
                if s:
                    s = s + "\n "
                s = "%s<!NOTATION %s" % (s, notation.nodeName)
                if notation.publicId:
                    s = '%s PUBLIC "%s"\n "%s">' \
                        % (s, notation.publicId, notation.systemId)
                else:
                    s = '%s SYSTEM "%s">' % (s, notation.systemId)
            for i in range(doctype.entities.length):
                entity = doctype.entities.item(i)
                if s:
                    s = s + "\n "
                s = "%s<!ENTITY %s" % (s, entity.nodeName)
                if entity.publicId:
                    s = '%s PUBLIC "%s"\n "%s"' \
                        % (s, entity.publicId, entity.systemId)
                elif entity.systemId:
                    s = '%s SYSTEM "%s"' % (s, entity.systemId)
                else:
                    s = '%s "%s"' % (s, entity.firstChild.data)
                if entity.notationName:
                    s = "%s NOTATION %s" % (s, entity.notationName)
                s = s + ">"
        return s
    def _getNSattrs(self):
        # Overridden in FragmentBuilderNS to emit xmlns attributes.
        return ""
    def external_entity_ref_handler(self, context, base, systemId, publicId):
        if systemId == _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID:
            # this entref is the one that we made to put the subtree
            # in; all of our given input is parsed in here.
            old_document = self.document
            old_cur_node = self.curNode
            parser = self._parser.ExternalEntityParserCreate(context)
            # put the real document back, parse into the fragment to return
            self.document = self.originalDocument
            self.fragment = self.document.createDocumentFragment()
            self.curNode = self.fragment
            try:
                parser.Parse(self._source, 1)
            finally:
                # Always restore the wrapper-document state.
                self.curNode = old_cur_node
                self.document = old_document
                self._source = None
            return -1
        else:
            return ExpatBuilder.external_entity_ref_handler(
                self, context, base, systemId, publicId)
class Namespaces:
    """Mix-in class for builders; adds support for namespaces."""
    def _initNamespaces(self):
        # list of (prefix, uri) ns declarations.  Namespace attrs are
        # constructed from this and added to the element's attrs.
        self._ns_ordered_prefixes = []
    def createParser(self):
        """Create a new namespace-handling parser."""
        parser = expat.ParserCreate(namespace_separator=" ")
        parser.namespace_prefixes = True
        return parser
    def install(self, parser):
        """Insert the namespace-handlers onto the parser."""
        ExpatBuilder.install(self, parser)
        if self._options.namespace_declarations:
            parser.StartNamespaceDeclHandler = (
                self.start_namespace_decl_handler)
    def start_namespace_decl_handler(self, prefix, uri):
        """Push this namespace declaration on our storage."""
        self._ns_ordered_prefixes.append((prefix, uri))
    def start_element_handler(self, name, attributes):
        # A namespace-qualified name arrives as "uri localname [prefix]".
        if ' ' in name:
            uri, localname, prefix, qname = _parse_ns_name(self, name)
        else:
            uri = EMPTY_NAMESPACE
            qname = name
            localname = None
            prefix = EMPTY_PREFIX
        node = minidom.Element(qname, uri, prefix, localname)
        node.ownerDocument = self.document
        _append_child(self.curNode, node)
        self.curNode = node
        # Materialize pending xmlns declarations as attributes.
        if self._ns_ordered_prefixes:
            for prefix, uri in self._ns_ordered_prefixes:
                if prefix:
                    a = minidom.Attr(_intern(self, 'xmlns:' + prefix),
                                     XMLNS_NAMESPACE, prefix, "xmlns")
                else:
                    a = minidom.Attr("xmlns", XMLNS_NAMESPACE,
                                     "xmlns", EMPTY_PREFIX)
                a.value = uri
                a.ownerDocument = self.document
                _set_attribute_node(node, a)
            del self._ns_ordered_prefixes[:]
        if attributes:
            # Populate the attribute maps directly for speed.
            node._ensure_attributes()
            _attrs = node._attrs
            _attrsNS = node._attrsNS
            for i in range(0, len(attributes), 2):
                aname = attributes[i]
                value = attributes[i+1]
                if ' ' in aname:
                    uri, localname, prefix, qname = _parse_ns_name(self, aname)
                    a = minidom.Attr(qname, uri, localname, prefix)
                    _attrs[qname] = a
                    _attrsNS[(uri, localname)] = a
                else:
                    a = minidom.Attr(aname, EMPTY_NAMESPACE,
                                     aname, EMPTY_PREFIX)
                    _attrs[aname] = a
                    _attrsNS[(EMPTY_NAMESPACE, aname)] = a
                a.ownerDocument = self.document
                a.value = value
                a.ownerElement = node
    if __debug__:
        # This only adds some asserts to the original
        # end_element_handler(), so we only define this when -O is not
        # used. If changing one, be sure to check the other to see if
        # it needs to be changed as well.
        #
        def end_element_handler(self, name):
            curNode = self.curNode
            if ' ' in name:
                uri, localname, prefix, qname = _parse_ns_name(self, name)
                assert (curNode.namespaceURI == uri
                        and curNode.localName == localname
                        and curNode.prefix == prefix), \
                        "element stack messed up! (namespace)"
            else:
                assert curNode.nodeName == name, \
                       "element stack messed up - bad nodeName"
                assert curNode.namespaceURI == EMPTY_NAMESPACE, \
                       "element stack messed up - bad namespaceURI"
            self.curNode = curNode.parentNode
            self._finish_end_element(curNode)
class ExpatBuilderNS(Namespaces, ExpatBuilder):
    """Document builder that supports namespaces."""
    def reset(self):
        # Namespace state must be re-initialized along with the base state.
        ExpatBuilder.reset(self)
        self._initNamespaces()
class FragmentBuilderNS(Namespaces, FragmentBuilder):
    """Fragment builder that supports namespaces."""
    def reset(self):
        FragmentBuilder.reset(self)
        self._initNamespaces()
    def _getNSattrs(self):
        """Return string of namespace attributes from this element and
        ancestors."""
        # XXX This needs to be re-written to walk the ancestors of the
        # context to build up the namespace information from
        # declarations, elements, and attributes found in context.
        # Otherwise we have to store a bunch more data on the DOM
        # (though that *might* be more reliable -- not clear).
        attrs = ""
        context = self.context
        L = []
        while context:
            # NOTE(review): relies on a non-standard '_ns_prefix_uri'
            # attribute being present on context nodes -- not set by
            # minidom itself; confirm against the producing code.
            if hasattr(context, '_ns_prefix_uri'):
                for prefix, uri in context._ns_prefix_uri.items():
                    # add every new NS decl from context to L and attrs string
                    if prefix in L:
                        continue
                    L.append(prefix)
                    if prefix:
                        declname = "xmlns:" + prefix
                    else:
                        declname = "xmlns"
                    if attrs:
                        attrs = "%s\n    %s='%s'" % (attrs, declname, uri)
                    else:
                        attrs = " %s='%s'" % (declname, uri)
            context = context.parentNode
        return attrs
class ParseEscape(Exception):
    """Exception raised to short-circuit parsing in InternalSubsetExtractor.

    Also raised by FilterVisibilityController when a filter returns
    FILTER_INTERRUPT; builders swallow it and return the partial result.
    """
    pass
class InternalSubsetExtractor(ExpatBuilder):
    """XML processor which can rip out the internal document type subset."""
    # Collected subset text: a list of strings while parsing, a single
    # string once the doctype declaration has ended, None if absent.
    subset = None
    def getSubset(self):
        """Return the internal subset as a string."""
        return self.subset
    def parseFile(self, file):
        # ParseEscape simply signals that everything needed was seen.
        try:
            ExpatBuilder.parseFile(self, file)
        except ParseEscape:
            pass
    def parseString(self, string):
        try:
            ExpatBuilder.parseString(self, string)
        except ParseEscape:
            pass
    def install(self, parser):
        # Only the doctype and first-element callbacks matter here.
        parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
        parser.StartElementHandler = self.start_element_handler
    def start_doctype_decl_handler(self, name, publicId, systemId,
                                   has_internal_subset):
        if has_internal_subset:
            parser = self.getParser()
            self.subset = []
            # DefaultHandler receives the raw characters of the subset.
            parser.DefaultHandler = self.subset.append
            parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
        else:
            raise ParseEscape()
    def end_doctype_decl_handler(self):
        # Normalize line endings to '\n' per the XML spec.
        s = ''.join(self.subset).replace('\r\n', '\n').replace('\r', '\n')
        self.subset = s
        raise ParseEscape()
    def start_element_handler(self, name, attrs):
        # Reaching content means there is no (further) subset to read.
        raise ParseEscape()
def parse(file, namespaces=True):
    """Parse a document, returning the resulting Document node.

    'file' may be either a file name or an open file object.
    """
    builder = ExpatBuilderNS() if namespaces else ExpatBuilder()
    if isinstance(file, str):
        with open(file, 'rb') as fp:
            return builder.parseFile(fp)
    return builder.parseFile(file)
def parseString(string, namespaces=True):
    """Parse a document from a string, returning the resulting
    Document node.
    """
    builder = ExpatBuilderNS() if namespaces else ExpatBuilder()
    return builder.parseString(string)
def parseFragment(file, context, namespaces=True):
    """Parse a fragment of a document, given the context from which it
    was originally extracted.  context should be the parent of the
    node(s) which are in the fragment.

    'file' may be either a file name or an open file object.
    """
    builder = (FragmentBuilderNS(context) if namespaces
               else FragmentBuilder(context))
    if isinstance(file, str):
        with open(file, 'rb') as fp:
            return builder.parseFile(fp)
    return builder.parseFile(file)
def parseFragmentString(string, context, namespaces=True):
    """Parse a fragment of a document from a string, given the context
    from which it was originally extracted.  context should be the
    parent of the node(s) which are in the fragment.
    """
    builder = (FragmentBuilderNS(context) if namespaces
               else FragmentBuilder(context))
    return builder.parseString(string)
def makeBuilder(options):
    """Create a builder based on an Options object."""
    builder_class = ExpatBuilderNS if options.namespaces else ExpatBuilder
    return builder_class(options)
| |
# Copyright (c) 2017, John Skinner
import numpy as np
import transforms3d as tf
import util.transform as mytf
# Degree/radian conversion factors; Unreal expresses rotations in degrees.
_TORAD = np.pi / 180.0
_TODEG = 180 / np.pi
class UnrealTransform:
"""
A special class for handling everything to do with specifying a location and orientation in Unreal specifically.
Unreal has a bunch of painful special cases, as follows:
- Left-handed coordinate system
- Degrees instead of radians
- Inconsistent direction of rotation
This class exists to handle any particular Unreal weirdness, and keep it bottled up here
"""
__slots__ = ['_x', '_y', '_z', '_roll', '_pitch', '_yaw']
def __init__(self, location=None, rotation=None):
"""
:param location: A 4x4 homogenous transformation matrix, Transform object, or and 3-indexable tuple
:param rotation: Any 3-indexable tuple listing rotation in degrees, order (roll, pitch, yaw)
"""
if isinstance(location, np.ndarray) and location.shape == (4, 4):
location = mytf.Transform(location)
if isinstance(location, mytf.Transform):
location = transform_to_unreal(location)
if isinstance(location, UnrealTransform):
self._x = location.x
self._y = location.y
self._z = location.z
self._roll = location.roll
self._pitch = location.pitch
self._yaw = location.yaw
else:
if location is not None and len(location) >= 3:
self._x, self._y, self._z = location
else:
self._x = self._y = self._z = 0
if rotation is not None and len(rotation) >= 3:
self._roll, self._pitch, self._yaw = rotation
else:
self._roll = self._pitch = self._yaw = 0
@property
def location(self):
"""
Get the location represented by this pose.
:return: A numpy
"""
return self._x, self._y, self._z
@property
def euler(self):
"""
Get the Tait-Bryan angles for the rotation of this transform.
Expressed as a numpy vector (pitch, yaw, roll),
:return: A numpy array containing the euler angles
"""
return self._roll, self._pitch, self._yaw
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def z(self):
return self._z
@property
def roll(self):
return self._roll
@property
def pitch(self):
return self._pitch
@property
def yaw(self):
return self._yaw
@property
def forward(self):
"""
Get the forward vector of the transform.
That is, the positive X direction in the local coordinate frame,
transformed to the outer coordinate frame.
:return: The direction the pose is "facing"
"""
return self.find_independent((1, 0, 0))
@property
def back(self):
"""
Get the back vector of the transform.
That is, the negative X direction of the local coordinate frame,
transformed to the outer coordinate frame.
:return: The direction backwards from the pose
"""
return -1 * self.forward
@property
def up(self):
"""
Get the up vector of the transform.
That is
:return: The "up" direction for this transform.
"""
return self.find_independent((0, 0, 1))
@property
def down(self):
return -1 * self.up
@property
def right(self):
return self.find_independent((0, 1, 0))
@property
def left(self):
return -1 * self.right
def find_relative(self, pose):
"""
Convert the given pose to be relative to this pose.
This is not commutative p1.find_relative(p2) != p2.find_relative(p1)
See Robotics: Vision and Control p 55-56 for the source of this math
:param pose: The world pose to convert
:return: A pose object relative to this pose
"""
# Remember, the pose matrix gives the position in world coordinates from a local position,
# So to find the world position, we have to reverse it
if isinstance(pose, mytf.Transform):
pose = transform_to_unreal(pose)
if isinstance(pose, UnrealTransform):
quat = euler2quat(self.roll, self.pitch, self.yaw)
inv_quat = tf.quaternions.qconjugate(quat)
pose_quat = euler2quat(pose.roll, pose.pitch, pose.yaw)
loc = tf.quaternions.rotate_vector(np.asarray(pose.location) - np.asarray(self.location), inv_quat)
rot = tf.quaternions.qmult(inv_quat, pose_quat)
return UnrealTransform(location=loc, rotation=quat2euler(rot[0], rot[1], rot[2], rot[3]))
elif len(pose) >= 3:
inv_quat = tf.quaternions.qinverse(euler2quat(self.roll, self.pitch, self.yaw))
return tf.quaternions.rotate_vector(np.asarray(pose) - np.asarray(self.location), inv_quat)
else:
raise TypeError('find_relative needs to transform a point or pose')
    def find_independent(self, pose):
        """
        Convert a pose to world coordinates.
        Remember, pose is like a stack, so the returned pose will be relative to whatever
        this pose is relative to.
        :param pose: A pose relative to this pose, as a Transform or just as a point (any length-3 indexable object)
        :return: A pose relative to whatever this pose is relative to, and independent of this pose
        :raises TypeError: if pose is neither a pose nor a length-3 point
        """
        # REMEMBER: pre-multiplying by the transformation matrix gives the world pose from the local
        if isinstance(pose, mytf.Transform):
            # Normalize my-convention transforms into Unreal space first.
            pose = transform_to_unreal(pose)
        if isinstance(pose, UnrealTransform):
            quat = euler2quat(self.roll, self.pitch, self.yaw)
            pose_quat = euler2quat(pose.roll, pose.pitch, pose.yaw)
            # Compose the orientations, then carry the local offset into the
            # outer frame before translating.
            rot = tf.quaternions.qmult(quat, pose_quat)
            loc = np.asarray(self.location) + np.asarray(tf.quaternions.rotate_vector(pose.location, quat))
            return UnrealTransform(location=loc, rotation=quat2euler(rot[0], rot[1], rot[2], rot[3]))
        elif len(pose) >= 3:
            quat = euler2quat(self.roll, self.pitch, self.yaw)
            # Rotate the local point into the outer frame, then translate.
            return tf.quaternions.rotate_vector(np.asarray(pose), quat) + np.asarray(self.location)
        else:
            raise TypeError('find_independent needs to transform a point or pose')
def create_serialized(location, rotation):
    """Build the serialized (plain-dict) form of a transform.

    Any component that is not a length-3 value is replaced by zeros.

    :param location: A length-3 location, or anything else for (0, 0, 0)
    :param rotation: A length-3 rotation, or anything else for (0, 0, 0)
    :return: A dict with 'location' and 'rotation' keys
    """
    if len(location) != 3:
        location = (0, 0, 0)
    if len(rotation) != 3:
        rotation = (0, 0, 0)
    return {'location': location, 'rotation': rotation}
def deserialize(s_transform):
    """
    Rebuild an UnrealTransform from its serialized dict form.
    :param s_transform: A dict as produced by create_serialized, or None
    :return: An UnrealTransform; missing fields default to zeros
    """
    if s_transform is None:
        return UnrealTransform()
    return UnrealTransform(
        location=s_transform.get('location', (0, 0, 0)),
        rotation=s_transform.get('rotation', (0, 0, 0))
    )
def euler2mat(roll, pitch, yaw):
    """
    Create a rotation matrix for the orientation expressed by this transform.
    Copied directly from FRotationTranslationMatrix::FRotationTranslationMatrix
    in Engine/Source/Runtime/Core/Public/Math/RotationTranslationMatrix.h ln 32
    :param roll: Roll angle (assumed degrees; _TORAD scales to radians)
    :param pitch: Pitch angle
    :param yaw: Yaw angle
    :return: A 3x3 numpy rotation matrix in Unreal's conventions
    """
    # Convert to radians for numpy's trig functions.
    angles = _TORAD * np.array((roll, pitch, yaw))
    sr, sp, sy = np.sin(angles)
    cr, cp, cy = np.cos(angles)
    return np.array([
        [cp * cy, sr * sp * cy - cr * sy, -(cr * sp * cy + sr * sy)],
        [cp * sy, sr * sp * sy + cr * cy, cy * sr - cr * sp * sy],
        [sp, -sr * cp, cr * cp]
    ])
def mat2euler(mat):
    """
    Go back from a rotation matrix to euler angles
    This is copied from FMatrix::Rotator(),
    in Engine/Source/Runtime/Core/Private/Math/UnrealMath.cpp ln 473
    :param mat: A 3x3 rotation matrix, as produced by euler2mat
    :return: A (roll, pitch, yaw) tuple
    """
    x_axis = mat[0, 0:3]
    y_axis = mat[1, 0:3]
    z_axis = mat[2, 0:3]
    # Pitch is the elevation of the rotated X axis above the XY plane.
    pitch = np.arctan2(x_axis[2], np.sqrt(x_axis[0] * x_axis[0] + x_axis[1] * x_axis[1])) * _TODEG
    # NOTE(review): mixes `/ _TORAD` here with `* _TODEG` elsewhere; these are
    # only identical if _TODEG == 1 / _TORAD exactly -- consider unifying.
    yaw = np.arctan2(x_axis[1], x_axis[0]) / _TORAD
    # Rebuild the rotation with roll zeroed, then measure the remaining roll
    # against its Y axis.
    temp_mat = euler2mat(0, pitch, yaw)
    sy_axis = temp_mat[1, 0:3]
    # For some crazy reason '|' means dot product for unreal FVector
    roll = np.arctan2(np.dot(z_axis, sy_axis), np.dot(y_axis, sy_axis)) * _TODEG
    return roll, pitch, yaw
def euler2quat(roll, pitch, yaw):
    """
    Convert Unreal Euler angles to a quaternion.
    Based on FRotator::Quaternion in
    Engine/Source/Runtime/Core/Private/Math/UnrealMath.cpp ln 373
    :param roll: Roll angle
    :param pitch: Pitch angle
    :param yaw: Yaw angle
    :return: A tuple quaternion in unreal space, w first
    """
    # Quaternion components are built from sin/cos of the half angles,
    # converted to radians.
    angles = np.array([roll, pitch, yaw])
    angles = angles * _TORAD / 2
    sr, sp, sy = np.sin(angles)
    cr, cp, cy = np.cos(angles)
    x = cr * sp * sy - sr * cp * cy
    y = -cr * sp * cy - sr * cp * sy
    z = cr * cp * sy - sr * sp * cy
    w = cr * cp * cy + sr * sp * sy
    return w, x, y, z
def quat2euler(w, x, y, z):
    """
    Convert a quaternion in unreal space to euler angles.
    Based on FQuat::Rotator in
    Engine/Source/Runtime/Core/Private/Math/UnrealMath.cpp ln 536
    which is in turn based on
    https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
    :param w: Scalar component
    :param x: X component
    :param y: Y component
    :param z: Z component
    :return: A (roll, pitch, yaw) tuple
    """
    # Beyond this threshold the pitch is essentially +/-90 degrees and the
    # general formulas degenerate (gimbal lock), so special-case it.
    SINGULARITY_THRESHOLD = 0.4999995
    singularity_test = z * x - w * y
    yaw_y = 2 * (w * z + x * y)
    yaw_x = 1 - 2 * (y * y + z * z)
    yaw = np.arctan2(yaw_y, yaw_x) * _TODEG
    if singularity_test < -SINGULARITY_THRESHOLD:
        # Pitched straight down: fold the residual rotation into roll.
        pitch = -90
        roll = _clamp_axis(-yaw - 2 * np.arctan2(x, w) * _TODEG)
    elif singularity_test > SINGULARITY_THRESHOLD:
        # Pitched straight up.
        pitch = 90
        roll = _clamp_axis(yaw - 2 * np.arctan2(x, w) * _TODEG)
    else:
        pitch = np.arcsin(2 * singularity_test) / _TORAD
        roll = np.arctan2(-2 * (w * x + y * z), (1 - 2 * (x * x + y * y))) * _TODEG
    return roll, pitch, yaw
def _clamp_axis(angle):
angle %= 360
if angle < -180:
angle += 360
return angle
def transform_to_unreal(pose):
    """
    Swap the coordinate frames from my standard coordinate frame
    to the one used by unreal
    (scales distances by 100 and flips the Y axis for the left-handed frame).
    :param pose: A point, as any 3-length indexable, or a Transform object
    :return: An UnrealTransform object, or a converted point tuple
    """
    if isinstance(pose, mytf.Transform):
        # Scale to Unreal units and flip the Y axis.
        location = (100 * pose.location[0], -100 * pose.location[1], 100 * pose.location[2])
        rotation = pose.rotation_quat(w_first=True)
        # Invert Y axis to go to quaternion in unreal frame
        rotation = (rotation[0], rotation[1], -rotation[2], rotation[3])
        # Invert the direction of rotation since we're now in a left handed coordinate frame
        rotation = tf.quaternions.qinverse(rotation)
        # Change the axis order to roll, pitch, yaw in UE coordinates
        return UnrealTransform(location=location, rotation=quat2euler(rotation[0], rotation[1], rotation[2], rotation[3]))
    return 100 * pose[0], -100 * pose[1], 100 * pose[2]
def transform_from_unreal(pose):
    """
    Swap the coordinate frames from unreal coordinates
    to my standard convention
    (inverse of transform_to_unreal: divides distances by 100 and flips Y).
    :param pose: A point, as any 3-indexable, or a UnrealTransform object
    :return: A point or Transform object, depending on the parameter
    """
    if isinstance(pose, UnrealTransform):
        # Scale from Unreal units and flip the Y axis.
        location = (pose.location[0] / 100, -pose.location[1] / 100, pose.location[2] / 100)
        rotation = euler2quat(pose.roll, pose.pitch, pose.yaw)
        # Invert the direction of rotation to go to a right-handed coordinate frame
        rotation = tf.quaternions.qinverse(rotation)
        # Invert Y-axis to go to my coordinate frame
        rotation = (rotation[0], rotation[1], -rotation[2], rotation[3])
        return mytf.Transform(location=location, rotation=rotation, w_first=True)
    return pose[0] / 100, -pose[1] / 100, pose[2] / 100
| |
import gc
from clang.cindex import CursorKind
from clang.cindex import TranslationUnit
from clang.cindex import TypeKind
from .util import get_cursor
from .util import get_cursors
from .util import get_tu
kInput = """\
struct s0 {
int a;
int b;
};
struct s1;
void f0(int a0, int a1) {
int l0, l1;
if (a0)
return;
for (;;) {
break;
}
}
"""
def test_get_children():
    """Walk the top-level cursors of kInput and check their basic shape."""
    tu = get_tu(kInput)

    top_level = list(tu.cursor.get_children())
    assert len(top_level) == 3
    for child in top_level:
        assert child.translation_unit is not None

    s0, s1, f0 = top_level
    assert s0 != s1

    # struct s0 { int a; int b; };
    assert s0.kind == CursorKind.STRUCT_DECL
    assert s0.spelling == 's0'
    assert s0.is_definition() == True
    assert s0.location.file.name == 't.c'
    assert s0.location.line == 1
    assert s0.location.column == 8
    assert s0.hash > 0
    assert s0.translation_unit is not None

    fields = list(s0.get_children())
    assert len(fields) == 2
    for field, name in zip(fields, ('a', 'b')):
        assert field.kind == CursorKind.FIELD_DECL
        assert field.spelling == name
        assert field.type.kind == TypeKind.INT

    # struct s1; is a forward declaration only.
    assert s1.kind == CursorKind.STRUCT_DECL
    assert s1.spelling == 's1'
    assert s1.displayname == 's1'
    assert s1.is_definition() == False

    # void f0(int a0, int a1) { ... }
    assert f0.kind == CursorKind.FUNCTION_DECL
    assert f0.spelling == 'f0'
    assert f0.displayname == 'f0(int, int)'
    assert f0.is_definition() == True
def test_references():
    """Ensure that references to TranslationUnit are kept."""
    tu = get_tu('int x;')
    children = list(tu.cursor.get_children())
    assert len(children) > 0

    first = children[0]
    assert isinstance(first.translation_unit, TranslationUnit)

    # Drop our only direct reference to the TU and force a full collection.
    del tu
    gc.collect()
    assert isinstance(first.translation_unit, TranslationUnit)

    # If the TU had actually been destroyed, this would segfault.
    parent = first.semantic_parent
def test_canonical():
    """Redeclarations of the same struct share one canonical cursor."""
    tu = get_tu('struct X; struct X; struct X { int member; };')
    decls = [c for c in tu.cursor.get_children() if c.spelling == 'X']
    assert len(decls) == 3
    assert decls[1].canonical == decls[2].canonical
def test_is_static_method():
    """Ensure Cursor.is_static_method works."""
    tu = get_tu('class X { static void foo(); void bar(); };', lang='cpp')

    cls, foo, bar = (get_cursor(tu, name) for name in ('X', 'foo', 'bar'))
    assert cls is not None
    assert foo is not None
    assert bar is not None

    assert foo.is_static_method()
    assert not bar.is_static_method()
def test_underlying_type():
    """A typedef cursor exposes the type it aliases."""
    tu = get_tu('typedef int foo;')
    alias = get_cursor(tu, 'foo')
    assert alias is not None
    assert alias.kind.is_declaration()
    assert alias.underlying_typedef_type.kind == TypeKind.INT
kParentTest = """\
class C {
void f();
}
void C::f() { }
"""
def test_semantic_parent():
    """Declaration and out-of-line definition of C::f share a semantic parent."""
    tu = get_tu(kParentTest, 'cpp')
    fs = get_cursors(tu, 'f')
    klass = get_cursor(tu, 'C')
    assert len(fs) == 2
    assert fs[0].semantic_parent == fs[1].semantic_parent
    assert fs[0].semantic_parent == klass
def test_lexical_parent():
    """The two cursors for C::f have different lexical parents."""
    tu = get_tu(kParentTest, 'cpp')
    fs = get_cursors(tu, 'f')
    klass = get_cursor(tu, 'C')
    assert len(fs) == 2
    assert fs[0].lexical_parent != fs[1].lexical_parent
    assert fs[0].lexical_parent == klass
    assert fs[1].lexical_parent == tu.cursor
def test_enum_type():
    """A plain C enum reports an unsigned underlying integer type."""
    tu = get_tu('enum TEST { FOO=1, BAR=2 };')
    decl = get_cursor(tu, 'TEST')
    assert decl is not None
    assert decl.kind == CursorKind.ENUM_DECL
    assert decl.enum_type.kind == TypeKind.UINT
def test_enum_type_cpp():
    """A C++11 enum with a fixed underlying type reports that type."""
    tu = get_tu('enum TEST : long long { FOO=1, BAR=2 };', lang="cpp")
    decl = get_cursor(tu, 'TEST')
    assert decl is not None
    assert decl.kind == CursorKind.ENUM_DECL
    assert decl.enum_type.kind == TypeKind.LONGLONG
def test_objc_type_encoding():
    """Objective-C type encodings are exposed on cursors."""
    tu = get_tu('int i;', lang='objc')
    cursor = get_cursor(tu, 'i')
    assert cursor is not None
    assert cursor.objc_type_encoding == 'i'
def test_enum_values():
    """Enumerator values, including computed ones, are evaluated."""
    tu = get_tu('enum TEST { SPAM=1, EGG, HAM = EGG * 20};')
    decl = get_cursor(tu, 'TEST')
    assert decl is not None
    assert decl.kind == CursorKind.ENUM_DECL

    constants = list(decl.get_children())
    assert len(constants) == 3
    # SPAM=1, EGG is implicitly 2, HAM = EGG * 20 = 40.
    for cursor, value in zip(constants, (1, 2, 40)):
        assert cursor.kind == CursorKind.ENUM_CONSTANT_DECL
        assert cursor.enum_value == value
def test_enum_values_cpp():
    """Negative and wider-than-32-bit enumerator values survive."""
    tu = get_tu('enum TEST : long long { SPAM = -1, HAM = 0x10000000000};', lang="cpp")
    decl = get_cursor(tu, 'TEST')
    assert decl is not None
    assert decl.kind == CursorKind.ENUM_DECL

    constants = list(decl.get_children())
    assert len(constants) == 2
    for cursor, value in zip(constants, (-1, 0x10000000000)):
        assert cursor.kind == CursorKind.ENUM_CONSTANT_DECL
        assert cursor.enum_value == value
def test_annotation_attribute():
    """annotate() attribute text is surfaced via displayname."""
    tu = get_tu(
        'int foo (void) __attribute__ ((annotate("here be annotation attribute")));')
    foo = get_cursor(tu, 'foo')
    assert foo is not None

    for child in foo.get_children():
        if child.kind == CursorKind.ANNOTATE_ATTR:
            assert child.displayname == "here be annotation attribute"
            break
    else:
        assert False, "Couldn't find annotation"
def test_result_type():
    """result_type of a function cursor is its return type."""
    tu = get_tu('int foo();')
    foo = get_cursor(tu, 'foo')
    assert foo is not None
    assert foo.result_type.kind == TypeKind.INT
def test_get_tokens():
    """Ensure we can map cursors back to tokens."""
    tu = get_tu('int foo(int i);')
    tokens = list(get_cursor(tu, 'foo').get_tokens())
    # int foo ( int i ) ;
    assert len(tokens) == 7
    assert tokens[0].spelling == 'int'
    assert tokens[1].spelling == 'foo'
def test_get_arguments():
    """get_arguments yields one cursor per formal parameter."""
    tu = get_tu('void foo(int i, int j);')
    args = list(get_cursor(tu, 'foo').get_arguments())
    assert len(args) == 2
    assert args[0].spelling == "i"
    assert args[1].spelling == "j"
def test_referenced():
    """A call expression's referenced cursor is the called declaration."""
    tu = get_tu('void foo(); void bar() { foo(); }')
    foo = get_cursor(tu, 'foo')
    bar = get_cursor(tu, 'bar')
    for c in bar.get_children():
        if c.kind == CursorKind.CALL_EXPR:
            assert c.referenced.spelling == foo.spelling
            break
    else:
        # BUG FIX: without this else the test silently passed when no
        # CALL_EXPR was found (sibling test_annotation_attribute already
        # guards its search loop this way).
        assert False, "Couldn't find call expression"
| |
from __future__ import division
from Box2D import *
import copy
from euclid import *
from itertools import *
from math import *
import cPickle as pickle
import pyglet
from pyglet.gl import *
import rabbyt
import random
import sys
from torn.geometry import *
from torn import ik
def rad_to_deg(angle_rad):
    """Convert an angle from radians to degrees."""
    scaled = angle_rad * 180
    return scaled / pi
def create_aabb(lower_bound=(-1, -1), upper_bound=(1, 1)):
    """Build a Box2D axis-aligned bounding box spanning the two corners."""
    box = b2AABB()
    box.lowerBound = lower_bound
    box.upperBound = upper_bound
    return box
def create_world(lower_bound=(-100, -100), upper_bound=(100, 100),
                 gravity=(0, -10), do_sleep=True):
    """Create a Box2D world bounded by the given AABB corners."""
    return b2World(create_aabb(lower_bound, upper_bound), gravity, do_sleep)
def draw_polygon(points, closed=True):
    """Draw a polyline through the given points with OpenGL line segments.

    :param points: Sequence of (x, y) vertices
    :param closed: If True, also draw an edge from the last point back to
        the first
    """
    if closed:
        # Repeat the first point so the final segment closes the loop.
        points = points + points[:1]
    # Pair consecutive points into edges, then flatten to a flat coordinate
    # tuple as GL_LINES expects: (x0, y0, x1, y1, x1, y1, x2, y2, ...).
    vertices = zip(points[:-1], points[1:])
    vertices = tuple(chain(*chain(*vertices)))
    pyglet.graphics.draw(len(vertices) // 2, GL_LINES, ('v2f', vertices))
def draw_circle(center=(0, 0), radius=1, vertex_count=100):
    """Approximate a circle as a closed polygon with vertex_count corners."""
    cx, cy = center
    vertices = [(cx + radius * cos(2 * pi * i / vertex_count),
                 cy + radius * sin(2 * pi * i / vertex_count))
                for i in xrange(vertex_count)]
    draw_polygon(vertices)
def save_screenshot(name='screenshot.png', format='RGB'):
    """Grab the current color buffer and write it to an image file."""
    shot = pyglet.image.get_buffer_manager().get_color_buffer().image_data
    shot.format = format
    shot.save(name)
class Camera(object):
    """2D pan/zoom camera mapping between world and screen coordinates.

    The mapping is ``screen = world * scale + translation``; transform_view()
    applies the same mapping on the GL matrix stack (translate, then scale).
    """

    def __init__(self, translation=None, scale=1):
        if translation is None:
            self.translation = Vector2()
        else:
            assert isinstance(translation, Vector2)
            self.translation = translation
        assert scale > 0
        self.scale = scale

    def get_screen_point(self, world_point):
        """Map a world-space point to screen space."""
        assert isinstance(world_point, Point2)
        # BUG FIX: was `- self.translation`, which was not the inverse of
        # get_world_point() and disagreed with transform_view()'s
        # translate-then-scale mapping.
        screen_point = world_point * self.scale + self.translation
        return Point2(*screen_point)

    def get_world_point(self, screen_point):
        """Map a screen-space point to world space (inverse of get_screen_point)."""
        assert isinstance(screen_point, Point2)
        world_point = (screen_point - self.translation) / self.scale
        return Point2(*world_point)

    def transform_view(self):
        """Apply this camera's translation and scale to the current GL matrix."""
        glTranslatef(self.translation.x, self.translation.y, 0)
        glScalef(self.scale, self.scale, self.scale)
def load_object(path):
    """Load a pickled object from a file.

    :param path: Filesystem path of the pickle file
    :return: The unpickled object
    """
    # BUG FIX: use a context manager so the file handle is closed even if
    # unpickling raises (the original leaked the open handle).
    with open(path, 'rb') as file_:
        return pickle.load(file_)
def save_object(obj, path):
    """Pickle an object to a file using the highest protocol.

    :param obj: Any picklable object
    :param path: Destination file path (overwritten)
    """
    # BUG FIX: use a context manager so the handle is flushed and closed
    # even if pickling raises (the original leaked the open handle).
    with open(path, 'wb') as file_:
        pickle.dump(obj, file_, pickle.HIGHEST_PROTOCOL)
class Skeleton(object):
    """A stick figure: a quadrilateral torso plus open-polyline limbs."""

    def __init__(self):
        corners = [Point2(-0.5, -0.5), Point2(0.5, -0.5),
                   Point2(0.5, 0.5), Point2(-0.5, 0.5)]
        self.torso = Polygon(corners)
        self.limbs = []

    @property
    def polygons(self):
        """All polygons of the skeleton, torso first."""
        return [self.torso] + self.limbs

    @property
    def vertices(self):
        """Every vertex of every polygon, torso vertices first."""
        result = list(self.torso.vertices)
        for limb in self.limbs:
            result.extend(limb.vertices)
        return result
class MyWindow(pyglet.window.Window):
    """Top-level window that forwards every event to the active screen.

    The screen (game or one of the editors) is chosen once at start-up
    from the command-line flags.
    """

    def __init__(self, fps=False, **kwargs):
        # fps: when True, draw a frames-per-second overlay on each frame.
        super(MyWindow, self).__init__(**kwargs)
        rabbyt.set_default_attribs()
        # White background, black default draw colour.
        glClearColor(1, 1, 1, 0)
        glColor3f(0, 0, 0)
        self.fps = fps
        self.fps_display = pyglet.clock.ClockDisplay()
        # Pick the screen implementation from the command line.
        if '--animation-editor' in sys.argv:
            self.my_screen = AnimationEditor(self)
        elif '--skeleton-editor' in sys.argv:
            self.my_screen = SkeletonEditor(self)
        elif '--skin-editor' in sys.argv:
            self.my_screen = SkinEditor(self)
        else:
            self.my_screen = GameScreen(self)

    def on_draw(self):
        self.my_screen.on_draw()
        if self.fps:
            self.fps_display.draw()

    def on_close(self):
        # Give the screen a chance to save its state before the window dies.
        self.my_screen.on_close()
        super(MyWindow, self).on_close()

    def on_mouse_press(self, x, y, button, modifiers):
        self.my_screen.on_mouse_press(x, y, button, modifiers)

    def on_mouse_release(self, x, y, button, modifiers):
        self.my_screen.on_mouse_release(x, y, button, modifiers)

    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        self.my_screen.on_mouse_drag(x, y, dx, dy, buttons, modifiers)

    def on_key_press(self, symbol, modifiers):
        # ESC quits and F12 takes a screenshot globally; every other key is
        # forwarded to the active screen.
        if symbol == pyglet.window.key.ESCAPE:
            self.on_close()
        elif symbol == pyglet.window.key.F12:
            save_screenshot('torn-screenshot.png')
        else:
            self.my_screen.on_key_press(symbol, modifiers)

    def on_key_release(self, symbol, modifiers):
        self.my_screen.on_key_release(symbol, modifiers)
class Screen(object):
    """No-op base class for window screens.

    MyWindow delegates every event here; subclasses override only the
    handlers they care about.
    """

    def on_close(self):
        pass

    def on_draw(self):
        pass

    def on_mouse_press(self, x, y, button, modifiers):
        pass

    def on_mouse_release(self, x, y, button, modifiers):
        pass

    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        pass

    def on_key_press(self, symbol, modifiers):
        pass

    def on_key_release(self, symbol, modifiers):
        pass
class GameScreen(Screen):
    """The actual game: steps the physics level on a fixed timestep."""

    def __init__(self, window):
        self.window = window
        self.time = 0
        # Fixed simulation timestep in seconds (true division is active via
        # the __future__ import at the top of the file).
        self.dt = 1 / 60
        self.level = Level()
        pyglet.clock.schedule_interval(self.step, self.dt)

    def on_close(self):
        pyglet.clock.unschedule(self.step)

    def on_draw(self):
        self.window.clear()

    def step(self, dt):
        # dt is the wall-clock time since the last call; the level itself is
        # always advanced in fixed self.dt increments until it catches up.
        self.time += dt
        while self.level.time + self.dt < self.time:
            self.level.step(self.dt)
class SkeletonEditor(Screen):
    """Interactive editor for the stick-figure skeleton.

    Vertices are dragged with the mouse; dropping a vertex on top of
    another deletes it.  The skeleton is pickled on close.
    """

    def __init__(self, window):
        self.window = window
        # Centre the view and scale it so the skeleton fills most of it.
        translation = Vector2(self.window.width / 2, self.window.height / 2)
        scale = min(self.window.width, self.window.height) / 3.5
        self.camera = Camera(translation, scale)
        try:
            self.skeleton = load_object('torn-skeleton.pickle')
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit; `except Exception:` would be safer here.
            self.skeleton = Skeleton()
        self.drag_vertex = None
        # Undo stack of deep-copied skeletons, pushed on every mouse press.
        self.history = []
        # Pick radius, in screen pixels.
        self.screen_epsilon = 10
        self.pan_step = 20
        self.zoom_step = 1.2

    def on_close(self):
        save_object(self.skeleton, 'torn-skeleton.pickle')

    def on_draw(self):
        self.window.clear()
        glPushMatrix()
        self.camera.transform_view()
        self.draw_skeleton()
        glPopMatrix()

    def draw_skeleton(self):
        # Draw each polygon plus a handle circle at every vertex.
        for polygon in self.skeleton.polygons:
            draw_polygon(polygon.vertices, polygon.closed)
        for vertex in self.skeleton.vertices:
            draw_circle(vertex, self.screen_epsilon / self.camera.scale)

    def on_mouse_press(self, x, y, button, modifiers):
        self.history.append(copy.deepcopy(self.skeleton))
        self.drag_vertex = None
        point = self.camera.get_world_point(Point2(x, y))
        epsilon = self.screen_epsilon / self.camera.scale
        # First option, drag an existing vertex.
        vertices = filter(Circle(point, epsilon).intersect,
                          self.skeleton.vertices)
        if vertices:
            self.drag_vertex = random.choice(vertices)
        # Second option, split an existing edge and drag the new vertex.
        if self.drag_vertex is None:
            self.drag_vertex = self.drag_edge(point, epsilon)
        # Last option, create a new limb.
        if self.drag_vertex is None:
            limb = Polygon([point, point], closed=False)
            self.drag_vertex = limb.vertices[-1]
            self.skeleton.limbs.append(limb)

    def drag_edge(self, point, epsilon):
        """Split the first edge within epsilon of point; return the inserted
        vertex, or None if no edge is close enough."""
        assert isinstance(point, Point2)
        for polygon in self.skeleton.polygons:
            for i, edge in enumerate(polygon.edges):
                v1, v2 = edge
                if v1 == v2:
                    # Degenerate edge: measure distance to the vertex itself.
                    connection = point.connect(v1)
                else:
                    connection = point.connect(LineSegment2(v1, v2))
                if connection.length < epsilon:
                    # Insert the closest point on the edge as a new vertex.
                    vertex = connection.p2.copy()
                    polygon.vertices[i + 1:i + 1] = [vertex]
                    return vertex
        return None

    def on_mouse_release(self, x, y, button, modifiers):
        # Dropping a vertex within twice the pick radius of another vertex
        # merges them by deleting the dragged one.
        epsilon = 2 * self.screen_epsilon / self.camera.scale
        vertices = filter(Circle(self.drag_vertex, epsilon).intersect,
                          self.skeleton.vertices)
        if len(vertices) >= 2:
            self.delete_skeleton_vertex(self.drag_vertex)

    def delete_skeleton_vertex(self, vertex):
        for polygon in self.skeleton.polygons:
            if vertex in polygon.vertices:
                polygon.vertices.remove(vertex)
                # A limb with fewer than 2 vertices is no longer drawable;
                # drop it entirely.
                if (len(polygon.vertices) < 2 and
                    polygon in self.skeleton.limbs):
                    self.skeleton.limbs.remove(polygon)

    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        # Move the dragged vertex in place so the polygon keeps its identity.
        self.drag_vertex[:] = self.camera.get_world_point(Point2(x, y))

    def on_key_press(self, symbol, modifiers):
        # BACKSPACE undoes; arrow keys pan; +/- zoom.
        if symbol == pyglet.window.key.BACKSPACE:
            if self.history:
                self.skeleton = self.history.pop()
        if symbol == pyglet.window.key.LEFT:
            self.camera.translation.x += self.pan_step
        if symbol == pyglet.window.key.RIGHT:
            self.camera.translation.x -= self.pan_step
        if symbol == pyglet.window.key.UP:
            self.camera.translation.y -= self.pan_step
        if symbol == pyglet.window.key.DOWN:
            self.camera.translation.y += self.pan_step
        if symbol == pyglet.window.key.PLUS:
            self.camera.scale *= self.zoom_step
        if symbol == pyglet.window.key.MINUS:
            self.camera.scale /= self.zoom_step
class Scrap(object):
    """One image of the skin, with its placement parameters.

    :param name: Image file name (loaded by rabbyt when viewed)
    :param position: World-space centre of the image
    :param scale: Uniform scale factor
    :param angle: Rotation in radians (converted to degrees for rabbyt)
    """

    def __init__(self, name, position=None, scale=1, angle=0):
        if position is None:
            position = Point2()
        assert isinstance(position, Point2)
        assert type(scale) in (int, float)
        assert type(angle) in (int, float)
        self.name = name
        self.position = position
        self.scale = scale
        self.angle = angle
class Skin(object):
    """The visual skin: a collection of Scrap images, drawn in list order."""

    def __init__(self):
        self.scraps = []
class View(object):
    """Marker base class for objects that render part of the model."""
    pass
class ScrapView(View):
    """Draws one Scrap as a sprite plus its two drag handles.

    The handles are the scrap's centre (move) and a point on its bounding
    circle (combined scale + rotate).
    """

    def __init__(self, scrap):
        self.scrap = scrap
        self.sprite = rabbyt.Sprite(self.scrap.name, scale=self.scrap.scale,
                                    rot=rad_to_deg(self.scrap.angle))
        self.sprite.xy = self.scrap.position
        texture = self.sprite.texture
        # Mean of half-width and half-height: handle-circle radius for an
        # unscaled texture.
        self.texture_radius = (texture.width + texture.height) / 4
        self.radius = self.scrap.scale * self.texture_radius
        self.direction = Vector2(cos(self.scrap.angle), sin(self.scrap.angle))

    def _get_position(self):
        return self.scrap.position

    def _set_position(self, position):
        # Keep the model (scrap) and the view (sprite) in sync.
        assert isinstance(position, Point2)
        self.scrap.position[:] = position
        self.sprite.xy = position

    # Centre of the scrap, in world coordinates.
    position = property(_get_position, _set_position)

    def _get_transform(self):
        return self.scrap.position + self.radius * self.direction

    def _set_transform(self, transform):
        # Moving the rim handle re-derives scale (from its distance) and
        # angle (from its direction) relative to the scrap centre.
        assert isinstance(transform, Point2)
        vector = transform - self.scrap.position
        self.radius = abs(vector)
        self.direction = vector.normalized()
        self.scrap.scale = self.radius / self.texture_radius
        self.scrap.angle = atan2(self.direction.y, self.direction.x)
        self.sprite.scale = self.scrap.scale
        self.sprite.rot = rad_to_deg(self.scrap.angle)

    # The rim handle: a point on the scrap's bounding circle.
    transform = property(_get_transform, _set_transform)

    def draw(self, mouse_radius):
        self.sprite.render()
        glDisable(GL_TEXTURE_2D)
        # Draw the handles twice, black (thick) then white (thin), so they
        # stay visible on any background.
        glColor3f(0, 0, 0)
        glLineWidth(3)
        draw_circle(self.scrap.position, mouse_radius)
        draw_circle(self.scrap.position, self.radius)
        draw_circle(self.scrap.position + self.radius * self.direction,
                    mouse_radius)
        glColor3f(1, 1, 1)
        glLineWidth(1)
        draw_circle(self.scrap.position, mouse_radius)
        draw_circle(self.scrap.position, self.radius)
        draw_circle(self.scrap.position + self.radius * self.direction,
                    mouse_radius)
class SkinView(View):
    """Renders a Skin by keeping one ScrapView per scrap."""

    def __init__(self, skin):
        self.skin = skin
        self.scrap_views = [ScrapView(scrap) for scrap in self.skin.scraps]

    def draw(self, mouse_radius):
        """Draw every scrap with handle circles of the given radius."""
        for view in self.scrap_views:
            view.draw(mouse_radius)
class SkinEditor(Screen):
    """Editor for placing, scaling and rotating skin scraps.

    Clicking a handle hands the drag over to a controller object; the skin
    is pickled on close.
    """

    def __init__(self, window):
        self.window = window
        translation = Vector2(self.window.width / 2, self.window.height / 2)
        scale = min(self.window.width, self.window.height) / 3.5
        self.camera = Camera(translation, scale)
        # Handle pick radius, in screen pixels.
        self.mouse_radius = 10
        try:
            self.skin = load_object('torn-skin.pickle')
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit; `except Exception:` would be safer here.
            self.skin = Skin()
            self.skin.scraps.append(Scrap(name='torso.png', scale=0.005))
            self.skin.scraps.append(Scrap(name='head.png', scale=0.005))
        self.skin_view = SkinView(self.skin)

    def on_draw(self):
        self.window.clear()
        glPushMatrix()
        self.camera.transform_view()
        self.skin_view.draw(self.mouse_radius / self.camera.scale)
        glPopMatrix()

    def on_close(self):
        save_object(self.skin, 'torn-skin.pickle')

    def on_mouse_press(self, x, y, button, modifiers):
        mouse_point = self.camera.get_world_point(Vector2(x, y))
        mouse_radius = self.mouse_radius / self.camera.scale
        mouse_circle = Circle(mouse_point, mouse_radius)
        # A controller pushes itself onto the window's handler stack and
        # owns the drag until the button is released.
        for scrap_view in self.skin_view.scrap_views:
            if mouse_circle.intersect(scrap_view.position):
                ScrapPositionController(self, scrap_view)
                break
            if mouse_circle.intersect(scrap_view.transform):
                ScrapTransformController(self, scrap_view)
                break
class Controller(object):
    """Base class for short-lived input controllers that temporarily take
    over the window's event-handler stack during a drag."""
    pass
class ScrapPositionController(Controller):
    """Drags a scrap's centre handle, moving the whole scrap.

    Pushes itself onto the window's handler stack on creation and pops
    itself off when the mouse button is released.
    (Now derives from Controller, which previously existed but was unused.)
    """

    def __init__(self, editor, scrap_view):
        self.editor = editor
        self.scrap_view = scrap_view
        self.editor.window.push_handlers(self)

    def on_mouse_drag(self, x, y, dx, dy, button, modifiers):
        position = self.editor.camera.get_world_point(Vector2(x, y))
        self.scrap_view.position = position
        return pyglet.event.EVENT_HANDLED

    def on_mouse_release(self, x, y, button, modifiers):
        # Drag finished: remove ourselves from the handler stack.
        self.editor.window.pop_handlers()
        return pyglet.event.EVENT_HANDLED
class ScrapTransformController(Controller):
    """Drags a scrap's rim handle, rescaling and rotating the scrap.

    Pushes itself onto the window's handler stack on creation and pops
    itself off when the mouse button is released.
    (Now derives from Controller, which previously existed but was unused.)
    """

    def __init__(self, editor, scrap_view):
        self.editor = editor
        self.scrap_view = scrap_view
        self.editor.window.push_handlers(self)

    def on_mouse_drag(self, x, y, dx, dy, button, modifiers):
        transform = self.editor.camera.get_world_point(Vector2(x, y))
        self.scrap_view.transform = transform
        return pyglet.event.EVENT_HANDLED

    def on_mouse_release(self, x, y, button, modifiers):
        # Drag finished: remove ourselves from the handler stack.
        self.editor.window.pop_handlers()
        return pyglet.event.EVENT_HANDLED
class Pose(object):
    """A key frame: the IK target (end-effector) point for each limb."""

    def __init__(self, skeleton):
        assert isinstance(skeleton, Skeleton)
        # One target per limb: a copy of the limb's current end vertex.
        self.targets = [l.vertices[-1].copy() for l in skeleton.limbs]
class Animation(object):
    """A sequence of poses for a skeleton, optionally looping."""

    def __init__(self, skeleton, looped=True):
        assert isinstance(skeleton, Skeleton)
        assert type(looped) is bool
        # Start with a single pose taken from the skeleton's rest state.
        self.poses = [Pose(skeleton)]
        self.looped = looped
class AnimationEditor(Screen):
    """Editor for keyframe animations: drag each limb's IK target per pose.

    PAGEUP/PAGEDOWN step through poses, INSERT duplicates and DELETE
    removes the current pose, BACKSPACE undoes.  The animation is pickled
    on close.
    """

    def __init__(self, window):
        self.window = window
        translation = Vector2(self.window.width / 2, self.window.height / 2)
        scale = min(self.window.width, self.window.height) / 3.5
        self.camera = Camera(translation, scale)
        # Pick radius, in screen pixels.
        self.screen_epsilon = 10
        self.skeleton = load_object('torn-skeleton.pickle')
        try:
            self.animation = load_object('torn-animation.pickle')
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit; `except Exception:` would be safer here.
            self.animation = Animation(self.skeleton)
        self.pose_index = 0
        # Undo stack of (pose_index, deep-copied animation) tuples.
        self.history = []
        # Limb polylines posed by IK towards the current pose's targets.
        self.drag_limbs = self.get_drag_limbs()
        # Index of the limb currently being dragged, or None.
        self.limb_index = None
        self.pan_step = 20
        self.zoom_step = 1.2

    def get_drag_limbs(self):
        """Solve IK for every limb towards the current pose's targets."""
        limbs = []
        pose = self.animation.poses[self.pose_index]
        for i, limb in enumerate(self.skeleton.limbs):
            vertices = ik.solve(limb.vertices, pose.targets[i])
            limbs.append(Polygon(vertices, closed=False))
        return limbs

    def on_close(self):
        save_object(self.animation, 'torn-animation.pickle')

    def on_draw(self):
        self.window.clear()
        glPushMatrix()
        self.camera.transform_view()
        self.draw_pose()
        glPopMatrix()
        # The timeline is drawn in raw screen coordinates, outside the camera.
        self.draw_timeline()

    def draw_pose(self):
        glColor3f(0, 0, 0)
        draw_polygon(self.skeleton.torso.vertices, True)
        for i, limb in enumerate(self.drag_limbs):
            glColor3f(0, 0, 0)
            draw_polygon(limb.vertices, limb.closed)
            # Drag handle at the limb's end effector.
            draw_circle(limb.vertices[-1],
                        self.screen_epsilon / self.camera.scale)

    def draw_timeline(self):
        # One dot per pose along the bottom edge; a looped animation shows
        # the wrap-around pose once more at the end.  The current pose's
        # dot is drawn darker.
        point_count = len(self.animation.poses)
        if point_count >= 2 and self.animation.looped:
            point_count += 1
        width = self.window.width / point_count
        y = 2 * self.screen_epsilon
        glColor3f(0.5, 0.5, 0.5)
        draw_polygon([(width / 2, y), (self.window.width - width / 2, y)],
                     closed=False)
        for i in xrange(point_count):
            current = (i % len(self.animation.poses)) == self.pose_index
            color = 0 if current else 0.5
            glColor3f(color, color, color)
            x = width / 2 + i * width
            draw_circle((x, y), self.screen_epsilon)

    def on_mouse_press(self, x, y, button, modifiers):
        self.history.append((self.pose_index, copy.deepcopy(self.animation)))
        self.drag_vertex = None
        mouse_point = self.camera.get_world_point(Point2(x, y))
        epsilon = self.screen_epsilon / self.camera.scale
        mouse_circle = Circle(mouse_point, epsilon)
        # Pick the first limb whose end-effector handle is under the mouse.
        for i, limb in enumerate(self.drag_limbs):
            if mouse_circle.intersect(limb.vertices[-1]):
                self.limb_index = i
                break

    def on_mouse_drag(self, x, y, dx, dy, button, modifiers):
        if self.limb_index is None:
            return
        # Re-solve IK for the dragged limb and record its new target in the
        # current pose.
        mouse_point = self.camera.get_world_point(Point2(x, y))
        limb = self.skeleton.limbs[self.limb_index]
        vertices = ik.solve(limb.vertices, mouse_point)
        self.drag_limbs[self.limb_index] = Polygon(vertices, closed=False)
        pose = self.animation.poses[self.pose_index]
        pose.targets[self.limb_index] = vertices[-1].copy()

    def on_mouse_release(self, x, y, button, modifiers):
        self.limb_index = None

    def on_key_press(self, symbol, modifiers):
        if symbol == pyglet.window.key.BACKSPACE:
            if self.history:
                self.pose_index, self.animation = self.history.pop()
                self.drag_limbs = self.get_drag_limbs()
        if symbol == pyglet.window.key.INSERT:
            # Duplicate the current pose in place.
            pose = copy.deepcopy(self.animation.poses[self.pose_index])
            self.animation.poses[self.pose_index:self.pose_index] = [pose]
        if symbol == pyglet.window.key.DELETE:
            # Never delete the last remaining pose.
            if len(self.animation.poses) >= 2:
                del self.animation.poses[self.pose_index]
                self.pose_index = min(self.pose_index,
                                      len(self.animation.poses) - 1)
        if symbol == pyglet.window.key.PAGEUP:
            self.pose_index -= 1
            self.pose_index %= len(self.animation.poses)
            self.drag_limbs = self.get_drag_limbs()
        if symbol == pyglet.window.key.PAGEDOWN:
            self.pose_index += 1
            self.pose_index %= len(self.animation.poses)
            self.drag_limbs = self.get_drag_limbs()
        if symbol == pyglet.window.key.LEFT:
            self.camera.translation.x += self.pan_step
        if symbol == pyglet.window.key.RIGHT:
            self.camera.translation.x -= self.pan_step
        if symbol == pyglet.window.key.UP:
            self.camera.translation.y -= self.pan_step
        if symbol == pyglet.window.key.DOWN:
            self.camera.translation.y += self.pan_step
        if symbol == pyglet.window.key.PLUS:
            self.camera.scale *= self.zoom_step
        if symbol == pyglet.window.key.MINUS:
            self.camera.scale /= self.zoom_step
class Level(object):
    """The physics simulation: a Box2D world plus elapsed simulated time."""

    def __init__(self):
        self.time = 0
        self.world = create_world()

    def step(self, dt):
        # Advance the world by dt seconds with 10 velocity and 10 position
        # solver iterations (the b2World.Step parameters).
        self.time += dt
        self.world.Step(dt, 10, 10)
def main():
if '-h' in sys.argv or '--help' in sys.argv:
print """
Options:
--animation-editor Start the animation editor.
-h, --help You're looking at it.
--skeleton-editor Start the skeleton editor.
--skin-editor Start the skin editor.
--windowed Enable windowed mode.
""".strip()
return
fps = '--fps' in sys.argv
fullscreen = '--fullscreen' in sys.argv
window = MyWindow(fps=fps, fullscreen=fullscreen)
pyglet.app.run()
# Script entry point: run the game or editor selected on the command line.
if __name__ == '__main__':
    main()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Grappler LayoutOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
def _weight(shape):
    """Generates a weight of a given shape (deterministic: fixed seed)."""
    initial = random_ops.truncated_normal(shape, seed=0, stddev=0.1)
    return initial
def _bias(shape):
    """Generates a constant 0.1 bias of a given shape."""
    initial = constant_op.constant(0.1, shape=shape)
    return initial
def _conv2d(x, w):
    """2-D convolution with stride 1 and SAME padding."""
    strides = [1, 1, 1, 1]
    return nn.conv2d(x, w, strides=strides, padding='SAME')
def _max_pool_2x2(x):
  """Halve the spatial resolution of x with 2x2/stride-2 max pooling."""
  window = [1, 2, 2, 1]
  return nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Taken from tensorflow/examples/tutorials/mnist/mnist_deep.py
def _two_layer_model(x):
  """Build an MNIST-style model: two conv+relu+maxpool stages over x.

  Op-creation order matches the original so generated node names (which the
  tests below assert on) are unchanged.
  """
  image = array_ops.reshape(x, [-1, 28, 28, 1])
  kernel1 = _weight([5, 5, 1, 32])
  bias1 = _bias([32])
  relu1 = nn.relu(_conv2d(image, kernel1) + bias1)
  pool1 = _max_pool_2x2(relu1)
  kernel2 = _weight([5, 5, 32, 64])
  bias2 = _bias([64])
  relu2 = nn.relu(_conv2d(pool1, kernel2) + bias2)
  return _max_pool_2x2(relu2)
def _model_with_second_port():
  """Model whose add consumes fused_batch_norm's second output (the mean)."""
  random_seed.set_random_seed(0)
  x = random_ops.truncated_normal([2, 5, 5, 4], seed=0)
  scale = constant_op.constant(0.1, shape=[4])
  offset = constant_op.constant(0.3, shape=[4])
  y, mean, _ = nn.fused_batch_norm(x, scale, offset)
  combined = math_ops.add(y, mean)
  return array_ops.identity(combined)
def _model_with_branch(x):
  """Two parallel convolutions over the same input, summed together."""
  image = array_ops.reshape(x, [-1, 28, 28, 1])
  kernel_a = _weight([5, 5, 1, 32])
  kernel_b = _weight([5, 5, 1, 32])
  branch_a = _conv2d(image, kernel_a)
  branch_b = _conv2d(image, kernel_b)
  return math_ops.add(branch_a, branch_b)
def _model_with_vec_and_4d(x):
  """Add a broadcast 1-D constant to a 4-D convolution output."""
  image = array_ops.reshape(x, [-1, 28, 28, 1])
  kernel = _weight([5, 5, 1, 32])
  feature_map = _conv2d(image, kernel)
  bias_vec = constant_op.constant(6.4, shape=[32])
  return math_ops.add(feature_map, bias_vec)
def _loop():
  """Run _two_layer_model over four random inputs via map_fn (a while loop)."""
  random_seed.set_random_seed(0)
  # Four truncated_normal ops are created in the same order as before.
  elems = tuple(
      random_ops.truncated_normal([1, 784], seed=0) for _ in range(4))
  return functional_ops.map_fn(_two_layer_model, elems, dtype=dtypes.float32)
def _loop_with_branch():
  """Run _model_with_branch over four random inputs via map_fn."""
  random_seed.set_random_seed(0)
  elems = tuple(
      random_ops.truncated_normal([1, 784], seed=0) for _ in range(4))
  return functional_ops.map_fn(
      _model_with_branch, elems, dtype=dtypes.float32)
def _loop_with_vec_and_4d():
  """Run _model_with_vec_and_4d over four random inputs via map_fn."""
  random_seed.set_random_seed(0)
  elems = tuple(
      random_ops.truncated_normal([1, 784], seed=0) for _ in range(4))
  return functional_ops.map_fn(
      _model_with_vec_and_4d, elems, dtype=dtypes.float32)
def _get_config(layout_optimizer=True):
  """Build a ConfigProto with the grappler layout optimizer toggled on/off."""
  if layout_optimizer:
    toggle = rewriter_config_pb2.RewriterConfig.ON
  else:
    toggle = rewriter_config_pb2.RewriterConfig.OFF
  rewrite_options = rewriter_config_pb2.RewriterConfig(layout_optimizer=toggle)
  # build_cost_model=1 makes the cost graph (with optimizer node names)
  # available in RunMetadata, which the tests inspect.
  graph_options = config_pb2.GraphOptions(
      rewrite_options=rewrite_options, build_cost_model=1)
  return config_pb2.ConfigProto(graph_options=graph_options)
def _simple_metagraph(depthwise=False):
  """Export a meta graph for a two-layer (optionally depthwise) conv model."""
  random_seed.set_random_seed(0)
  x = variables.Variable(random_ops.truncated_normal([1, 200, 200, 3], seed=0))
  if depthwise:
    conv = conv_layers.separable_conv2d
  else:
    conv = conv_layers.conv2d
  y = conv(x, 32, [3, 3])
  z = conv(y, 32, [3, 3])
  optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
  loss = math_ops.reduce_mean(z)
  train_op = optimizer.minimize(loss)
  graph = ops.get_default_graph()
  graph.add_to_collection('train_op', train_op)
  return saver_lib.export_meta_graph(graph_def=graph.as_graph_def())
def _get_cluster():
  """Describe a single virtual GPU device for the grappler cluster."""
  device = device_properties_pb2.NamedDevice()
  device.name = '/GPU:0'
  device.properties.type = 'GPU'
  device.properties.environment['architecture'] = '4'
  return gcluster.Cluster(devices=[device])
class LayoutOptimizerTest(test.TestCase):
  """Tests the Grappler layout optimizer."""
  def _train(self, checkpoint_path, layout_optimizer=False, restore=False):
    """Run a tiny 2-conv training loop; save a checkpoint, or restore one
    and return the resulting global-variable values (restore=True)."""
    ops.reset_default_graph()
    graph = ops.get_default_graph()
    with session.Session(
        config=_get_config(layout_optimizer), graph=graph) as sess:
      batch = 2
      height = 6
      width = 7
      input_channels = 3
      shape = [batch, height, width, input_channels]
      image = array_ops.placeholder(dtype='float32', shape=shape)
      conv1 = conv_layers.conv2d(image, 32, [3, 3])
      conv2 = conv_layers.conv2d(conv1, 32, [3, 3])
      optimizer = gradient_descent.GradientDescentOptimizer(0.01)
      loss = math_ops.reduce_mean(conv2)
      train_op = optimizer.minimize(loss)
      saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
      if restore:
        saver.restore(sess, checkpoint_path)
      else:
        sess.run(variables.global_variables_initializer())
      np.random.seed(0)
      for _ in range(2):
        image_val = np.random.rand(*shape).astype(np.float32)
        sess.run([loss, train_op], feed_dict={image: image_val})
      if restore:
        all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        all_vars_values = [var.eval(session=sess) for var in all_vars]
        return all_vars_values
      else:
        saver.save(sess, checkpoint_path)
  def testTwoConvLayers(self):
    """Optimizer leaves exactly one NHWC->NCHW / NCHW->NHWC transpose pair."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      output = _two_layer_model(x)
      with session.Session() as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Relu_1-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSplitWithNonConstAxis(self):
    """A fed (non-const) split axis is remapped via a DimMap node."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dim = array_ops.placeholder(dtype='int32')
      split = array_ops.split(conv, 2, axis=dim)
      output = math_ops.reduce_sum(split[0])
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={dim: 3})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-split-0-0', nodes)
      self.assertIn('LayoutOptimizerDimMapNHWCToNCHW_split_0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSplitVWithNonConstAxis(self):
    """SplitV with fed axis: axis input (index 2) goes through DimMap."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dim = array_ops.placeholder(dtype='int32')
      sizes = constant_op.constant([50, 10, 4], shape=[3])
      split = gen_array_ops._split_v(
          value=conv, size_splits=sizes, axis=dim, num_split=3)
      output = math_ops.reduce_sum(split[0])
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={dim: 3})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-SplitV-0-0', nodes)
      self.assertIn('LayoutOptimizerDimMapNHWCToNCHW_SplitV_2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testPadWithConstPaddings(self):
    """Constant paddings are permuted at optimization time (const folding)."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
      paddings = constant_op.constant(
          paddings_val, dtype='int32', name='PaddingsConst')
      pad = array_ops.pad(conv, paddings)
      output = array_ops.identity(pad)
      with session.Session() as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Pad-0-0', nodes)
      self.assertIn('LayoutOptimizer-Pad-PaddingsConst', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testConcatWithControlDependency(self):
    """Control dependencies on concat survive the layout rewrite."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      axis = constant_op.constant(3)
      var = variables.Variable(3)
      assign = state_ops.assign(var, 6)
      with ops.control_dependencies([assign]):
        concat = array_ops.concat([conv, conv], axis)
      output = array_ops.identity(concat)
      with session.Session() as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-concat-0-0', nodes)
      self.assertIn('LayoutOptimizer-concat-Const_2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testFill(self):
    """Fill's shape input is handled; vector permutes cancel entirely."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = array_ops.placeholder(dtype='float32')
      conv = _two_layer_model(x)
      shape = array_ops.shape(conv)
      scalar = array_ops.constant(5.7)
      fill = array_ops.fill(shape, scalar)
      output = array_ops.identity(fill)
      x_val = [3.4] * 784
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={x: x_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                x: x_val
            })
      nodes = []
      num_transposes = 0
      num_vec_permute = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        if node.name.startswith('LayoutOptimizerVecPermute'):
          num_vec_permute += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      # Two vector permute nodes were initially added in the Expand phase of
      # LayoutOptimizer; they cancelled out each other in the Collapse phase.
      expected_vec_permute = 0
      self.assertEqual(expected_vec_permute, num_vec_permute)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Fill-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReverseWithConstDims(self):
    """Constant reverse dims are remapped at optimization time."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dims = constant_op.constant([3, 1], name='DimsConst')
      reverse = array_ops.reverse(conv, dims)
      output = array_ops.identity(reverse)
      with session.Session() as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-ReverseV2-0-0', nodes)
      self.assertIn('LayoutOptimizer-ReverseV2-DimsConst', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReverseWithNonConstDims(self):
    """Fed reverse dims go through a runtime DimMap node."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dims = array_ops.placeholder(dtype='int32')
      reverse = array_ops.reverse(conv, dims)
      output = array_ops.identity(reverse)
      dims_val = [2, 3]
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={dims: dims_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                dims: dims_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-ReverseV2-0-0', nodes)
      self.assertIn('LayoutOptimizerDimMapNHWCToNCHW_ReverseV2_1', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testTernaryOp(self):
    """Select (three 4-D inputs) is layout-converted with 3 transposes."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      add = math_ops.add(conv, conv)
      mean = math_ops.reduce_mean(conv)
      condition = math_ops.less(conv, mean)
      select = gen_math_ops._select(condition, conv, add)
      output = array_ops.identity(select)
      with session.Session() as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 3
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Select-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testPadWithNonConstPaddings(self):
    """Fed paddings are permuted at runtime via a VecPermute node."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      paddings = array_ops.placeholder(dtype='int32')
      pad = array_ops.pad(conv, paddings)
      output = array_ops.identity(pad)
      paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={paddings: paddings_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                paddings: paddings_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Pad-0-0', nodes)
      self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_Pad_1', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testMaxPoolV2(self):
    """MaxPoolV2: const ksize folded, fed strides permuted at runtime."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
      strides = array_ops.placeholder(dtype='int32', shape=[4])
      max_pool = gen_nn_ops._max_pool_v2(conv, ksize, strides, 'VALID')
      output = array_ops.identity(max_pool)
      strides_val = [1, 3, 2, 1]
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={strides: strides_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                strides: strides_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-MaxPoolV2-0-0', nodes)
      self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_MaxPoolV2_2', nodes)
      self.assertIn('LayoutOptimizer-MaxPoolV2-Const_2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testMaxPoolGradV2(self):
    """MaxPoolGradV2 with fed strides (input index 4) is handled."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
      strides = array_ops.placeholder(dtype='int32', shape=[4])
      max_pool_grad = gen_nn_ops.max_pool_grad_v2(conv, conv, conv, ksize,
                                                  strides, 'VALID')
      output = array_ops.identity(max_pool_grad)
      strides_val = [1, 3, 2, 1]
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={strides: strides_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                strides: strides_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-MaxPoolGradV2-0-0',
                    nodes)
      self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_MaxPoolGradV2_4',
                    nodes)
      self.assertIn('LayoutOptimizer-MaxPoolGradV2-Const_2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSliceWithNonConstAxis(self):
    """Slice with a fed size vector gets a runtime VecPermute."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      size = array_ops.placeholder(dtype='int32')
      s = array_ops.slice(conv, [0, 0, 0, 0], size)
      output = array_ops.identity(s)
      size_val = [1, 2, 3, 4]
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={size: size_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                size: size_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Slice-0-0', nodes)
      self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_Slice_2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testShapeN(self):
    """ShapeN outputs are corrected with a NCHW->NHWC vector permute."""
    if test.is_gpu_available(cuda_only=True):
      x = array_ops.placeholder(dtype='float32')
      conv = _two_layer_model(x)
      shapen = array_ops.shape_n([conv, conv])
      output = math_ops.add(shapen[0], shapen[1])
      x_val = [1.7] * 784
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={x: x_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                x: x_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerVecPermuteNCHWToNHWC-ShapeN-0-0', nodes)
      self.assertAllEqual(output_val_ref, output_val)
  def testLoop(self):
    """Layout conversion works inside a map_fn while loop."""
    if test.is_gpu_available(cuda_only=True):
      output = _loop()
      with session.Session() as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      # NOTE(review): the line below duplicates the assertion above —
      # appears to be a copy-paste leftover; harmless but removable.
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-map/while/Conv2D-0',
                    nodes)
      self.assertIn(
          'LayoutOptimizerTransposeNCHWToNHWC-map/while/MaxPool_1-0-2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testLoopWithBranch(self):
    """Layout conversion inside a loop whose body has a branch."""
    if test.is_gpu_available(cuda_only=True):
      output = _loop_with_branch()
      with session.Session() as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-map/while/Conv2D-0',
                    nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-map/while/Add-0-2',
                    nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testLoopWithVecAnd4D(self):
    """Layout conversion inside a loop mixing 1-D and 4-D tensors."""
    if test.is_gpu_available(cuda_only=True):
      output = _loop_with_vec_and_4d()
      with session.Session() as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-map/while/Conv2D-0',
                    nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-map/while/Add-0-2',
                    nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testBinaryOpSecondPort(self):
    """Binary op consuming a non-zero output port (batch-norm mean)."""
    if test.is_gpu_available(cuda_only=True):
      output = _model_with_second_port()
      with session.Session() as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-FusedBatchNorm-0',
                    nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Add-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testGradient(self):
    """All forward and backward conv ops are flipped to NCHW."""
    meta_graph = _simple_metagraph()
    rewrite_options = rewriter_config_pb2.RewriterConfig(
        layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
    optimized_graph = tf_optimizer.OptimizeGraph(
        rewrite_options, meta_graph, cluster=_get_cluster())
    found = 0
    for node in optimized_graph.node:
      if node.op in ['Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput']:
        found += 1
        self.assertEqual(node.attr['data_format'].s, b'NCHW')
    self.assertEqual(found, 5)
  def testDepthwise(self):
    """Depthwise conv ops (fwd + both gradients) are flipped to NCHW."""
    meta_graph = _simple_metagraph(depthwise=True)
    rewrite_options = rewriter_config_pb2.RewriterConfig(
        layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
    optimized_graph = tf_optimizer.OptimizeGraph(
        rewrite_options, meta_graph, cluster=_get_cluster())
    found = 0
    for node in optimized_graph.node:
      if node.op in [
          'DepthwiseConv2dNative', 'DepthwiseConv2dNativeBackpropFilter',
          'DepthwiseConv2dNativeBackpropInput'
      ]:
        found += 1
        self.assertEqual(node.attr['data_format'].s, b'NCHW')
    self.assertEqual(found, 6)
  def testCheckpointCompatibility(self):
    """A checkpoint saved without the optimizer restores identically with it."""
    if not test.is_gpu_available(cuda_only=True):
      self.skipTest('GPU required')
    checkpoint_path = self.get_temp_dir()
    self._train(checkpoint_path)
    vars_expected = self._train(checkpoint_path, restore=True)
    vars_layout_optimized = self._train(
        checkpoint_path, restore=True, layout_optimizer=True)
    for var_expected, var_layout_optimized in zip(vars_expected,
                                                  vars_layout_optimized):
      self.assertAllClose(var_expected, var_layout_optimized, atol=1e-6)
# Run the layout optimizer test suite when executed directly.
if __name__ == '__main__':
  test.main()
| |
import argparse
import os
from zipfile import ZipFile
from urllib.request import urlopen
import shutil
import pandas as pd
from time import time
from datetime import datetime
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard, CSVLogger
from keras.optimizers import Adam
import csv
from keras.models import Model, load_model
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
from keras import backend as K
from skimage.io import imread
from skimage.transform import resize
from keras.applications.inception_v3 import InceptionV3
from keras.applications.resnet50 import ResNet50
from keras.layers import Dense, GlobalAveragePooling2D
import requests
# Global paths
OUTPUT_DIRECTORY = "./outputs/"  # per-run training outputs (one subdir per fold)
LABEL_DIRECTORY = "./labels/"  # CSV label files for the k-fold splits
MODEL_DIRECTORY = "./models/"
MODEL_GD_ID = "1MRbN5hXOTYnw7-71K-2vjY01uJ9GkQM5"  # Google Drive id of models.zip
MODEL_ZIP_FILE = "./models/models.zip"
IMG_DIRECTORY = "./images/"
IMG_GD_ID = "1xnK3B6K6KekDI55vwJ0vnc2IGoDga9cj"  # Google Drive id of images.zip
IMG_ZIP_FILE = "./images/images.zip"
# Global variables
RAW_IMG_SIZE = (256, 256)  # size images are loaded at, before cropping
IMG_SIZE = (224, 224)  # network input size (crop target)
INPUT_SHAPE = (IMG_SIZE[0], IMG_SIZE[1], 3)
MAX_EPOCH = 200
BATCH_SIZE = 32
FOLDS = 5  # k in k-fold cross validation
STOPPING_PATIENCE = 32  # presumably epochs without improvement before early stop — confirm against callbacks
LR_PATIENCE = 16  # presumably epochs before LR reduction — confirm against ReduceLROnPlateau usage
INITIAL_LR = 0.0001
CLASSES = [0, 1, 2, 3, 4, 5, 6, 7, 8]
# Human-readable names aligned index-for-index with CLASSES.
CLASS_NAMES = ['Chinee Apple',
               'Lantana',
               'Parkinsonia',
               'Parthenium',
               'Prickly Acacia',
               'Rubber Vine',
               'Siam Weed',
               'Snake Weed',
               'Negatives']
def download_google_drive_file(id, destination):
    """Download the Google Drive file *id* to *destination*, handling the
    large-file virus-scan confirmation token when Drive issues one."""
    base_url = "https://docs.google.com/uc?export=download"
    http = requests.Session()
    response = http.get(base_url, params={'id': id}, stream=True)
    token = get_confirm_token(response)
    if token:
        # Large files require a second request carrying the confirm token.
        response = http.get(
            base_url, params={'id': id, 'confirm': token}, stream=True)
    save_response_content(response, destination)
def get_confirm_token(response):
    """Return the Drive download-warning cookie value, or None if absent."""
    return next(
        (value for key, value in response.cookies.items()
         if key.startswith('download_warning')),
        None)
def save_response_content(response, destination):
    """Stream the response body to *destination* in 32 KiB chunks."""
    chunk_size = 32768
    with open(destination, "wb") as out:
        for chunk in response.iter_content(chunk_size):
            if not chunk:  # skip keep-alive chunks
                continue
            out.write(chunk)
def parse_args():
    """Parse the command line and return the (command, model) pair."""
    parser = argparse.ArgumentParser(
        description='Train and test ResNet50, InceptionV3, or custom model on DeepWeeds.')
    parser.add_argument(
        "command", default='train', help="'cross_validate' or 'inference'")
    parser.add_argument(
        '--model', default='resnet',
        help="'resnet', 'inception', or path to .hdf5 file.")
    parsed = parser.parse_args()
    return parsed.command, parsed.model
def download_images():
    """Download the DeepWeeds image archive and extract it in place."""
    os.makedirs(IMG_DIRECTORY, exist_ok=True)
    print("Downloading DeepWeeds images to " + IMG_ZIP_FILE)
    download_google_drive_file(IMG_GD_ID, IMG_ZIP_FILE)
    print("Finished downloading images.")
    print("Unzipping " + IMG_ZIP_FILE)
    with ZipFile(IMG_ZIP_FILE, "r") as archive:
        archive.extractall(IMG_DIRECTORY)
    print("Finished unzipping images.")
def download_models():
    """Download the pre-trained DeepWeeds models and extract them in place."""
    os.makedirs(MODEL_DIRECTORY, exist_ok=True)
    print("Downloading DeepWeeds models to " + MODEL_ZIP_FILE)
    download_google_drive_file(MODEL_GD_ID, MODEL_ZIP_FILE)
    print("Finished downloading models.")
    print("Unzipping " + MODEL_ZIP_FILE)
    with ZipFile(MODEL_ZIP_FILE, "r") as archive:
        archive.extractall(MODEL_DIRECTORY)
    print("Finished unzipping models.")
def crop(img, size):
    """Center-crop an (H, W, C) image array.

    NOTE(review): size[0] is used as the target width and size[1] as the
    target height — confirm callers agree with this ordering.
    :param img: input image array of shape (H, W, C)
    :param size: (size[0], size[1]) crop dimensions
    :return: the central region of img
    """
    height, width, _ = img.shape
    # int(x / 2) truncates toward zero, matching the original behavior even
    # when the requested size exceeds the image.
    left = int((width - size[0]) / 2)
    top = int((height - size[1]) / 2)
    return img[top:(top + size[1]), left:(left + size[0]), :]
def crop_generator(batches, size):
    """Wrap a Keras image iterator, center-cropping every batch to *size*.

    NOTE(review): the preallocated array uses (size[0], size[1]) as (H, W)
    while crop() returns an array of shape (size[1], size[0], C); the two
    only agree for square crops — confirm before using non-square sizes.
    :param batches: iterator yielding (batch_x, batch_y) tuples
    :param size: crop dimensions
    """
    while True:
        batch_x, batch_y = next(batches)
        count = batch_x.shape[0]
        channels = batch_x.shape[3]
        batch_crops = np.zeros((count, size[0], size[1], channels))
        for idx in range(count):
            batch_crops[idx] = crop(batch_x[idx], (size[0], size[1]))
        yield (batch_crops, batch_y)
def cross_validate(model_name):
    """Train and test ``model_name`` on DeepWeeds with K fold cross validation,
    writing per-fold checkpoints, training metrics, classification reports
    and confusion matrices to timestamped output directories.

    :param model_name: 'resnet' or 'inception'
    """
    # K fold cross validation, saving outputs for each fold
    for k in range(FOLDS):
        # Create new output directory for individual folds from timestamp
        timestamp = datetime.fromtimestamp(time()).strftime('%Y%m%d-%H%M%S')
        print('Fold {}/{} - {}'.format(k + 1, FOLDS, timestamp))
        output_directory = "{}{}/".format(OUTPUT_DIRECTORY, timestamp)
        if not os.path.exists(output_directory):
            os.makedirs(output_directory)
        # Prepare training, validation and testing labels for kth fold
        train_label_file = "{}train_subset{}.csv".format(LABEL_DIRECTORY, k)
        val_label_file = "{}val_subset{}.csv".format(LABEL_DIRECTORY, k)
        test_label_file = "{}test_subset{}.csv".format(LABEL_DIRECTORY, k)
        train_dataframe = pd.read_csv(train_label_file)
        val_dataframe = pd.read_csv(val_label_file)
        test_dataframe = pd.read_csv(test_label_file)
        train_image_count = train_dataframe.shape[0]
        # BUG FIX: was train_dataframe.shape[0], which made validation_steps
        # wrong whenever the validation subset differs in size from training.
        val_image_count = val_dataframe.shape[0]
        test_image_count = test_dataframe.shape[0]
        # Training image augmentation
        train_data_generator = ImageDataGenerator(
            rescale=1. / 255,
            fill_mode="constant",
            shear_range=0.2,
            zoom_range=(0.5, 1),
            horizontal_flip=True,
            rotation_range=360,
            channel_shift_range=25,
            brightness_range=(0.75, 1.25))
        # Validation image augmentation
        val_data_generator = ImageDataGenerator(
            rescale=1. / 255,
            fill_mode="constant",
            shear_range=0.2,
            zoom_range=(0.5, 1),
            horizontal_flip=True,
            rotation_range=360,
            channel_shift_range=25,
            brightness_range=(0.75, 1.25))
        # No testing image augmentation (except for converting pixel values to floats)
        test_data_generator = ImageDataGenerator(rescale=1. / 255)
        # Load train images in batches from directory and apply augmentations
        train_data_generator = train_data_generator.flow_from_dataframe(
            train_dataframe,
            IMG_DIRECTORY,
            x_col='Filename',
            y_col='Label',
            target_size=RAW_IMG_SIZE,
            batch_size=BATCH_SIZE,
            has_ext=True,
            classes=CLASSES,
            class_mode='categorical')
        # Load validation images in batches from directory and apply rescaling
        val_data_generator = val_data_generator.flow_from_dataframe(
            val_dataframe,
            IMG_DIRECTORY,
            x_col="Filename",
            y_col="Label",
            target_size=RAW_IMG_SIZE,
            batch_size=BATCH_SIZE,
            has_ext=True,
            classes=CLASSES,
            class_mode='categorical')
        # Load test images in batches from directory and apply rescaling
        test_data_generator = test_data_generator.flow_from_dataframe(
            test_dataframe,
            IMG_DIRECTORY,
            x_col="Filename",
            y_col="Label",
            target_size=IMG_SIZE,
            batch_size=BATCH_SIZE,
            has_ext=True,
            shuffle=False,
            classes=CLASSES,
            class_mode='categorical')
        # Crop augmented images from 256x256 to 224x224
        train_data_generator = crop_generator(train_data_generator, IMG_SIZE)
        val_data_generator = crop_generator(val_data_generator, IMG_SIZE)
        # Load ImageNet pre-trained model with no top, either InceptionV3 or ResNet50
        if model_name == "resnet":
            base_model = ResNet50(weights='imagenet', include_top=False, input_shape=INPUT_SHAPE)
        elif model_name == "inception":
            base_model = InceptionV3(weights='imagenet', include_top=False, input_shape=INPUT_SHAPE)
        x = base_model.output
        # Add a global average pooling layer
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        # Add fully connected output layer with sigmoid activation for multi label classification
        outputs = Dense(len(CLASSES), activation='sigmoid', name='fc9')(x)
        # Assemble the modified model
        model = Model(inputs=base_model.input, outputs=outputs)
        # Checkpoints for training
        model_checkpoint = ModelCheckpoint(output_directory + "lastbest-0.hdf5", verbose=1, save_best_only=True)
        early_stopping = EarlyStopping(patience=STOPPING_PATIENCE, restore_best_weights=True)
        tensorboard = TensorBoard(log_dir=output_directory, histogram_freq=0, write_graph=True, write_images=False)
        reduce_lr = ReduceLROnPlateau('val_loss', factor=0.5, patience=LR_PATIENCE, min_lr=0.000003125)
        model.compile(loss='binary_crossentropy', optimizer=Adam(lr=INITIAL_LR), metrics=['categorical_accuracy'])
        csv_logger = CSVLogger(output_directory + "training_metrics.csv")
        # Train model until MAX_EPOCH, restarting after each early stop when learning has plateaued
        global_epoch = 0
        restarts = 0
        last_best_losses = []
        last_best_epochs = []
        while global_epoch < MAX_EPOCH:
            # callbacks is rebuilt each pass so the fresh model_checkpoint
            # created after a restart is actually used
            history = model.fit_generator(
                generator=train_data_generator,
                steps_per_epoch=train_image_count // BATCH_SIZE,
                epochs=MAX_EPOCH - global_epoch,
                validation_data=val_data_generator,
                validation_steps=val_image_count // BATCH_SIZE,
                callbacks=[tensorboard, model_checkpoint, early_stopping, reduce_lr, csv_logger],
                shuffle=False)
            last_best_losses.append(min(history.history['val_loss']))
            last_best_local_epoch = history.history['val_loss'].index(min(history.history['val_loss']))
            last_best_epochs.append(global_epoch + last_best_local_epoch)
            if early_stopping.stopped_epoch == 0:
                print("Completed training after {} epochs.".format(MAX_EPOCH))
                break
            else:
                global_epoch = global_epoch + early_stopping.stopped_epoch - STOPPING_PATIENCE + 1
                print("Early stopping triggered after local epoch {} (global epoch {}).".format(
                    early_stopping.stopped_epoch, global_epoch))
                print("Restarting from last best val_loss at local epoch {} (global epoch {}).".format(
                    early_stopping.stopped_epoch - STOPPING_PATIENCE, global_epoch - STOPPING_PATIENCE))
                restarts = restarts + 1
                # Halve the learning rate on each restart
                model.compile(loss='binary_crossentropy', optimizer=Adam(lr=INITIAL_LR / 2 ** restarts),
                              metrics=['categorical_accuracy'])
                model_checkpoint = ModelCheckpoint(output_directory + "lastbest-{}.hdf5".format(restarts),
                                                   monitor='val_loss', verbose=1, save_best_only=True, mode='min')
        # Save last best model info
        with open(output_directory + "last_best_models.csv", 'w', newline='') as file:
            writer = csv.writer(file, delimiter=',')
            writer.writerow(['Model file', 'Global epoch', 'Validation loss'])
            for i in range(restarts + 1):
                writer.writerow(["lastbest-{}.hdf5".format(i), last_best_epochs[i], last_best_losses[i]])
        # Load the last best model
        model = load_model(
            output_directory + "lastbest-{}.hdf5".format(last_best_losses.index(min(last_best_losses))))
        # Evaluate model on test subset for kth fold
        predictions = model.predict_generator(test_data_generator, test_image_count // BATCH_SIZE + 1)
        y_true = test_data_generator.classes
        y_pred = np.argmax(predictions, axis=1)
        y_pred[np.max(predictions, axis=1) < 1 / 9] = 8  # Assign predictions worse than random guess to negative class
        # Generate and print classification metrics and confusion matrix
        print(classification_report(y_true, y_pred, labels=CLASSES, target_names=CLASS_NAMES))
        report = classification_report(y_true, y_pred, labels=CLASSES, target_names=CLASS_NAMES, output_dict=True)
        with open(output_directory + 'classification_report.csv', 'w') as f:
            for key in report.keys():
                f.write("%s,%s\n" % (key, report[key]))
        conf_arr = confusion_matrix(y_true, y_pred, labels=CLASSES)
        print(conf_arr)
        np.savetxt(output_directory + "confusion_matrix.csv", conf_arr, delimiter=",")
        # Clear model from GPU after each iteration
        print("Finished testing fold {}\n".format(k + 1))
        K.clear_session()
        # NOTE: dropped the original trailing ``k = k + 1`` — the for loop
        # rebinds k on every iteration, so the increment had no effect.
def inference(model):
    """Time per-image preprocessing and prediction over the full DeepWeeds
    label set, writing the measurements to a CSV in a timestamped directory.

    :param model: a compiled Keras model accepting 224x224 RGB batches
    """
    # Create new output directory for saving inference times
    timestamp = datetime.fromtimestamp(time()).strftime('%Y%m%d-%H%M%S')
    output_directory = "{}{}/".format(OUTPUT_DIRECTORY, timestamp)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    # Load DeepWeeds dataframe
    dataframe = pd.read_csv(LABEL_DIRECTORY + "labels.csv")
    image_count = dataframe.shape[0]
    filenames = dataframe.Filename
    preprocessing_times = []
    inference_times = []
    for idx in range(image_count):
        # Preprocess: load, resize to 224x224, add batch axis, rescale to floats
        tic = time()
        img = imread(IMG_DIRECTORY + filenames[idx])
        img = resize(img, (224, 224))
        img = np.expand_dims(img, axis=0)
        img = img * 1./255
        preprocessing_times.append(time() - tic)
        # Predict label, mapping low-confidence predictions to the negative class
        tic = time()
        prediction = model.predict(img, batch_size=1, verbose=0)
        y_pred = np.argmax(prediction, axis=1)
        y_pred[np.max(prediction, axis=1) < 1/9] = 8
        inference_times.append(time() - tic)
    # Save inference times to csv
    with open(output_directory + "tf_inference_times.csv", 'w', newline='') as file:
        writer = csv.writer(file, delimiter=',')
        writer.writerow(['Filename', 'Preprocessing time (ms)', 'Inference time (ms)'])
        for idx in range(image_count):
            writer.writerow([filenames[idx], preprocessing_times[idx] * 1000, inference_times[idx] * 1000])
if __name__ == '__main__':
    # Parse command line arguments
    (command, model) = parse_args()
    # Download images and models (if necessary)
    download_images()
    download_models()
    if command == "cross_validate":
        if model not in ("resnet", "inception"):
            # BUG FIX: the original adjacent string literals ""resnet"" simply
            # concatenated to unquoted text; escape the intended quotes.
            print("Error: You must ask for either \"resnet\" or \"inception\".")
        else:
            # Train and test model on DeepWeeds with 5 fold cross validation
            cross_validate(model)
    else:
        if not model.endswith("hdf5"):
            print("Error: You must supply a hdf5 model file to perform inference.")
        else:
            # Construct model from hdf5 model file
            model = load_model(model)
            # Measure the speed of performing inference with the chosen model averaging over DeepWeeds images
            inference(model)
| |
import sys
import copy
import capstone
from hsdecomp import ptrutil, machine, show
from hsdecomp.parse import disasm, info
from hsdecomp.types import *
def interp_args(args, arg_pattern):
    """Interpret raw closure arguments according to an argument pattern.

    Pattern characters: 'p' = boxed pointer, 'n' = unboxed number
    (value plus tag), 'v' = void (consumes no argument, yields None).
    """
    interpreted = []
    consumed = 0
    for pat in arg_pattern:
        if pat == 'p':
            interpreted.append(Pointer(args[consumed].untagged))
            consumed += 1
        elif pat == 'n':
            interpreted.append(args[consumed].untagged.value + args[consumed].tag)
            consumed += 1
        elif pat == 'v':
            interpreted.append(None)
    return interpreted
def read_closure(settings, worklist, heaps, pointer):
    """Interpret the closure referenced by ``pointer``.

    Dispatches on the closure's info-table type: constructors become
    ``Apply`` nodes, indirections are followed, and functions/thunks are
    queued on ``worklist`` for later disassembly.  Returns an
    interpretation node, or ``UnknownInterpretation`` on failure.
    """
    try:
        info_pointer = ptrutil.dereference(settings, pointer, heaps, []).untagged
        assert isinstance(info_pointer, StaticValue)
        info_address = info_pointer.value
        info_type = info.read_closure_type(settings, info_address)
        if settings.opts.verbose:
            print(" Type:", info_type)
        if info_type[:11] == 'constructor':
            # Field counts live in the info table just before the entry code.
            num_ptrs = ptrutil.read_half_word(settings, settings.text_offset + info_address - settings.rt.halfword.size*4)
            num_non_ptrs = ptrutil.read_half_word(settings, settings.text_offset + info_address - settings.rt.halfword.size*3)
            args = []
            arg_pointer = ptrutil.make_tagged(settings, pointer)._replace(tag = 0)
            for i in range(num_ptrs + num_non_ptrs):
                arg_pointer = ptrutil.pointer_offset(settings, arg_pointer, settings.rt.word.size)
                args.append(ptrutil.dereference(settings, arg_pointer.untagged, heaps, []))
            arg_pattern = 'p' * num_ptrs + 'n' * num_non_ptrs
            # Only the pointer fields reference further closures.
            for arg in args[:num_ptrs]:
                worklist.append(ClosureWork(heaps = heaps, pointer = arg.untagged))
            return Apply(func = Pointer(info_pointer), func_type = 'constructor', args = interp_args(args, arg_pattern), pattern = arg_pattern)
        elif info_type[:11] == 'indirection':
            # Follow the indirection to the updated closure.
            tagged = ptrutil.make_tagged(settings, pointer)._replace(tag = 0)
            offset = ptrutil.pointer_offset(settings, tagged, settings.rt.word.size)
            new_ptr = ptrutil.dereference(settings, offset.untagged, heaps, [])
            if settings.opts.verbose:
                print()
            worklist.append(ClosureWork(heaps = heaps, pointer = new_ptr.untagged))
            return Pointer(new_ptr.untagged)
        elif info_type[:8] == 'function':
            arg_pattern = info.read_arg_pattern(settings, info_address)
        else:
            # Thunks take no arguments.
            arg_pattern = ''
        worklist.append(FunctionThunkWork(heaps = heaps, address = info_address, main_register = ptrutil.make_tagged(settings, pointer)._replace(tag = len(arg_pattern)), arg_pattern = arg_pattern))
        return Pointer(info_pointer)
    except Exception:
        # BUG FIX: narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit still propagate; report the failure and degrade gracefully.
        e_type, e_obj, e_tb = sys.exc_info()
        print("Error when processing closure at", show.show_pretty_pointer(settings, pointer))
        print(" Error:", e_obj)
        print(" Error Location:", e_tb.tb_lineno)
        print(" No Disassembly Available")
        print()
        return UnknownInterpretation()
def read_function_thunk(settings, worklist, heaps, address, main_register, arg_pattern):
    """Interpret the function or thunk whose code starts at ``address``.

    Non-void arguments are placed in the platform argument registers, with
    any overflow spilled onto an extra stack, before simulating the body.
    Functions (non-empty ``arg_pattern``) are wrapped in a ``Lambda``.
    """
    registers = {settings.rt.main_register: main_register}
    extra_stack = []
    for idx in range(len(arg_pattern)):
        if arg_pattern[idx] == 'v':
            continue
        argument = ptrutil.make_tagged(settings, Argument(index = idx, func = address))
        if idx < len(settings.rt.arg_registers):
            registers[settings.rt.arg_registers[idx]] = argument
        else:
            extra_stack.append(argument)
    body = read_code(settings, worklist, heaps, address, extra_stack, registers)
    if arg_pattern == '':
        return body
    return Lambda(func = address, arg_pattern = arg_pattern, body = body)
def gather_case_arms(settings, heaps, address, min_tag, max_tag, initial_stack, initial_registers, original_stack, original_inspection, path):
    """Recursively discover the arms of a case expression's tag-dispatch tree.

    Starting at ``address``, simulate up to the first jump.  If the code ends
    in a ``cmp``/``jae`` pair testing the scrutinee's tag against an immediate,
    recurse into both the below-threshold and at-or-above-threshold branches,
    splitting the [min_tag, max_tag] range accordingly.  Otherwise ``address``
    is a leaf arm; the prefix in ``path`` is re-simulated with the correctly
    tagged CasePointer so the arm sees accurate stack/register state.

    Returns parallel lists: (arm addresses, matched tags, stacks, registers).
    """
    mach = machine.Machine(settings, heaps, copy.deepcopy(initial_stack), copy.deepcopy(initial_registers))
    first_instructions = list(disasm.disasm_from_until(settings, address, lambda insn: insn.group(capstone.x86.X86_GRP_JUMP)))
    mach.simulate(first_instructions)
    # A tag comparison looks like: cmp <case-pointer offset>, imm ; jae <addr>
    if first_instructions[-2].mnemonic == 'cmp' and isinstance(mach.load(first_instructions[-2].operands[0]), Tagged) and isinstance(mach.load(first_instructions[-2].operands[0]).untagged, Offset) and isinstance(mach.load(first_instructions[-2].operands[0]).untagged.base, CasePointer) and first_instructions[-2].operands[1].type == capstone.x86.X86_OP_IMM:
        assert first_instructions[-1].mnemonic == 'jae'
        # Fall-through branch handles tags below the immediate; the jump
        # target handles tags at or above it.
        small_address = sum(map(lambda insn: insn.size, first_instructions)) + address
        large_address = first_instructions[-1].operands[0].imm
        arms_small, tags_small, stacks_small, regs_small = gather_case_arms(settings, heaps, small_address, min_tag, first_instructions[-2].operands[1].imm - 1, mach.stack, mach.registers, original_stack, original_inspection, path + [address])
        arms_large, tags_large, stacks_large, regs_large = gather_case_arms(settings, heaps, large_address, first_instructions[-2].operands[1].imm, max_tag, mach.stack, mach.registers, original_stack, original_inspection, path + [address])
        arms = arms_small + arms_large
        tags = tags_small + tags_large
        stacks = stacks_small + stacks_large
        registers = regs_small + regs_large
    else:
        arms = [address]
        # A fully narrowed range identifies the constructor tag exactly.
        if min_tag == max_tag:
            tag = NumericTag(value = min_tag)
        else:
            tag = DefaultTag()
        tags = [tag]
        # Resimulate the steps taken to get to this point with the correctly tagged CasePointer
        mach = machine.Machine(settings, heaps, copy.deepcopy(original_stack), {
            settings.rt.main_register: ptrutil.make_tagged(settings, Offset(base = CasePointer(inspection = original_inspection, matched_tag = tag), index = 0))._replace(tag = min_tag),
            settings.rt.stack_register: ptrutil.make_tagged(settings, Offset(base = StackPointer(), index = -len(original_stack)))
        })
        for step in path:
            mach.simulate(disasm.disasm_from_until(settings, step, lambda insn: insn.group(capstone.x86.X86_GRP_JUMP)))
        stacks = [mach.stack]
        registers = [mach.registers]
    return arms, tags, stacks, registers
def read_case(settings, worklist, heaps, pointer, stack, scrutinee):
    """Interpret a case inspection whose continuation info table is ``pointer``.

    Gathers all case arms via ``gather_case_arms``, interprets each arm's
    code, and returns a ``Case`` node over ``scrutinee``.  Returns
    ``UnknownInterpretation`` on failure.
    """
    try:
        if settings.opts.verbose:
            print("Found case inspection!")
        info_name = show.get_name_for_address(settings, pointer.value)
        if settings.opts.verbose:
            print(" Name:", show.demangle(info_name))
        arms, tags, stacks, registers = gather_case_arms(settings, heaps, pointer.value, 1, settings.rt.word.size - 1, stack, {
            settings.rt.main_register: ptrutil.make_tagged(settings, Offset(base = CasePointer(inspection = pointer, matched_tag = DefaultTag()), index = 0)),
            settings.rt.stack_register: ptrutil.make_tagged(settings, Offset(base = StackPointer(), index = -len(stack)))
        }, stack, pointer, [])
        interp_arms = []
        # Loop variable renamed from ``stack`` so it no longer shadows the
        # parameter of the same name (behavior unchanged; the parameter was
        # not used after this point).
        for arm, tag, arm_stack, regs in zip(arms, tags, stacks, registers):
            if settings.opts.verbose:
                print()
                print("Found case arm:")
                print(" From case:", info_name)
                print(" Pattern:", tag)
            interp_arms.append(read_code(settings, worklist, heaps, arm, arm_stack, regs))
        return Case(scrutinee = scrutinee, bound_ptr = pointer, arms = interp_arms, tags = tags)
    except Exception:
        # BUG FIX: narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit still propagate.
        e_type, e_obj, e_tb = sys.exc_info()
        print("Error in processing case at", show.show_pretty_pointer(settings, pointer))
        print(" Error:", e_obj)
        print(" Error Location:", e_tb.tb_lineno)
        print(" Disassembly:")
        for insn in disasm.disasm_from(settings, pointer.value):
            print(" " + show.show_instruction(insn))
        print()
        return UnknownInterpretation()
def read_code(settings, worklist, heaps, address, extra_stack, registers):
    """Simulate the basic block at ``address`` and interpret its final jump.

    The closing jump is classified as: a return through the stack, an
    evaluation of the closure in the main register, or a (possibly static)
    call.  Remaining stack continuations (apply frames, update frames, case
    returns) are then folded into the interpretation.  Newly discovered
    closures/thunks are queued on ``worklist``.  Returns an interpretation
    node, or ``UnknownInterpretation`` on failure.
    """
    try:
        instructions = list(disasm.disasm_from(settings, address))
        registers[settings.rt.heap_register] = ptrutil.make_tagged(settings, Offset(base = HeapPointer(id = len(heaps), owner = address), index = -1))
        registers[settings.rt.stack_register] = ptrutil.make_tagged(settings, Offset(base = StackPointer(), index = -len(extra_stack)))
        mach = machine.Machine(settings, heaps, extra_stack, registers)
        mach.simulate(instructions)
        registers = mach.registers
        # Keep only the live portion of the simulated stack.
        stack = mach.stack[registers[settings.rt.stack_register].untagged.index+len(mach.stack):]
        new_heaps = heaps + [mach.heap]
        if settings.opts.verbose:
            print(" Heap:", list(map(lambda h: show.show_pretty_value(settings, h), mach.heap)))
            print(" Stack:", list(map(lambda s: show.show_pretty_value(settings, s), stack)))
        if instructions[-1].operands[0].type == capstone.x86.X86_OP_MEM and machine.base_register(instructions[-1].operands[0].mem.base) == settings.rt.stack_register:
            # Jump through the stack: return the value in the main register.
            if settings.opts.verbose:
                print(" Interpretation: return", show.show_pretty_value(settings, registers[settings.rt.main_register]))
            returned = registers[settings.rt.main_register].untagged
            interpretation = Pointer(returned)
            worklist.append(ClosureWork(heaps = new_heaps, pointer = returned))
        else:
            # NOTE: removed the unused local ``uses = []`` from the original.
            if instructions[-1].operands[0].type == capstone.x86.X86_OP_MEM:
                # Jump through the main register: evaluate that closure.
                assert machine.base_register(instructions[-1].operands[0].mem.base) == settings.rt.main_register
                assert instructions[-1].operands[0].mem.disp == 0
                if settings.opts.verbose:
                    print(" Interpretation: evaluate", show.show_pretty_value(settings, registers[settings.rt.main_register]))
                evaled = registers[settings.rt.main_register].untagged
                stack_index = 0
                interpretation = Pointer(evaled)
                worklist.append(ClosureWork(heaps = new_heaps, pointer = evaled))
            elif instructions[-1].operands[0].type == capstone.x86.X86_OP_IMM:
                # Direct jump: either a generic apply stub or a known function.
                jmp_address = instructions[-1].operands[0].imm
                if jmp_address in settings.address_to_name and settings.address_to_name[jmp_address][:7] == 'stg_ap_':
                    # stg_ap_<pattern>_... encodes the argument pattern in its name.
                    func = settings.address_to_name[jmp_address]
                    if func.split('_')[2] == '0':
                        arg_pattern = ''
                    else:
                        arg_pattern = func.split('_')[2]
                    called = registers[settings.rt.main_register].untagged
                    worklist.append(ClosureWork(heaps = new_heaps, pointer = called))
                    func_type = 'closure'
                else:
                    arg_pattern = info.read_arg_pattern(settings, jmp_address)
                    called = StaticValue(value = jmp_address)
                    worklist.append(FunctionThunkWork(heaps = new_heaps, address = jmp_address, main_register = registers[settings.rt.main_register], arg_pattern = arg_pattern))
                    func_type = 'info'
                num_args = sum(1 for e in filter(lambda pat: pat != 'v', arg_pattern))
                if settings.opts.verbose:
                    print(" Number of non-void args:", num_args)
                    print(" Called:", show.show_pretty_pointer(settings, called))
                    print(" Arg pattern:", arg_pattern)
                # Arguments come from the argument registers first, then the stack.
                args = []
                stack_index = num_args
                for reg, i in zip(settings.rt.arg_registers, range(num_args)):
                    args.append(registers[reg])
                    stack_index -= 1
                args += stack[:stack_index]
                if settings.opts.verbose:
                    print(" Interpretation: call", show.show_pretty_pointer(settings, called), "on", list(map(lambda s: show.show_pretty_value(settings, s), args)))
                interpretation = Apply(func_type = func_type, func = Pointer(called), args = interp_args(args, arg_pattern), pattern = arg_pattern)
                for arg, pat in zip(args, arg_pattern):
                    if pat == 'p':
                        worklist.append(ClosureWork(heaps = new_heaps, pointer = arg.untagged))
            # Fold the remaining stack continuations into the interpretation.
            while stack_index < len(stack):
                assert isinstance(stack[stack_index].untagged, StaticValue)
                cont_name = show.get_name_for_address(settings, stack[stack_index].untagged.value)
                if cont_name[:7] == 'stg_ap_':
                    # Pending-apply frame: apply the result to more arguments.
                    assert cont_name[-5:] == '_info'
                    arg_pattern = cont_name.split('_')[2]
                    num_extra_args = sum(1 for e in filter(lambda pat: pat != 'v', arg_pattern))
                    if settings.opts.verbose:
                        print(" then apply the result to", list(map(lambda s: show.show_pretty_value(settings, s), stack[stack_index+1:][:num_extra_args])))
                    interpretation = Apply(func_type = 'closure', func = interpretation, args = interp_args(stack[stack_index+1:][:num_extra_args], arg_pattern), pattern = arg_pattern)
                    for arg in stack[stack_index+1:][:num_extra_args]:
                        worklist.append(ClosureWork(heaps = new_heaps, pointer = arg.untagged))
                    stack_index += 1 + num_extra_args
                elif cont_name == 'stg_upd_frame_info' or cont_name == 'stg_bh_upd_frame_info':
                    # Update frame: thunk overwrite, no interpretation change.
                    if settings.opts.verbose:
                        print(" then update the thunk at", show.show_pretty_value(settings, stack[stack_index + 1]))
                    stack_index += 2
                else:
                    # Anything else is a case-return continuation; it consumes
                    # the rest of the stack.
                    if settings.opts.verbose:
                        print(" then inspect using", show.show_pretty_value(settings, stack[stack_index]))
                        print()
                    interpretation = read_case(settings, worklist, new_heaps, stack[stack_index].untagged, stack[stack_index:], interpretation)
                    stack_index = len(stack)
        if settings.opts.verbose:
            print()
        return interpretation
    except Exception:
        # BUG FIX: narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit still propagate.
        e_type, e_obj, e_tb = sys.exc_info()
        print("Error in processing code at", show.show_pretty_address(settings, address))
        print(" Error:", e_obj)
        print(" Error Location:", e_tb.tb_lineno)
        print(" Disassembly:")
        for insn in disasm.disasm_from(settings, address):
            print(" " + show.show_instruction(insn))
        print()
        return UnknownInterpretation()
| |
#!/usr/bin/env python
import re
import time
import os
import sys
import json
import argparse
import logging
import logging.handlers
import subprocess
import StringIO
from neutronclient.neutron import client as q_client
from keystoneclient.v2_0 import client as ks_client
# Logger name used by this tool.
LOG_NAME='q-agent-cleanup'
# Neutron API version passed when instantiating the neutron client.
API_VER = '2.0'
# Local interface names embed the first 11 characters of the port UUID.
PORT_ID_PART_LEN=11
def get_authconfig(cfg_file):
    """Parse an OpenStack openrc-style file into a dict.

    Lines of the form ``export NAME=VALUE`` are collected; surrounding
    whitespace and single/double quote characters are stripped from both
    the name and the value.

    :param cfg_file: path to the openrc file
    :return: dict mapping variable names to values
    """
    env = {}
    quote_chars = " \'\""
    export_re = re.compile(r'\s*export\s+(\w+)\s*=\s*(.*)')
    with open(cfg_file) as fp:
        for line in fp:
            matched = export_re.match(line)
            if matched:
                env[matched.group(1).strip(quote_chars)] = matched.group(2).strip(quote_chars)
    return env
class NeutronCleaner(object):
    """Helpers to clean up local neutron ports/IP addresses and reschedule
    networks/routers away from dead agents."""
    # Local interface name prefix for a port, keyed by its neutron
    # device_owner value.
    PORT_NAME_PREFIXES_BY_DEV_OWNER = {
        'network:dhcp': 'tap',
        'network:router_gateway': 'qg-',
        'network:router_interface': 'qr-',
    }
    PORT_NAME_PREFIXES = {
        # contains tuples of prefixes
        'dhcp': (PORT_NAME_PREFIXES_BY_DEV_OWNER['network:dhcp'],),
        'l3': (
            PORT_NAME_PREFIXES_BY_DEV_OWNER['network:router_gateway'],
            PORT_NAME_PREFIXES_BY_DEV_OWNER['network:router_interface']
        )
    }
    # OVS bridges that may hold an agent's ports on this node.
    BRIDGES_FOR_PORTS_BY_AGENT ={
        'dhcp': ('br-int',),
        'l3': ('br-int', 'br-ex'),
    }
    # device_owner values associated with each agent type.
    PORT_OWNER_PREFIXES = {
        'dhcp': ('network:dhcp',),
        'l3': ('network:router_gateway', 'network:router_interface')
    }
    # Network namespace name prefixes per agent type.
    NS_NAME_PREFIXES = {
        'dhcp': 'qdhcp',
        'l3': 'qrouter',
    }
    # Process/binary name reported by each agent type.
    AGENT_BINARY_NAME = {
        'dhcp': 'neutron-dhcp-agent',
        'l3': 'neutron-l3-agent',
        'ovs': 'neutron-openvswitch-agent'
    }
    # Command templates (argument lists for subprocess; shell=False).
    CMD__list_ovs_port = ['ovs-vsctl', 'list-ports']
    CMD__remove_ovs_port = ['ovs-vsctl', '--', '--if-exists', 'del-port']
    CMD__remove_ip_addr = ['ip', 'address', 'delete']
def __init__(self, openrc, options, log=None):
self.log = log
self.auth_config = openrc
self.options = options
self.agents = {}
self.debug = options.get('debug')
self.RESCHEDULING_CALLS = {
'dhcp': self._reschedule_agent_dhcp,
'l3': self._reschedule_agent_l3,
}
ret_count = self.options.get('retries',1)
while True:
if ret_count <= 0 :
print(">>> Keystone error: no more retries for connect to keystone server.")
sys.exit(1)
try:
self.keystone = ks_client.Client(
username=openrc['OS_USERNAME'],
password=openrc['OS_PASSWORD'],
tenant_name=openrc['OS_TENANT_NAME'],
auth_url=openrc['OS_AUTH_URL'],
)
break
except Exception as e:
errmsg = e.message.strip()
if re.search(r"Connection\s+refused$", errmsg, re.I) or \
re.search(r"Connection\s+timed\s+out$", errmsg, re.I) or\
re.search(r"Service\s+Unavailable$", errmsg, re.I) or\
re.search(r"'*NoneType'*\s+object\s+has\s+no\s+attribute\s+'*__getitem__'*$", errmsg, re.I) or \
re.search(r"No\s+route\s+to\s+host$", errmsg, re.I):
print(">>> Can't connect to {0}, wait for server ready...".format(self.auth_config['OS_AUTH_URL']))
time.sleep(self.options.sleep)
else:
print(">>> Keystone error:\n{0}".format(e.message))
raise e
ret_count -= 1
self.token = self.keystone.auth_token
self.client = q_client.Client(
API_VER,
endpoint_url=self.keystone.service_catalog.url_for(service_type='network'),
token=self.token,
)
def _neutron_API_call(self, method, *args):
ret_count = self.options.get('retries')
while True:
if ret_count <= 0 :
self.log.error("Q-server error: no more retries for connect to server.")
return []
try:
rv = method (*args)
break
except Exception as e:
errmsg = e.message.strip()
if re.search(r"Connection\s+refused", errmsg, re.I) or\
re.search(r"Connection\s+timed\s+out", errmsg, re.I) or\
re.search(r"503\s+Service\s+Unavailable", errmsg, re.I) or\
re.search(r"No\s+route\s+to\s+host", errmsg, re.I):
self.log.info("Can't connect to {0}, wait for server ready...".format(self.keystone.service_catalog.url_for(service_type='network')))
time.sleep(self.options.sleep)
else:
self.log.error("Neutron error:\n{0}".format(e.message))
raise e
ret_count -= 1
return rv
    def _get_ports(self):
        # List all neutron ports (retry handling via _neutron_API_call).
        return self._neutron_API_call(self.client.list_ports)['ports']
    def _get_agents(self, use_cache=True):
        # List all neutron agents.
        # NOTE(review): ``use_cache`` is accepted but unused here; caching
        # is done by _get_agents_by_type.
        return self._neutron_API_call(self.client.list_agents)['agents']
    def _list_networks_on_dhcp_agent(self, agent_id):
        # Networks currently scheduled to the given DHCP agent.
        return self._neutron_API_call(self.client.list_networks_on_dhcp_agent, agent_id)['networks']
    def _list_routers_on_l3_agent(self, agent_id):
        # Routers currently scheduled to the given L3 agent.
        return self._neutron_API_call(self.client.list_routers_on_l3_agent, agent_id)['routers']
    def _add_network_to_dhcp_agent(self, agent_id, net_id):
        # Schedule a network onto a DHCP agent.
        return self._neutron_API_call(self.client.add_network_to_dhcp_agent, agent_id, {"network_id": net_id})
    def _add_router_to_l3_agent(self, agent_id, router_id):
        # Schedule a router onto an L3 agent.
        return self._neutron_API_call(self.client.add_router_to_l3_agent, agent_id, {"router_id": router_id})
    def _remove_router_from_l3_agent(self, agent_id, router_id):
        # Unschedule a router from an L3 agent.
        return self._neutron_API_call(self.client.remove_router_from_l3_agent, agent_id, router_id)
def _get_ports_by_agent(self, agent, activeonly=False, localnode=False, port_id_part_len=PORT_ID_PART_LEN):
self.log.debug("__get_ports_by_agent: start, agent='{0}', activeonly='{1}'".format(agent, activeonly))
ports = self._get_ports()
#self.log.debug(json.dumps(ports, indent=4))
if activeonly:
tmp = []
for i in ports:
if i['status'] == 'ACTIVE':
tmp.append(i)
ports = tmp
agent_ports = []
for i in ports:
if i['device_owner'] in self.PORT_OWNER_PREFIXES.get(agent):
agent_ports.append(i)
if localnode:
# get ports for this agent, existing on this node
port_id_starts = set()
for i in self.BRIDGES_FOR_PORTS_BY_AGENT.get(agent,[]):
cmd = []
cmd.extend(self.CMD__list_ovs_port)
cmd.append(i)
process = subprocess.Popen(
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
rc = process.wait()
if rc != 0:
self.log.error("ERROR (rc={0}) while execution '{1}'".format(rc,' '.join(cmd)))
else:
stdout = process.communicate()[0]
for port in StringIO.StringIO(stdout):
port = port.strip()
for j in self.PORT_NAME_PREFIXES.get(agent,[]):
if port.startswith(j):
port_id_starts.add(port[len(j):])
break
rv = []
for i in agent_ports:
id_part = i['id'][:port_id_part_len]
if id_part in port_id_starts:
rv.append(i)
else:
rv = agent_ports
self.log.debug("__get_ports_by_agent: end, rv='{0}'".format(json.dumps(rv, indent=4)))
return rv
def _get_portnames_and_IPs_for_agent(self, agent, port_id_part_len=PORT_ID_PART_LEN, localnode=False):
self.log.debug("_get_portnames_and_IPs_for_agent: start, agent='{0}'".format(agent))
port_name_prefix = self.PORT_NAME_PREFIXES.get(agent)
if port_name_prefix is None:
self.log.debug("port_name_prefix is None")
return []
rv = []
for i in self._get_ports_by_agent(agent, activeonly=self.options.get('activeonly'), localnode=localnode):
# _rr = "{0}{1} {2}".format(self.PORT_NAME_PREFIXES_BY_DEV_OWNER[i['device_owner']], i['id'][:port_id_part_len], i['fixed_ips'][0]['ip_address'])
_rr = ("{0}{1}".format(self.PORT_NAME_PREFIXES_BY_DEV_OWNER[i['device_owner']], i['id'][:port_id_part_len]), i['fixed_ips'][0]['ip_address'])
#todo: returns array of IPs. IPs may be more than one
rv.append(_rr)
self.log.debug("_get_portnames_and_IPs_for_agent: end, rv='{0}'".format(json.dumps(rv, indent=4)))
return rv
def _cleanup_ovs_ports(self, portlist):
self.log.info("Ports {0} will be cleaned.".format(json.dumps(portlist)))
for port in portlist:
cmd = []
cmd.extend(self.CMD__remove_ovs_port)
cmd.append(port)
if self.options.get('noop'):
self.log.info("NOOP-execution: '{0}'".format(' '.join(cmd)))
else:
process = subprocess.Popen(
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
rc = process.wait()
if rc != 0:
self.log.error("ERROR (rc={0}) while execution {1}".format(rc,cmd))
#
def _cleanup_ip_addresses(self, addrlist):
self.log.info("IP addresses {0} will be cleaned.".format(json.dumps(addrlist)))
addrs=set([str(x) for x in addrlist])
re_inet = re.compile(r'\s*inet\s')
re_addrline = re.compile(r'inet\s+(\d+\.\d+\.\d+\.\d+\/\d+)\s+.*\s([\w\-\.\_]+)$')
ip2ifaces = {}
ifaces2ip = {}
# get IP list for this system
process = subprocess.Popen(
['ip','addr','show'],
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout = process.communicate()[0]
rc = process.wait()
if rc != 0:
self.log.error("ERROR (rc={0}) while execution {1}".format(rc,' '.join(cmd)))
return False
for i in StringIO.StringIO(stdout):
if re_inet.match(i):
rgx = re_addrline.search(i)
if rgx:
ip, iface = re_addrline.search(i).groups()
# tmp = ip2ifaces.get(ip)
# if not tmp:
# ip2ifaces[ip] = set([])
# tmp = ip2ifaces.get(ip)
# tmp.add(iface)
# tmp = ifaces2ip.get(iface)
# if not tmp:
# ifaces2ip[iface] = set([])
# tmp = ifaces2ip.get(iface)
# tmp.add(ip)
addr = ip.split('/')[0]
if addr in addrs:
cmd = []
cmd.extend(self.CMD__remove_ip_addr)
cmd.extend([ip,'dev',iface])
if self.options.get('noop'):
self.log.info("NOOP-execution:{0}".format(' '.join(cmd)))
else:
process = subprocess.Popen(
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
rc = process.wait()
if rc != 0:
self.log.error("ERROR (rc={0}) while execution {1}".format(rc,' '.join(cmd)))
addrs.remove(addr)
if len(addrs) == 0:
break
#
def _get_agents_by_type(self, agent, use_cache=True):
self.log.debug("_get_agents_by_type: start.")
rv = self.agents.get(agent, []) if use_cache else []
if not rv:
agents = self._get_agents(use_cache=use_cache)
for i in agents:
if i['binary'] == self.AGENT_BINARY_NAME.get(agent):
rv.append(i)
from_cache = ''
else:
from_cache = ' from local cache'
self.log.debug("_get_agents_by_type: end, {0} rv: {1}".format(from_cache, json.dumps(rv, indent=4)))
return rv
def _cleanup_ports(self, agent, activeonly=False):
self.log.debug("_cleanup_ports: start.")
rv = False
port_ip_list = self._get_portnames_and_IPs_for_agent(agent, localnode=True)
# Cleanup ports
port_list = [x[0] for x in port_ip_list]
self._cleanup_ovs_ports(port_list)
# Cleanup IP addresses
ip_list = [x[1] for x in port_ip_list]
self._cleanup_ip_addresses(ip_list)
self.log.debug("_cleanup_ports: end.")
#return rv
def _reschedule_agent_dhcp(self, agent_type):
self.log.debug("_reschedule_agent_dhcp: start.")
agents = {
'alive': [],
'dead': []
}
# collect networklist from dead DHCP-agents
dead_networks = []
for agent in self._get_agents_by_type(agent_type):
if agent['alive']:
self.log.info("found alive DHCP agent: {0}".format(agent['id']))
agents['alive'].append(agent)
else:
# dead agent
self.log.info("found dead DHCP agent: {0}".format(agent['id']))
agents['dead'].append(agent)
for net in self._list_networks_on_dhcp_agent(agent['id']):
dead_networks.append(net)
if dead_networks and agents['alive']:
# get network-ID list of already attached to alive agent networks
lucky_ids = set()
map(
lambda net: lucky_ids.add(net['id']),
self._list_networks_on_dhcp_agent(agents['alive'][0]['id'])
)
# add dead networks to alive agent
for net in dead_networks:
if net['id'] not in lucky_ids:
# attach network to agent
self.log.info("attach network {net} to DHCP agent {agent}".format(
net=net['id'],
agent=agents['alive'][0]['id']
))
if not self.options.get('noop'):
self._add_network_to_dhcp_agent(agents['alive'][0]['id'], net['id'])
#if error:
# return
# remove dead agents if need (and if found alive agent)
if self.options.get('remove-dead'):
for agent in agents['dead']:
self.log.info("remove dead DHCP agent: {0}".format(agent['id']))
if not self.options.get('noop'):
self._neutron_API_call(self.client.delete_agent, agent['id'])
self.log.debug("_reschedule_agent_dhcp: end.")
def _reschedule_agent_l3(self, agent_type):
self.log.debug("_reschedule_agent_l3: start.")
agents = {
'alive': [],
'dead': []
}
# collect router-list from dead DHCP-agents
dead_routers = [] # array of tuples (router, agentID)
for agent in self._get_agents_by_type(agent_type):
if agent['alive']:
self.log.info("found alive L3 agent: {0}".format(agent['id']))
agents['alive'].append(agent)
else:
# dead agent
self.log.info("found dead L3 agent: {0}".format(agent['id']))
agents['dead'].append(agent)
map(
lambda rou: dead_routers.append((rou, agent['id'])),
self._list_routers_on_l3_agent(agent['id'])
)
self.log.debug("L3 agents in cluster: {ags}".format(ags=json.dumps(agents, indent=4)))
self.log.debug("Routers, attached to dead L3 agents: {rr}".format(rr=json.dumps(dead_routers, indent=4)))
if dead_routers and agents['alive']:
# get router-ID list of already attached to alive agent routerss
lucky_ids = set()
map(
lambda rou: lucky_ids.add(rou['id']),
self._list_routers_on_l3_agent(agents['alive'][0]['id'])
)
# remove dead agents after rescheduling
for agent in agents['dead']:
self.log.info("remove dead L3 agent: {0}".format(agent['id']))
if not self.options.get('noop'):
self._neutron_API_call(self.client.delete_agent, agent['id'])
# move routers from dead to alive agent
for rou in filter(lambda rr: not(rr[0]['id'] in lucky_ids), dead_routers):
# self.log.info("unschedule router {rou} from L3 agent {agent}".format(
# rou=rou[0]['id'],
# agent=rou[1]
# ))
# if not self.options.get('noop'):
# self._remove_router_from_l3_agent(rou[1], rou[0]['id'])
# #todo: if error:
# #
self.log.info("schedule router {rou} to L3 agent {agent}".format(
rou=rou[0]['id'],
agent=agents['alive'][0]['id']
))
if not self.options.get('noop'):
self._add_router_to_l3_agent(agents['alive'][0]['id'], rou[0]['id'])
#todo: if error:
self.log.debug("_reschedule_agent_l3: end.")
def _reschedule_agent(self, agent):
self.log.debug("_reschedule_agents: start.")
task = self.RESCHEDULING_CALLS.get(agent, None)
if task:
task (agent)
self.log.debug("_reschedule_agents: end.")
def do(self, agent):
if self.options.get('list-agents'):
self._list_agents(agent)
return 0
if self.options.get('cleanup-ports'):
self._cleanup_ports(agent)
if self.options.get('reschedule'):
self._reschedule_agent(agent)
# if self.options.get('remove-agent'):
# self._cleanup_agents(agent)
def _test_healthy(self, agent_list, hostname):
rv = False
for agent in agent_list:
if agent['host'] == hostname and agent['alive']:
return True
return rv
def test_healthy(self, agent_type):
rc = 9 # OCF_FAILED_MASTER, http://www.linux-ha.org/doc/dev-guides/_literal_ocf_failed_master_literal_9.html
agentlist = self._get_agents_by_type(agent_type)
for hostname in self.options.get('test-hostnames'):
if self._test_healthy(agentlist, hostname):
return 0
return rc
if __name__ == '__main__':
    # CLI entry point: build the argument parser, configure logging, then run
    # the cleaner (or the health check) for every requested agent type.
    parser = argparse.ArgumentParser(description='Neutron network node cleaning tool.')
    parser.add_argument("-c", "--auth-config", dest="authconf", default="/root/openrc",
                        help="Authenticating config FILE", metavar="FILE")
    parser.add_argument("--retries", dest="retries", type=int, default=50,
                        help="try NN retries for API call", metavar="NN")
    parser.add_argument("--sleep", dest="sleep", type=int, default=2,
                        help="sleep seconds between retries", metavar="SEC")
    parser.add_argument("-a", "--agent", dest="agent", action="append",
                        help="specyfy agents for cleaning", required=True)
    # NOTE: hyphenated dest names ("cleanup-ports", "remove-dead", ...) are not
    # valid attribute names; they are read later via vars(args).get(...).
    parser.add_argument("--cleanup-ports", dest="cleanup-ports", action="store_true", default=False,
                        help="cleanup ports for given agents on this node")
    parser.add_argument("--activeonly", dest="activeonly", action="store_true", default=False,
                        help="cleanup only active ports")
    # parser.add_argument("--cleanup-ns", dest="cleanup-ns", action="store_true", default=False,
    #                     help="cleanup namespaces for given agents")
    # parser.add_argument("--remove-agent", dest="remove-agent", action="store_true", default=False,
    #                     help="cleanup namespaces for given agents")
    parser.add_argument("--reschedule", dest="reschedule", action="store_true", default=False,
                        help="reschedule given agents")
    parser.add_argument("--remove-dead", dest="remove-dead", action="store_true", default=False,
                        help="remove dead agents while rescheduling")
    parser.add_argument("--test-alive-for-hostname", dest="test-hostnames", action="append",
                        help="testing agent's healthy for given hostname")
    # parser.add_argument("--list", dest="list-agents", action="store_true", default=False,
    #                     help="list agents and some additional information")
    parser.add_argument("--external-bridge", dest="external-bridge", default="br-ex",
                        help="external bridge name", metavar="IFACE")
    parser.add_argument("--integration-bridge", dest="integration-bridge", default="br-int",
                        help="integration bridge name", metavar="IFACE")
    parser.add_argument("-l", "--log", dest="log", action="store",
                        help="log file or logging.conf location")
    parser.add_argument("--noop", dest="noop", action="store_true", default=False,
                        help="do not execute, print to log instead")
    parser.add_argument("--debug", dest="debug", action="store_true", default=False,
                        help="debug")
    args = parser.parse_args()
    # if len(args) != 1:
    #     parser.error("incorrect number of arguments")
    #     parser.print_help() args = parser.parse_args()
    #setup logging
    if args.debug:
        _log_level = logging.DEBUG
    else:
        _log_level = logging.INFO
    if not args.log:
        # log config or file not given -- log to console
        LOG = logging.getLogger(LOG_NAME)  # do not move to UP of file
        _log_handler = logging.StreamHandler(sys.stdout)
        _log_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
        LOG.addHandler(_log_handler)
        LOG.setLevel(_log_level)
    elif args.log.split(os.sep)[-1] == 'logging.conf':
        # setup logging by external file
        import logging.config
        logging.config.fileConfig(args.log)
        LOG = logging.getLogger(LOG_NAME)  # do not move to UP of file
    else:
        # log to given file
        LOG = logging.getLogger(LOG_NAME)  # do not move to UP of file
        LOG.addHandler(logging.handlers.WatchedFileHandler(args.log))
        LOG.setLevel(_log_level)
    LOG.info("Started: {0}".format(' '.join(sys.argv)))
    cleaner = NeutronCleaner(get_authconfig(args.authconf), options=vars(args), log=LOG)
    rc = 0
    # Health-check mode takes precedence over cleaning: only the first agent
    # type is tested, and its OCF return code becomes the exit status.
    if vars(args).get('test-hostnames'):
        rc = cleaner.test_healthy(args.agent[0])
    else:
        for i in args.agent:
            cleaner.do(i)
    LOG.debug("End.")
    sys.exit(rc)
#
###
| |
#!/usr/bin/env python
#Profile Plotter
#Programmer: Philip Bulsink
#Licence: BSD
#Plots reaction profiles using matplotlib by reading in energies from a file.
#See the readme.md file for more information
from profile_plotter_helpers import *
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.path as Path
import argparse
from os import path
class PlotEntry:
    """Holds individual entries to the plot object"""
    def __init__(self, number, connected_to, colour, energy, image, text):
        # NOTE(review): number is validated twice on purpose or not -- xindex
        # starts from the same value and is later rewritten by PlotInfo.
        self.number = self.add_number(number)
        self.xindex = self.add_number(number)
        self.add_connected(connected_to)
        self.colour = self.add_colour(colour.strip())
        self.energy = self.add_energy(energy)
        self.image = self.check_image(image)
        # Label text, truncated to 20 characters.
        self.text = text[:20].strip()
    def add_colour(self, colour):
        """
        make sure the colour is in the list and standardize the possible output
        else return black
        """
        if colour.lower() in COLOURS:
            return colour.lower()
        # Accept the input when it is a substring of a known colour name.
        # NOTE(review): this returns the abbreviation itself, not the matched
        # key -- confirm downstream (matplotlib) accepts such values.
        for key in COLOURS:
            if colour.lower() in key:
                return colour.lower()
        return 'black'
    def check_image(self, image):
        """
        Ensure the plot file exists.. Maybe change to checking for image file?
        """
        if image != "" and check_files_exist([image]):
            return image
        return None
    def add_number(self, number):
        """Make sure number makes sense"""
        if is_positive_int(number):
            return int(number)
        else:
            print "Number {} not valid.".format(number)
            raise FormatError(number, "Number {} not valid.".format(number))
    def add_connected(self, connection):
        # Store the 1-based index of the entry this point connects to, or ''
        # when absent (PlotInfo fills in a default later).
        if is_positive_int(connection):
            self.connected_to = int(connection)
        else:
            self.connected_to = ''
    def add_energy(self, eng):
        # Energies are kept as floats, still in the input units at this point.
        if is_float(eng):
            return float(eng)
        else:
            raise FormatError(eng, "Energy {} not valid.".format(eng))
class PlotInfo:
    """Holds the info for the plot as an object.

    Collects plot metadata (title, output file, dimensions, units), parses
    the per-point data lines into PlotEntry objects, and pre-computes the
    line segments and text labels consumed by prepare_plot().
    """
    def __init__(self):
        self.title = ''
        self.filename = ''
        self.filetype = 'png'
        self.width = 600
        self.height = 400
        self.inunits = 'hartree'
        self.outunits = 'kj/mol'
        # 1-based index of the data line used as the zero-energy reference.
        self.reference_line = 1
        self.maxenergy = -0.0
        self.minenergy = 0.0
        self.maxxindex = 0
        self.textheight = 0.0
        self.textwidth = 0.4
        self.textlocations = list()
    def add_title(self, title):
        """Adds a title to the PlotInfo"""
        self.title = title
    def add_filename(self, filename):
        """Adds a filename to the PlotInfo, normalizing its extension."""
        self.filename = filename
        ftype = path.splitext(filename)[1][1:]
        self.filetype = check_format(ftype)
        if ftype != self.filetype:
            # Replace an unsupported extension with the validated one.
            self.filename = path.splitext(filename)[0] + '.' + self.filetype
    def add_dimensions(self, dimensions):
        """Adds dimensions ("W, H" or "W, H, DPI") to the PlotInfo."""
        dim = dimensions.split(', ')
        if len(dim) == 2:
            try:
                self.width = int(dim[0])
                self.height = int(dim[1])
            except ValueError:
                raise DimensionError(dimensions)
            else:
                # DPI defaults to 1200 when not given.
                self.dpi = 1200
        elif len(dim) == 3:
            try:
                self.width = int(dim[0])
                self.height = int(dim[1])
                self.dpi = int(dim[2])
            except ValueError:
                raise DimensionError(dimensions)
        else:
            raise DimensionError(dimensions)
    def add_units(self, units):
        """Parses "inunits, outunits" and stores both."""
        u = units.split(', ')
        if len(u) != 2:
            raise UnitError(units)
        self.add_inunits(u[0])
        self.add_outunits(u[1])
    def add_inunits(self, units):
        """Adds input units to the PlotInfo"""
        for u in UNIT_LIST:
            if units.lower().strip() == u:
                self.inunits = u
                break
    def add_outunits(self, units):
        """Adds output units to the PlotInfo"""
        for u in UNIT_LIST:
            if units.lower().strip() == u:
                self.outunits = u
                break
    def add_reference(self, reference):
        """Adds zero reference to the PlotInfo"""
        if is_int(reference):
            self.reference_line = int(reference)
        else:
            raise FormatError(reference, "Error in reference line syntax: {}."
                              .format(reference))
    def parsedata(self, inputdata):
        """Parse the rest of the data into the Infolist"""
        self.plotdata = list()
        for l in inputdata:
            if l != '':
                d = l.split(',')
                if len(d) == 3:
                    # BUG FIX: d holds strings, so the old ``d[0] - 1`` raised
                    # TypeError. Default the connection to the previous
                    # entry's number as an int.
                    d.append(int(d[0]) - 1)
                while len(d) < 6:
                    d.append("")
                self.plotdata.append(PlotEntry(d[0], d[3], d[4], d[1], d[5], d[2]))
        if self.reference_line > len(self.plotdata):
            raise FormatError(self.reference_line,
                              "Can't reference to line {}. Only {} lines exist."
                              .format(self.reference_line, len(self.plotdata)))
        #fixing the xindex values for the datapoints, and changing energy to
        #output units
        self.plotdata[0].xindex = 1
        self.plotdata[0].energy = convert_units(float(self.plotdata[0].energy),
                                                self.inunits, self.outunits)
        self.maxenergy = self.plotdata[0].energy
        self.minenergy = self.plotdata[0].energy
        self.maxxindex = self.plotdata[0].xindex
        for p in self.plotdata[1:]:
            if not p.connected_to:
                # No explicit connection: chain to the previous entry.
                p.add_connected(p.number - 1)
            p.xindex = self.plotdata[p.connected_to - 1].xindex+1
            p.energy = convert_units(p.energy, self.inunits, self.outunits)
            if p.energy < self.minenergy:
                self.minenergy = p.energy
            elif p.energy > self.maxenergy:
                self.maxenergy = p.energy
            if p.xindex > self.maxxindex:
                self.maxxindex = p.xindex
        #zeroing plotdata to reference (if needed)
        if self.reference_line != 0:
            ref = self.plotdata[self.reference_line - 1].energy
            for p in self.plotdata:
                p.energy = p.energy - ref
            self.maxenergy = self.maxenergy - ref
            self.minenergy = self.minenergy - ref
    def generate_vectors(self):
        """Build the drawing primitives for the plot.

        Each vector is ([x1, x2], [y1, y2], linestyle, colour, linewidth);
        each text is [x, y, string, colour, va, ha, background, is_delta].
        """
        self.vectors = list()
        self.texts = list()
        self.dx = 24
        # Vertical text offset in data units (half of dx pixels).
        self.movex = self.dx/2*(self.maxenergy-self.minenergy)/self.height
        #vectors are ([x,x], [y,y], 'colour,style')
        for i in range(len(self.plotdata)):
            pthis = self.plotdata[i]
            if i != len(self.plotdata) - 1:
                pnext = self.plotdata[i+1]
            else:
                pnext = False
            # Horizontal level bar for this point.
            point = [[pthis.xindex - 0.165, pthis.xindex + 0.165],
                     [pthis.energy, pthis.energy], '-', pthis.colour, 3]
            self.vectors.append(point)
            # Label above the bar, energy value below it.
            t = [pthis.xindex, pthis.energy+self.movex, pthis.text,
                 pthis.colour, 'bottom', 'center', 'white', False]
            self.texts.append(t)
            engval = "{0:.1f}".format(pthis.energy)
            t = [pthis.xindex,
                 pthis.energy-self.movex, engval,
                 pthis.colour, 'top', 'center', 'white', False]
            self.texts.append(t)
            if pnext:
                if pnext.connected_to != pthis.number:
                    # The next point connects to an earlier entry; draw the
                    # connector from there instead.
                    pthis = self.plotdata[pnext.connected_to - 1]
                # Dashed connector plus the energy-difference label.
                point = [[pthis.xindex + 0.165, pnext.xindex - 0.165],
                         [pthis.energy, pnext.energy], '--', pnext.colour, 1]
                self.vectors.append(point)
                yax = (pthis.energy + pnext.energy)/2
                engval = "({0:.1f})".format(pnext.energy - pthis.energy)
                if pnext.energy - pthis.energy > 0:
                    va = 'top'
                else:
                    va = 'bottom'
                t = [pthis.xindex+0.55, yax, engval, pnext.colour, va, 'left',
                     None, True]
                self.texts.append(t)
    def find_overlaps(self):
        """This iterates the self.texts and finds all instances of overlapping
        text. Nudges up and down to try get around overlaps."""
        more_overlaps = True
        icounter = 0
        # Cap at 20 passes so pathological layouts cannot loop forever.
        while more_overlaps and icounter < 20:
            more_overlaps = False
            icounter += 1
            for i in range(len(self.texts)):
                # j < i always, so the old ``if not j == i`` guard was
                # redundant and has been removed.
                for j in range(i):
                    # rec=[x1, y1, x2, y2], built from the rendered bounding
                    # boxes stored at index 8 by prepare_plot().
                    dx = self.texts[i][8][1][0]-self.texts[i][8][0][0]
                    dy = self.texts[i][8][1][1]-self.texts[i][8][0][1]
                    reca = [self.texts[i][0],
                            self.texts[i][1],
                            self.texts[i][0] + dx,
                            self.texts[i][1] + dy]
                    dx = self.texts[j][8][1][0]-self.texts[j][8][0][0]
                    dy = self.texts[j][8][1][1]-self.texts[j][8][0][1]
                    recb = [self.texts[j][0],
                            self.texts[j][1],
                            self.texts[j][0] + dx,
                            self.texts[j][1] + dy]
                    if test_overlap(reca, recb):
                        self.fix_overlap(i, j)
                        more_overlaps = True
    def fix_overlap(self, refa, refb):
        """Overlap fixes can be in multiple directions:
        IF one is ascending and one is decending:
            move ascending up, descending down
        IF Both same directions, same Y value
            If one is larger value than other, move larger +ve up, -ve down
            If same value:
                move refa up, refb down <-- error also
        IF otherwise:
            move higher value up, lower value down
        """
        # Two plain (non-delta) labels are never nudged.
        if not self.texts[refa][7] and not self.texts[refb][7]:
            return
        delta = finddelta(self.texts[refa][1], self.texts[refb][1],
                          self.textheight) + 0.1
        if self.texts[refa][7] and self.texts[refb][7]:
            vala = clean_float(self.texts[refa][2])
            valb = clean_float(self.texts[refb][2])
            if vala == '' or valb == '' or vala == valb:
                # +/- based on y
                if self.texts[refa][1] > self.texts[refb][1]:
                    self.texts[refa][1] = self.texts[refa][1] + delta/2
                    self.texts[refb][1] = self.texts[refb][1] - delta/2
                else:
                    self.texts[refa][1] = self.texts[refa][1] - delta/2
                    self.texts[refb][1] = self.texts[refb][1] + delta/2
            elif vala > 0 and valb < 0:
                # +/- based on vala/b
                self.texts[refa][1] = self.texts[refa][1] + delta/2
                self.texts[refb][1] = self.texts[refb][1] - delta/2
            elif vala < 0 and valb > 0:
                # +/- based on vala/b +/-
                self.texts[refa][1] = self.texts[refa][1] - delta/2
                self.texts[refb][1] = self.texts[refb][1] + delta/2
            else:
                # +/- based on magnitude vala/b
                if vala > valb:
                    self.texts[refa][1] = self.texts[refa][1] + delta/2
                    self.texts[refb][1] = self.texts[refb][1] - delta/2
                else:
                    self.texts[refa][1] = self.texts[refa][1] - delta/2
                    self.texts[refb][1] = self.texts[refb][1] + delta/2
        elif not self.texts[refb][7]:
            # Only refa is movable: push it away from refb.
            if self.texts[refa][1] > self.texts[refb][1]:
                self.texts[refa][1] = self.texts[refa][1] + delta
            else:
                self.texts[refa][1] = self.texts[refa][1] - delta
        elif not self.texts[refa][7]:
            # Only refb is movable: push it away from refa.
            if self.texts[refa][1] > self.texts[refb][1]:
                self.texts[refb][1] = self.texts[refb][1] - delta
            else:
                self.texts[refb][1] = self.texts[refb][1] + delta
        return
def parse_file(ifile):
    """Parse the file and get all the information needed.

    Reads the cleaned input lines, fills a PlotInfo from the five header
    lines (title, filename, dimensions, units, reference) and the remaining
    data lines, then pre-computes the drawing vectors.
    """
    inputfile = read_clean_file(ifile)
    inputdir = path.dirname(ifile)
    plot = PlotInfo()
    plot.add_title(inputfile[0])
    # BUG FIX: the module imports only ``from os import path``, so the bare
    # name ``os`` was undefined here and raised NameError on every run.
    plot.add_filename(path.join(inputdir, inputfile[1]))
    plot.add_dimensions(inputfile[2])
    plot.add_units(inputfile[3])
    plot.add_reference(inputfile[4])
    plot.parsedata(inputfile[5:])
    plot.generate_vectors()
    return plot
def prepare_plot(plot):
    """Setup and save the plot"""
    #start up plot
    fig = plt.figure(frameon=False)
    r = fig.canvas.get_renderer()
    # NOTE(review): under Python 2 these are integer divisions; with the
    # defaults (width=600, dpi=1200) they truncate to 0 inches -- confirm
    # real inputs always use width/height larger than dpi.
    w = plot.width/plot.dpi
    h = plot.height/plot.dpi
    fig.set_size_inches(w, h)
    plt.clf()
    ax = plt.subplot()
    ax.set_xlim(0., plot.maxxindex+1)
    ax.set_ylim(plot.minenergy, plot.maxenergy)
    # First pass: draw everything once so each text's rendered bounding box
    # can be measured in data coordinates.
    for v in plot.vectors:
        ax.plot(v[0], v[1], v[2], color = v[3], lw = v[4])
    for t in plot.texts:
        tbox = ax.text(t[0], t[1], t[2], color = t[3], va = t[4], ha = t[5], transform=ax.transData)
        bb = tbox.get_window_extent(renderer=r)
        # Append the bbox (becomes t[8]) and re-anchor the entry at the
        # bbox's lower-left corner for overlap detection.
        t.append(ax.transData.inverted().transform(bb))
        t[0] = ax.transData.inverted().transform(bb)[0][0]
        t[1] = ax.transData.inverted().transform(bb)[0][1]
        t[4] = 'bottom'
        t[5] = 'left'
    plot.find_overlaps()
    # Second pass: redraw from scratch with nudged, non-overlapping labels.
    plt.clf()
    ax = plt.subplot()
    ax.set_xlim(0., plot.maxxindex+1)
    ax.set_ylim(plot.minenergy, plot.maxenergy)
    plt.axis("off")
    for v in plot.vectors:
        ax.plot(v[0], v[1], v[2], color = v[3], lw = v[4], zorder=1)
    for t in plot.texts:
        if t[6]:
            ax.text(t[0], t[1], t[2], color = t[3], va = t[4], ha = t[5],
                    backgroundcolor = t[6], zorder=2)
        else:
            ax.text(t[0], t[1], t[2], color = t[3], va = t[4], ha = t[5],
                    zorder=3)
    #plt.show()
    plt.savefig(plot.filename, format=plot.filetype)#, bbox_inches='tight')
def main():
    """Run the code: parse CLI arguments, read the data file, save the plot."""
    # BUG FIX: the first positional argument of ArgumentParser is ``prog``,
    # and "%prog" is optparse syntax anyway -- pass a description instead.
    parser = argparse.ArgumentParser(description='Plot a reaction profile from an input data file.')
    parser.add_argument('file',
                        help="Read plot data from input FILE")
    args = parser.parse_args()
    infile = args.file
    #check file exists, if not request file.
    if not check_files_exist([infile]):
        exit()
    #file is good. let's go!
    plot = parse_file(infile)
    prepare_plot(plot)
    # print() with a single argument behaves identically on Python 2 and 3.
    print("OK!")
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an API for generating Event protocol buffers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import plugin_asset
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
from tensorflow.python.util.tf_export import tf_export
_PLUGINS_DIR = "plugins"
class SummaryToEventTransformer(object):
  """Abstractly implements the SummaryWriter API.
  This API basically implements a number of endpoints (add_summary,
  add_session_log, etc). The endpoints all generate an event protobuf, which is
  passed to the contained event_writer.
  """
  def __init__(self, event_writer, graph=None, graph_def=None):
    """Creates a `SummaryWriter` and an event file.
    On construction the summary writer creates a new event file in `logdir`.
    This event file will contain `Event` protocol buffers constructed when you
    call one of the following functions: `add_summary()`, `add_session_log()`,
    `add_event()`, or `add_graph()`.
    If you pass a `Graph` to the constructor it is added to
    the event file. (This is equivalent to calling `add_graph()` later).
    TensorBoard will pick the graph from the file and display it graphically so
    you can interactively explore the graph you built. You will usually pass
    the graph from the session in which you launched it:
    ```python
    ...create a graph...
    # Launch the graph in a session.
    sess = tf.Session()
    # Create a summary writer, add the 'graph' to the event file.
    writer = tf.summary.FileWriter(<some-directory>, sess.graph)
    ```
    Args:
      event_writer: An EventWriter. Implements add_event and get_logdir.
      graph: A `Graph` object, such as `sess.graph`.
      graph_def: DEPRECATED: Use the `graph` argument instead.
    """
    self.event_writer = event_writer
    # For storing used tags for session.run() outputs.
    self._session_run_tags = {}
    if graph is not None or graph_def is not None:
      # Calling it with both graph and graph_def for backward compatibility.
      self.add_graph(graph=graph, graph_def=graph_def)
      # Also export the meta_graph_def in this case.
      # graph may itself be a graph_def due to positional arguments
      maybe_graph_as_def = (graph.as_graph_def(add_shapes=True)
                            if isinstance(graph, ops.Graph) else graph)
      self.add_meta_graph(
          meta_graph.create_meta_graph_def(graph_def=graph_def or
                                           maybe_graph_as_def))
    # This set contains tags of Summary Values that have been encountered
    # already. The motivation here is that the SummaryWriter only keeps the
    # metadata property (which is a SummaryMetadata proto) of the first Summary
    # Value encountered for each tag. The SummaryWriter strips away the
    # SummaryMetadata for all subsequent Summary Values with tags seen
    # previously. This saves space.
    self._seen_summary_tags = set()
  def add_summary(self, summary, global_step=None):
    """Adds a `Summary` protocol buffer to the event file.
    This method wraps the provided summary in an `Event` protocol buffer
    and adds it to the event file.
    You can pass the result of evaluating any summary op, using
    @{tf.Session.run} or
    @{tf.Tensor.eval}, to this
    function. Alternatively, you can pass a `tf.Summary` protocol
    buffer that you populate with your own data. The latter is
    commonly done to report evaluation results in event files.
    Args:
      summary: A `Summary` protocol buffer, optionally serialized as a string.
      global_step: Number. Optional global step value to record with the
        summary.
    """
    if isinstance(summary, bytes):
      # Accept a serialized Summary and deserialize it first.
      summ = summary_pb2.Summary()
      summ.ParseFromString(summary)
      summary = summ
    # We strip metadata from values with tags that we have seen before in order
    # to save space - we just store the metadata on the first value with a
    # specific tag.
    for value in summary.value:
      if not value.metadata:
        continue
      if value.tag in self._seen_summary_tags:
        # This tag has been encountered before. Strip the metadata.
        value.ClearField("metadata")
        continue
      # We encounter a value with a tag we have not encountered previously. And
      # it has metadata. Remember to strip metadata from future values with this
      # tag string.
      self._seen_summary_tags.add(value.tag)
    event = event_pb2.Event(summary=summary)
    self._add_event(event, global_step)
  def add_session_log(self, session_log, global_step=None):
    """Adds a `SessionLog` protocol buffer to the event file.
    This method wraps the provided session in an `Event` protocol buffer
    and adds it to the event file.
    Args:
      session_log: A `SessionLog` protocol buffer.
      global_step: Number. Optional global step value to record with the
        summary.
    """
    event = event_pb2.Event(session_log=session_log)
    self._add_event(event, global_step)
  def _add_graph_def(self, graph_def, global_step=None):
    # Serialize the GraphDef once and attach the bytes to a new event.
    graph_bytes = graph_def.SerializeToString()
    event = event_pb2.Event(graph_def=graph_bytes)
    self._add_event(event, global_step)
  def add_graph(self, graph, global_step=None, graph_def=None):
    """Adds a `Graph` to the event file.
    The graph described by the protocol buffer will be displayed by
    TensorBoard. Most users pass a graph in the constructor instead.
    Args:
      graph: A `Graph` object, such as `sess.graph`.
      global_step: Number. Optional global step counter to record with the
        graph.
      graph_def: DEPRECATED. Use the `graph` parameter instead.
    Raises:
      ValueError: If both graph and graph_def are passed to the method.
    """
    if graph is not None and graph_def is not None:
      raise ValueError("Please pass only graph, or graph_def (deprecated), "
                       "but not both.")
    if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph):
      # The user passed a `Graph`.
      # Check if the user passed it via the graph or the graph_def argument and
      # correct for that.
      if not isinstance(graph, ops.Graph):
        logging.warning("When passing a `Graph` object, please use the `graph`"
                        " named argument instead of `graph_def`.")
        graph = graph_def
      # Serialize the graph with additional info.
      true_graph_def = graph.as_graph_def(add_shapes=True)
      self._write_plugin_assets(graph)
    elif (isinstance(graph, graph_pb2.GraphDef) or
          isinstance(graph_def, graph_pb2.GraphDef)):
      # The user passed a `GraphDef`.
      logging.warning("Passing a `GraphDef` to the SummaryWriter is deprecated."
                      " Pass a `Graph` object instead, such as `sess.graph`.")
      # Check if the user passed it via the graph or the graph_def argument and
      # correct for that.
      if isinstance(graph, graph_pb2.GraphDef):
        true_graph_def = graph
      else:
        true_graph_def = graph_def
    else:
      # The user passed neither `Graph`, nor `GraphDef`.
      raise TypeError("The passed graph must be an instance of `Graph` "
                      "or the deprecated `GraphDef`")
    # Finally, add the graph_def to the summary writer.
    self._add_graph_def(true_graph_def, global_step)
  def _write_plugin_assets(self, graph):
    # Persist each plugin's assets under <logdir>/plugins/<plugin_name>/.
    plugin_assets = plugin_asset.get_all_plugin_assets(graph)
    logdir = self.event_writer.get_logdir()
    for asset_container in plugin_assets:
      plugin_name = asset_container.plugin_name
      plugin_dir = os.path.join(logdir, _PLUGINS_DIR, plugin_name)
      gfile.MakeDirs(plugin_dir)
      assets = asset_container.assets()
      for (asset_name, content) in assets.items():
        asset_path = os.path.join(plugin_dir, asset_name)
        with gfile.Open(asset_path, "w") as f:
          f.write(content)
  def add_meta_graph(self, meta_graph_def, global_step=None):
    """Adds a `MetaGraphDef` to the event file.
    The `MetaGraphDef` allows running the given graph via
    `saver.import_meta_graph()`.
    Args:
      meta_graph_def: A `MetaGraphDef` object, often as returned by
        `saver.export_meta_graph()`.
      global_step: Number. Optional global step counter to record with the
        graph.
    Raises:
      TypeError: If `meta_graph_def` is not an instance of `MetaGraphDef`.
    """
    if not isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):
      raise TypeError("meta_graph_def must be type MetaGraphDef, saw type: %s" %
                      type(meta_graph_def))
    meta_graph_bytes = meta_graph_def.SerializeToString()
    event = event_pb2.Event(meta_graph_def=meta_graph_bytes)
    self._add_event(event, global_step)
  def add_run_metadata(self, run_metadata, tag, global_step=None):
    """Adds a metadata information for a single session.run() call.
    Args:
      run_metadata: A `RunMetadata` protobuf object.
      tag: The tag name for this metadata.
      global_step: Number. Optional global step counter to record with the
        StepStats.
    Raises:
      ValueError: If the provided tag was already used for this type of event.
    """
    if tag in self._session_run_tags:
      raise ValueError("The provided tag was already used for this event type")
    self._session_run_tags[tag] = True
    tagged_metadata = event_pb2.TaggedRunMetadata()
    tagged_metadata.tag = tag
    # Store the `RunMetadata` object as bytes in order to have postponed
    # (lazy) deserialization when used later.
    tagged_metadata.run_metadata = run_metadata.SerializeToString()
    event = event_pb2.Event(tagged_run_metadata=tagged_metadata)
    self._add_event(event, global_step)
  def _add_event(self, event, step):
    # Stamp wall-clock time and (optionally) the global step, then hand the
    # event to the underlying writer.
    event.wall_time = time.time()
    if step is not None:
      event.step = int(step)
    self.event_writer.add_event(event)
@tf_export("summary.FileWriter")
class FileWriter(SummaryToEventTransformer):
  """Writes `Summary` protocol buffers to event files.
  The `FileWriter` class provides a mechanism to create an event file in a
  given directory and add summaries and events to it. The class updates the
  file contents asynchronously. This allows a training program to call methods
  to add data to the file directly from the training loop, without slowing down
  training.
  """
  def __init__(self,
               logdir,
               graph=None,
               max_queue=10,
               flush_secs=120,
               graph_def=None,
               filename_suffix=None):
    """Creates a `FileWriter` and an event file.
    On construction the summary writer creates a new event file in `logdir`.
    This event file will contain `Event` protocol buffers constructed when you
    call one of the following functions: `add_summary()`, `add_session_log()`,
    `add_event()`, or `add_graph()`.
    If you pass a `Graph` to the constructor it is added to
    the event file. (This is equivalent to calling `add_graph()` later).
    TensorBoard will pick the graph from the file and display it graphically so
    you can interactively explore the graph you built. You will usually pass
    the graph from the session in which you launched it:
    ```python
    ...create a graph...
    # Launch the graph in a session.
    sess = tf.Session()
    # Create a summary writer, add the 'graph' to the event file.
    writer = tf.summary.FileWriter(<some-directory>, sess.graph)
    ```
    The other arguments to the constructor control the asynchronous writes to
    the event file:
    * `flush_secs`: How often, in seconds, to flush the added summaries
      and events to disk.
    * `max_queue`: Maximum number of summaries or events pending to be
      written to disk before one of the 'add' calls block.
    Args:
      logdir: A string. Directory where event file will be written.
      graph: A `Graph` object, such as `sess.graph`.
      max_queue: Integer. Size of the queue for pending events and summaries.
      flush_secs: Number. How often, in seconds, to flush the
        pending events and summaries to disk.
      graph_def: DEPRECATED: Use the `graph` argument instead.
      filename_suffix: A string. Every event file's name is suffixed with
        `suffix`.
    Raises:
      RuntimeError: If called with eager execution enabled.
    @compatibility(eager)
    `FileWriter` is not compatible with eager execution. To write TensorBoard
    summaries under eager execution, use `tf.contrib.summary` instead.
    @end_compatibility
    """
    if context.in_eager_mode():
      raise RuntimeError(
          "tf.summary.FileWriter is not compatible with eager execution. "
          "Use tf.contrib.summary instead.")
    event_writer = EventFileWriter(logdir, max_queue, flush_secs,
                                   filename_suffix)
    super(FileWriter, self).__init__(event_writer, graph, graph_def)
  def __enter__(self):
    """Make usable with "with" statement."""
    return self
  def __exit__(self, unused_type, unused_value, unused_traceback):
    """Make usable with "with" statement."""
    self.close()
  def get_logdir(self):
    """Returns the directory where event file will be written."""
    return self.event_writer.get_logdir()
  def add_event(self, event):
    """Adds an event to the event file.
    Args:
      event: An `Event` protocol buffer.
    """
    self.event_writer.add_event(event)
  def flush(self):
    """Flushes the event file to disk.
    Call this method to make sure that all pending events have been written to
    disk.
    """
    self.event_writer.flush()
  def close(self):
    """Flushes the event file to disk and close the file.
    Call this method when you do not need the summary writer anymore.
    """
    self.event_writer.close()
  def reopen(self):
    """Reopens the EventFileWriter.
    Can be called after `close()` to add more events in the same directory.
    The events will go into a new events file.
    Does nothing if the EventFileWriter was not closed.
    """
    self.event_writer.reopen()
| |
import cv2
import math
import pandas as pd
import numpy as np
import time, sys, os, shutil
import yaml
from multiprocessing import Process, Queue
from Queue import Empty
import random
import imageFeatures as imf
import pickle
from sklearn import gaussian_process
"""
# This script collects data
if len(sys.argv) < 2:
print "No configuration file specified"
collectData = False
config = None
else:
collectData = True
try:
with open(sys.argv[1]) as f:
config = yaml.load(f.read())
except:
print "Error:", sys.exc_info()[0]
raise
"""
def currentTimestamp():
    """Return the current wall-clock time as a pandas Timestamp.

    The epoch seconds are scaled to nanoseconds, which is the unit
    pd.Timestamp expects for a plain numeric argument.
    """
    now_ns = time.time() * 1000000000
    return pd.Timestamp(now_ns)
def imageSaver(foldername, q):
while True:
toSave = None
try:
toSave = q.get(True, 1)
except Empty:
pass
if toSave != None:
if toSave == False:
print "Done"
break
name, frame = toSave
cv2.imwrite(foldername + '/' + name, frame, [cv2.IMWRITE_PNG_COMPRESSION, 9])
print "Wrote", foldername + '/' + name
"""
if collectData:
# Parse the configuration file
if 'settingsFile' in config:
rdf = pd.read_csv(config['settingsFile'])
totalFrames = len(rdf)
gains0 = rdf['Gain 0']
shutters0 = rdf['Shutter 0']
gains1 = rdf['Gain 1']
shutters1 = rdf['Shutter 1']
timestamps = pd.Series([currentTimestamp()] * totalFrames)
features = pd.Series([0] * totalFrames)
imageFiles0 = pd.Series([''] * totalFrames)
imageFiles1 = pd.Series([''] * totalFrames)
frames = rdf['Frame']
"""
frames = pd.Series([], dtype=int, name='Frame')
data = pd.DataFrame(index=frames)
params = {}
def setParam(name, x):
params[name] = x
print 'Run name:',
shortname = raw_input()
cv2.namedWindow('frame')
while True:
print 'Parameter name (empty to terminate):',
name = raw_input()
if name != '':
params[name] = 0
print 'max:',
pmax = int(raw_input())
cv2.createTrackbar(name, 'frame', 0, pmax, lambda x: setParam(name, x))
else:
break
# Change 0 to the index that works
cap0 = cv2.VideoCapture(0)
cap1 = cv2.VideoCapture(1)
# Create the output directory and copy over stuff
for i in range(100):
foldername = 'data/' + shortname + '_' + str(i)
if not os.path.exists(foldername):
os.makedirs(foldername)
break
"""
shutil.copy(sys.argv[1], foldername)
if 'settingsFile' in config:
shutil.copy(config['settingsFile'], foldername)
"""
# Trackbar callbacks forwarding slider values to the capture devices.
# The numeric ids are OpenCV capture properties:
#   15 = CAP_PROP_EXPOSURE (shutter), 14 = CAP_PROP_GAIN,
#   21 = CAP_PROP_AUTO_EXPOSURE -- TODO confirm against the cv2 build in use.
def setCap0Exposure(x):
    # Set shutter/exposure on camera 0.
    cap0.set(15,x)
def setCap1Exposure(x):
    # Set shutter/exposure on camera 1.
    cap1.set(15,x)
def setCap0Gain(x):
    # Set analog gain on camera 0.
    cap0.set(14,x)
def setCap1Gain(x):
    # Set analog gain on camera 1.
    cap1.set(14,x)
def setCap0Auto(x):
    # Toggle auto-exposure on camera 0.
    cap0.set(21,x)
def setCap1Auto(x):
    # Toggle auto-exposure on camera 1.
    cap1.set(21,x)
def findMeanLumSettings(oldSettings, oldFeatures, newFeatures):
    """Predict new (shutter, gain) from mean luminance via a linear model.

    Args:
        oldSettings: (shutter, gain) pair the previous frame was taken with.
        oldFeatures: mean luminance of the previous experimental frame.
        newFeatures: mean luminance of the current baseline frame.

    Returns:
        (shutter, gain) tuple from imf.exposureToSettings.

    NOTE(review): unlike findLinearFeatureLumSettings this does not clip the
    result to the camera's valid ranges -- confirm whether that is intended.
    """
    oldShutter, oldGain = oldSettings
    oldMeanLum = oldFeatures
    newMeanLum = newFeatures
    oldExposure = imf.settingsToExposure(oldShutter, oldGain)
    # Regression coefficients fitted offline.  (The original also assigned
    # placeholder newShutter/newGain values here that were immediately
    # overwritten -- dead stores, removed.)
    newExposure = 111.2148 + 0.6940*oldExposure - 2.7011*oldMeanLum + 2.6972*newMeanLum
    newShutter, newGain = imf.exposureToSettings(newExposure)
    return newShutter, newGain
def findLinearFeatureLumSettings(oldSettings, oldFeatures, newFeatures):
    """Map blur-weighted feature luminance to new (shutter, gain) settings.

    Applies an offline-fitted linear model in exposure space, then clamps
    the result to shutter range [1, 531] and gain range [16, 64].
    """
    shutterPrev, gainPrev = oldSettings
    exposurePrev = imf.settingsToExposure(shutterPrev, gainPrev)
    exposureNext = (-35.4155
                    + 0.7933 * exposurePrev
                    - 2.1544 * oldFeatures
                    + 2.856 * newFeatures)
    shutterNext, gainNext = imf.exposureToSettings(exposureNext)
    return np.clip(shutterNext, 1.0, 531.0), np.clip(gainNext, 16.0, 64.0)
# Gaussian-process exposure model trained offline (sklearn.gaussian_process).
# Opened in binary mode and closed via the context manager: the original
# pickle.load(open('gp_mean.p', 'r')) leaked the file handle, and text mode
# would corrupt the pickle stream on Windows.
with open('gp_mean.p', 'rb') as _gp_file:
    gp = pickle.load(_gp_file)
#params = ['Exposure 0', 'Contrast 0', 'Contrast 1', 'Blur Luminance 0', 'Blur Luminance 1', 'Mean Foreground Illumination 0', 'Mean BackGround Illumination 0', 'Mean Foreground Illumination 1', 'Mean BackGround Illumination 1']
def findGPSettings(params):
    """Predict (shutter, gain) from the GP model, clipped to camera limits.

    Args:
        params: feature vector for gp.predict (see commented list above).
    """
    newExposure = gp.predict(params)
    newShutter, newGain = imf.exposureToSettings(newExposure)
    return np.clip(newShutter,1.0,531.0), np.clip(newGain,16.0,64.0)
def usableMatch(matches, keypoints, keypointsBaseline):
    """Count RANSAC-verified feature matches between two frames.

    Args:
        matches: k=2 knn match pairs from FLANN (query side = baseline
            frame, train side = current frame).
        keypoints: keypoints of the current (train) frame.
        keypointsBaseline: keypoints of the baseline (query) frame.

    Returns:
        [numInliers, totalKeypoints]; numInliers is 0 when too few matches
        survive the ratio test to fit a homography.
    """
    # Lowe's ratio test: keep a match only when it is clearly better than
    # the second-best candidate.
    correctMatches = []
    minAmount = 5
    for m,n in matches:
        if m.distance <.75*n.distance:
            correctMatches.append(m)
    if len(correctMatches)>minAmount:
        dst_pts = np.float32([ keypoints[m.trainIdx].pt for m in correctMatches ])
        src_pts = np.float32([ keypointsBaseline[m.queryIdx].pt for m in correctMatches ])
        # Only the inlier mask of the homography is used; the matrix itself
        # is discarded.  (Removed the unused srcPts/dstPts lists and the
        # unused ransacMatches binding from the original.)
        _, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
        matchesMask = np.array(mask.ravel().tolist())
        numMatches = (matchesMask>.5).sum()
        efficiency = [numMatches, len(keypoints)]
    else:
        efficiency = [0, len(keypoints)]
    return efficiency
"""
if not collectData:
cv2.createTrackbar('Shutter Baseline', 'frame', 1, 531, setCap0Exposure)
cv2.createTrackbar('Gain Baseline', 'frame', 16, 64, setCap0Gain)
cv2.createTrackbar('Shutter Compared', 'frame', 1, 531, setCap1Exposure)
cv2.createTrackbar('Gain Compared', 'frame', 16, 64, setCap1Gain)
"""
# Helper variables
t = 0
i = 0
runNum = 0
startT = 0
expCam0 = True
writing = False
resetRun = False
index_params = dict(algorithm = 0, trees = 5)
search_params = dict(checks=50)
surf = cv2.SURF()
def surfDetectAndMatch(name, q, dq):
surf = cv2.SURF()
flann = cv2.FlannBasedMatcher(index_params, search_params)
oldFrame = None
oldKp = None
oldDesc = None
while True:
newFrame = None
try:
newFrame = q.get(True, 1)
print name + ": " + str(q.qsize()) + " left"
except Empty:
if oldFrame != None:
print name + ": Resetting"
oldFrame = None
if newFrame != None:
if newFrame == False:
dq.close()
kp = None
print name + ": Done"
break
if newFrame[2] == False:
kp, desc = surf.detectAndCompute(newFrame[1], None)
else:
kp_temp, desc = newFrame[1]
kp = [cv2.KeyPoint(x=p[0][0], y=p[0][1], _size=p[1], _angle=p[2], _response=p[3],
_octave=p[4], _class_id=p[5]) for p in kp_temp]
if oldFrame != None:
if newFrame[0] == oldFrame[0]:
print name + ": New run detected"
elif newFrame[0]-oldFrame[0] > 1:
print name + ": Warning, t mismatch!"
succTrackFeatures = 0
if desc != None and oldDesc != None:
matches = flann.knnMatch(oldDesc, desc, k=2)
efficiency = usableMatch(matches, kp, oldKp)
succTrackFeatures = efficiency[0]
dq.put((newFrame[0], succTrackFeatures))
oldFrame = newFrame
oldKp = kp
oldDesc = desc
# Cross-frame state for the exposure experiment.
oldParams = None
collectingGP = True
oldMeanLum = None
if cap0.isOpened() and cap1.isOpened():
    # Worker processes: one PNG writer, plus one SURF matcher per camera
    # fed via q0/q1 and reporting inlier counts back on dq0/dq1.
    q = Queue()
    p = Process(target=imageSaver, args=(foldername, q,))
    q0 = Queue()
    dq0 = Queue()
    p0 = Process(target=surfDetectAndMatch, args=("SDAM 0", q0, dq0,))
    q1 = Queue()
    dq1 = Queue()
    p1 = Process(target=surfDetectAndMatch, args=("SDAM 1", q1, dq1,))
    p.start()
    p0.start()
    p1.start()
    # Turn off white balance
    cap0.set(17, -4)
    cap0.set(26, -4)
    cap1.set(17, -4)
    cap1.set(26, -4)
    """
    if not collectData:
    cv2.setTrackbarPos('Shutter Baseline', 'frame', int(cap0.get(15)))
    cv2.setTrackbarPos('Gain Baseline', 'frame', int(cap0.get(14)))
    cv2.setTrackbarPos('Shutter Compared', 'frame', int(cap1.get(15)))
    cv2.setTrackbarPos('Gain Compared', 'frame', int(cap1.get(14)))
    """
    while True:
        i += 1
        ret0, frame0 = cap0.read()
        ret1, frame1 = cap1.read()
        if ret0 and ret1:
            # Cameras deliver raw Bayer frames; demosaic to BGR.
            frame0 = cv2.cvtColor(frame0, cv2.COLOR_BAYER_BG2BGR)
            frame1 = cv2.cvtColor(frame1, cv2.COLOR_BAYER_BG2BGR)
            disp = np.concatenate((frame0, frame1), axis=1)
            # Drain any finished match counts from the workers.
            try:
                t0, succTrackFeatures0 = dq0.get_nowait()
                data.loc[t0, 'Succesfully Tracked Features 0'] = succTrackFeatures0
            except Empty:
                pass
            try:
                t1, succTrackFeatures1 = dq1.get_nowait()
                data.loc[t1, 'Succesfully Tracked Features 1'] = succTrackFeatures1
            except Empty:
                pass
            # Only process every 7th frame so new camera settings have had
            # time to take effect.
            if writing and i > 6:
                gray0 = cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY)
                gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
                # Calculate image features
                if expCam0:
                    kp, desc = surf.detectAndCompute(gray0, None)
                    kp_temp = [(p.pt, p.size, p.angle, p.response, p.octave, p.class_id) for p in kp]
                    q0.put((t, (kp_temp, desc), True))
                    q1.put((t, gray1, False))
                    meanLum = imf.meanLuminance(gray0)
                    blurLum = imf.gaussianBlurfeatureLuminance(gray0, kp)
                    meanFg, meanBg = imf.weightedLuminance(gray0)
                    contrast = imf.contrast(gray0)
                    camSettings = (cap0.get(15), cap0.get(14))
                else:
                    kp, desc = surf.detectAndCompute(gray1, None)
                    kp_temp = [(p.pt, p.size, p.angle, p.response, p.octave, p.class_id) for p in kp]
                    q1.put((t, (kp_temp, desc), True))
                    q0.put((t, gray0, False))
                    meanLum = imf.meanLuminance(gray1)
                    blurLum = imf.gaussianBlurfeatureLuminance(gray1, kp)
                    meanFg, meanBg = imf.weightedLuminance(gray1)
                    contrast = imf.contrast(gray1)
                    camSettings = (cap1.get(15), cap1.get(14))
                newParams = (imf.settingsToExposure(camSettings[0], camSettings[1]),
                             contrast, blurLum, meanFg, meanBg)
                # 'is not None' rather than '!= None': gray frames are numpy
                # arrays and '!=' would compare elementwise.
                if oldGray0 is not None:
                    # Save raw data
                    data.loc[t, 'Timestamp'] = currentTimestamp()
                    data.loc[t, 'Run Number'] = runNum
                    data.loc[t, 'Baseline'] = 1 if expCam0 else 0
                    data.loc[t, 'Experimental Mean Luminance'] = meanLum
                    data.loc[t, 'Shutter 0'] = cap0.get(15)
                    data.loc[t, 'Gain 0'] = cap0.get(14)
                    data.loc[t, 'Shutter 1'] = cap1.get(15)
                    data.loc[t, 'Gain 1'] = cap1.get(14)
                    imgname0 = shortname + '_0_{:0>4d}.png'.format(t)
                    data.loc[t, 'Image File 0'] = imgname0
                    imgname1 = shortname + '_1_{:0>4d}.png'.format(t)
                    data.loc[t, 'Image File 1'] = imgname1
                    q.put((imgname0, frame0))
                    q.put((imgname1, frame1))
                    if collectingGP:
                        data.loc[t, 'Experimental Method'] = 'GP'
                        params = np.array([oldParams[0], oldMeanLum, meanLum])
                        newShutter, newGain = findGPSettings(params)
                    else:
                        data.loc[t, 'Experimental Method'] = 'linear_blur'
                        newShutter, newGain = findLinearFeatureLumSettings(oldCamSettings, oldBlurLum, blurLum)
                    # Determine new image settings
                    if expCam0:
                        cap0.set(14, newGain)
                        cap0.set(15, newShutter)
                    else:
                        cap1.set(14, newGain)
                        cap1.set(15, newShutter)
                t += 1
                oldGray0 = gray0
                oldGray1 = gray1
                oldParams = newParams
                oldBlurLum = blurLum
                oldCamSettings = camSettings
                oldMeanLum = meanLum
                i = 0
            cv2.putText(disp, "Frame: " + str(t-startT), (50,50),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
            cv2.putText(disp, "Baseline: " + ("1" if expCam0 else "0"), (50,80),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
            cv2.putText(disp, "GP" if collectingGP else "linear_blur", (50,110),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
            cv2.imshow('frame', disp)
        else:
            # Keep the driver pipeline moving even when a decode fails.
            cap0.grab()
            cap1.grab()
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        # The order is to press 'w' when starting a run, then press 'r' to do it again in a pair
        elif key == ord('w'):
            expCam0 = random.choice((True, False))
            resetRun = True
        elif key == ord('e'):
            resetRun = True
        elif key == ord('r'):
            expCam0 = not expCam0
            resetRun = True
        elif key == ord('s'):
            writing = False
            runNum += 1
        elif key == ord('g'):
            collectingGP = not collectingGP
        if resetRun:
            resetRun = False
            writing = True
            startT = t
            oldGray0 = None
            oldGray1 = None
            oldParams = None
            i = 0
            # To start off, set auto-exposure
            cap0.set(14, -2)
            cap0.set(15, -2)
            cap1.set(14, -2)
            cap1.set(15, -2)
    # Shut the workers down: a literal False is the sentinel understood by
    # both imageSaver and surfDetectAndMatch.
    q.put(False)
    q0.put(False)
    q1.put(False)
    q.close()
    dq0.close()
    dq1.close()
    q0.close()
    q1.close()
    #p.join()
    #p0.join()
    #p1.join()
if len(data) > 0:
    data.to_csv(foldername + '/' + shortname + '_rawdata.csv')
| |
# Copyright 2013 OpenStack Foundation
# Copyright 2014 Mirantis Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_config import cfg
from manila import context
from manila.db import base
from manila import exception
from manila.network.neutron import api as neutron_api
from manila.network.neutron import constants as neutron_constants
from manila import test
from manila.tests.db import fakes
CONF = cfg.CONF
class FakeNeutronClient(object):
    """Minimal stand-in for neutronclient's v2.0 Client.

    Create/update calls echo their body back so tests can assert on the
    payload; read-only calls return None and are mocked per-test when a
    canned response is needed.
    """

    # -- calls that echo the request body ---------------------------------
    def create_port(self, body):
        return body

    def update_port(self, port_id, body):
        return body

    def create_network(self, body):
        return body

    def create_subnet(self, body):
        return body

    def create_router(self, body):
        return body

    def update_router(self, router_id, body):
        return body

    # -- read-only / no-op stubs ------------------------------------------
    def delete_port(self, port_id):
        pass

    def show_port(self, port_id):
        pass

    def list_ports(self, **search_opts):
        pass

    def list_networks(self):
        pass

    def show_network(self, network_uuid):
        pass

    def show_subnet(self, subnet_uuid):
        pass

    def list_routers(self):
        pass

    def add_interface_router(self, router_id, subnet_id, port_id):
        pass

    def show_router(self, router_id):
        pass

    def list_extensions(self):
        pass
class NeutronApiTest(test.TestCase):
    """Unit tests for manila.network.neutron.api.API.

    setUp replaces clientv20.Client with FakeNeutronClient, so most tests
    exercise the API wrapper against the fake and only mock individual
    client methods when they need canned returns or injected failures.
    """

    def setUp(self):
        super(NeutronApiTest, self).setUp()
        self.context = context.get_admin_context()
        self.mock_object(base, 'Base', fakes.FakeModel)
        self.mock_object(
            clientv20, 'Client', mock.Mock(return_value=FakeNeutronClient()))
        self.neutron_api = neutron_api.API()

    def test_create_api_object(self):
        # instantiate Neutron API object
        neutron_api_instance = neutron_api.API()
        # Verify results
        self.assertTrue(clientv20.Client.called)
        self.assertTrue(hasattr(neutron_api_instance, 'client'))
        self.assertTrue(hasattr(neutron_api_instance, 'configuration'))
        self.assertEqual('DEFAULT', neutron_api_instance.config_group_name)

    def test_create_api_object_custom_config_group(self):
        # Set up test data
        fake_config_group_name = 'fake_config_group_name'
        # instantiate Neutron API object
        obj = neutron_api.API(fake_config_group_name)
        # Verify results
        self.assertTrue(clientv20.Client.called)
        self.assertTrue(hasattr(obj, 'client'))
        self.assertTrue(hasattr(obj, 'configuration'))
        self.assertEqual(
            fake_config_group_name, obj.configuration._group.name)

    def test_create_port_with_all_args(self):
        # Set up test data
        self.mock_object(self.neutron_api, '_has_port_binding_extension',
                         mock.Mock(return_value=True))
        port_args = {
            'tenant_id': 'test tenant', 'network_id': 'test net',
            'host_id': 'test host', 'subnet_id': 'test subnet',
            'fixed_ip': 'test ip', 'device_owner': 'test owner',
            'device_id': 'test device', 'mac_address': 'test mac',
            'security_group_ids': 'test group',
            'dhcp_opts': 'test dhcp',
        }
        # Execute method 'create_port'
        port = self.neutron_api.create_port(**port_args)
        # Verify results
        self.assertEqual(port['tenant_id'], port_args['tenant_id'])
        self.assertEqual(port['network_id'], port_args['network_id'])
        self.assertEqual(port['binding:host_id'], port_args['host_id'])
        self.assertEqual(port['fixed_ips'][0]['subnet_id'],
                         port_args['subnet_id'])
        self.assertEqual(port['fixed_ips'][0]['ip_address'],
                         port_args['fixed_ip'])
        self.assertEqual(port['device_owner'], port_args['device_owner'])
        self.assertEqual(port['device_id'], port_args['device_id'])
        self.assertEqual(port['mac_address'], port_args['mac_address'])
        self.assertEqual(port['security_groups'],
                         port_args['security_group_ids'])
        self.assertEqual(port['extra_dhcp_opts'], port_args['dhcp_opts'])
        self.neutron_api._has_port_binding_extension.assert_called_once_with()
        self.assertTrue(clientv20.Client.called)

    def test_create_port_with_required_args(self):
        # Set up test data
        self.mock_object(self.neutron_api, '_has_port_binding_extension',
                         mock.Mock(return_value=True))
        port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'}
        # Execute method 'create_port'
        port = self.neutron_api.create_port(**port_args)
        # Verify results
        self.assertEqual(port['tenant_id'], port_args['tenant_id'])
        self.assertEqual(port['network_id'],
                         port_args['network_id'])
        self.neutron_api._has_port_binding_extension.assert_called_once_with()
        self.assertTrue(clientv20.Client.called)

    @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock())
    def test_create_port_exception(self):
        # Set up test data
        self.mock_object(
            self.neutron_api, '_has_port_binding_extension',
            mock.Mock(return_value=True))
        self.mock_object(
            self.neutron_api.client, 'create_port',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))
        port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'}
        # Execute method 'create_port'
        self.assertRaises(exception.NetworkException,
                          self.neutron_api.create_port,
                          **port_args)
        # Verify results
        self.neutron_api._has_port_binding_extension.assert_called_once_with()
        self.assertTrue(neutron_api.LOG.exception.called)
        self.assertTrue(clientv20.Client.called)
        self.assertTrue(self.neutron_api.client.create_port.called)

    @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock())
    def test_create_port_exception_status_409(self):
        # Set up test data
        self.mock_object(
            self.neutron_api, '_has_port_binding_extension',
            mock.Mock(return_value=True))
        self.mock_object(
            self.neutron_api.client, 'create_port',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException(
                status_code=409)))
        port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'}
        # Execute method 'create_port'
        self.assertRaises(exception.PortLimitExceeded,
                          self.neutron_api.create_port,
                          **port_args)
        # Verify results
        self.neutron_api._has_port_binding_extension.assert_called_once_with()
        self.assertTrue(neutron_api.LOG.exception.called)
        self.assertTrue(clientv20.Client.called)
        self.assertTrue(self.neutron_api.client.create_port.called)

    def test_delete_port(self):
        # Set up test data
        self.mock_object(self.neutron_api.client, 'delete_port')
        port_id = 'test port id'
        # Execute method 'delete_port'
        self.neutron_api.delete_port(port_id)
        # Verify results
        self.neutron_api.client.delete_port.assert_called_once_with(port_id)
        self.assertTrue(clientv20.Client.called)

    def test_list_ports(self):
        # Set up test data
        search_opts = {'test_option': 'test_value'}
        fake_ports = [{'fake port': 'fake port info'}]
        self.mock_object(
            self.neutron_api.client, 'list_ports',
            mock.Mock(return_value={'ports': fake_ports}))
        # Execute method 'list_ports'
        ports = self.neutron_api.list_ports(**search_opts)
        # Verify results
        self.assertEqual(fake_ports, ports)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.list_ports.assert_called_once_with(
            **search_opts)

    def test_show_port(self):
        # Set up test data
        port_id = 'test port id'
        fake_port = {'fake port': 'fake port info'}
        self.mock_object(
            self.neutron_api.client, 'show_port',
            mock.Mock(return_value={'port': fake_port}))
        # Execute method 'show_port'
        port = self.neutron_api.show_port(port_id)
        # Verify results
        self.assertEqual(fake_port, port)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.show_port.assert_called_once_with(port_id)

    def test_get_network(self):
        # Set up test data
        network_id = 'test network id'
        fake_network = {'fake network': 'fake network info'}
        self.mock_object(
            self.neutron_api.client, 'show_network',
            mock.Mock(return_value={'network': fake_network}))
        # Execute method 'get_network'
        network = self.neutron_api.get_network(network_id)
        # Verify results
        self.assertEqual(fake_network, network)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.show_network.assert_called_once_with(
            network_id)

    def test_get_subnet(self):
        # Set up test data
        subnet_id = 'fake subnet id'
        self.mock_object(
            self.neutron_api.client, 'show_subnet',
            mock.Mock(return_value={'subnet': {}}))
        # Execute method 'get_subnet'
        subnet = self.neutron_api.get_subnet(subnet_id)
        # Verify results
        self.assertEqual({}, subnet)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.show_subnet.assert_called_once_with(
            subnet_id)

    def test_get_all_network(self):
        # Set up test data
        fake_networks = [{'fake network': 'fake network info'}]
        self.mock_object(
            self.neutron_api.client, 'list_networks',
            mock.Mock(return_value={'networks': fake_networks}))
        # Execute method 'get_all_networks'
        networks = self.neutron_api.get_all_networks()
        # Verify results
        self.assertEqual(fake_networks, networks)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.list_networks.assert_called_once_with()

    def test_list_extensions(self):
        # Set up test data
        extensions = [
            {'name': neutron_constants.PORTBINDING_EXT},
            {'name': neutron_constants.PROVIDER_NW_EXT},
        ]
        self.mock_object(
            self.neutron_api.client, 'list_extensions',
            mock.Mock(return_value={'extensions': extensions}))
        # Execute method 'list_extensions'
        result = self.neutron_api.list_extensions()
        # Verify results
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.list_extensions.assert_called_once_with()
        self.assertIn(neutron_constants.PORTBINDING_EXT, result)
        self.assertIn(neutron_constants.PROVIDER_NW_EXT, result)
        self.assertEqual(
            extensions[0], result[neutron_constants.PORTBINDING_EXT])
        self.assertEqual(
            extensions[1], result[neutron_constants.PROVIDER_NW_EXT])

    def test_create_network(self):
        # Set up test data
        net_args = {'tenant_id': 'test tenant', 'name': 'test name'}
        # Execute method 'network_create'
        network = self.neutron_api.network_create(**net_args)
        # Verify results
        self.assertEqual(net_args['tenant_id'], network['tenant_id'])
        self.assertEqual(net_args['name'], network['name'])
        self.assertTrue(clientv20.Client.called)

    def test_create_subnet(self):
        # Set up test data
        subnet_args = {
            'tenant_id': 'test tenant',
            'name': 'test name',
            'net_id': 'test net id',
            'cidr': '10.0.0.0/24',
        }
        # Execute method 'subnet_create'
        subnet = self.neutron_api.subnet_create(**subnet_args)
        # Verify results
        self.assertEqual(subnet_args['tenant_id'], subnet['tenant_id'])
        self.assertEqual(subnet_args['name'], subnet['name'])
        self.assertTrue(clientv20.Client.called)

    def test_create_router(self):
        # Set up test data
        router_args = {'tenant_id': 'test tenant', 'name': 'test name'}
        # Execute method 'router_create'
        router = self.neutron_api.router_create(**router_args)
        # Verify results
        self.assertEqual(router_args['tenant_id'], router['tenant_id'])
        self.assertEqual(router_args['name'], router['name'])
        self.assertTrue(clientv20.Client.called)

    def test_list_routers(self):
        # Set up test data
        fake_routers = [{'fake router': 'fake router info'}]
        self.mock_object(
            self.neutron_api.client, 'list_routers',
            mock.Mock(return_value={'routers': fake_routers}))
        # Execute method 'router_list'
        networks = self.neutron_api.router_list()
        # Verify results
        self.assertEqual(fake_routers, networks)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.list_routers.assert_called_once_with()

    def test_create_network_exception(self):
        # Set up test data
        net_args = {'tenant_id': 'test tenant', 'name': 'test name'}
        self.mock_object(
            self.neutron_api.client, 'create_network',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))
        # Execute method 'network_create'
        self.assertRaises(
            exception.NetworkException,
            self.neutron_api.network_create,
            **net_args)
        # Verify results
        self.neutron_api.client.create_network.assert_called_once_with(
            {'network': net_args})
        self.assertTrue(clientv20.Client.called)

    def test_create_subnet_exception(self):
        # Set up test data
        subnet_args = {
            'tenant_id': 'test tenant',
            'name': 'test name',
            'net_id': 'test net id',
            'cidr': '10.0.0.0/24',
        }
        self.mock_object(
            self.neutron_api.client, 'create_subnet',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))
        # Execute method 'subnet_create'
        self.assertRaises(
            exception.NetworkException,
            self.neutron_api.subnet_create,
            **subnet_args)
        # Verify results
        expected_data = {
            'network_id': subnet_args['net_id'],
            'tenant_id': subnet_args['tenant_id'],
            'cidr': subnet_args['cidr'],
            'name': subnet_args['name'],
            'ip_version': 4,
        }
        self.neutron_api.client.create_subnet.assert_called_once_with(
            {'subnet': expected_data})
        self.assertTrue(clientv20.Client.called)

    def test_create_router_exception(self):
        # Set up test data
        router_args = {'tenant_id': 'test tenant', 'name': 'test name'}
        self.mock_object(
            self.neutron_api.client, 'create_router',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))
        # Execute method 'router_create'
        self.assertRaises(
            exception.NetworkException,
            self.neutron_api.router_create,
            **router_args)
        # Verify results
        self.neutron_api.client.create_router.assert_called_once_with(
            {'router': router_args})
        self.assertTrue(clientv20.Client.called)

    def test_update_port_fixed_ips(self):
        # Set up test data
        port_id = 'test_port'
        fixed_ips = {'fixed_ips': [{'subnet_id': 'test subnet'}]}
        # Execute method 'update_port_fixed_ips'
        port = self.neutron_api.update_port_fixed_ips(port_id, fixed_ips)
        # Verify results
        self.assertEqual(fixed_ips, port)
        self.assertTrue(clientv20.Client.called)

    def test_update_port_fixed_ips_exception(self):
        # Set up test data
        port_id = 'test_port'
        fixed_ips = {'fixed_ips': [{'subnet_id': 'test subnet'}]}
        self.mock_object(
            self.neutron_api.client, 'update_port',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))
        # Execute method 'update_port_fixed_ips'
        self.assertRaises(
            exception.NetworkException,
            self.neutron_api.update_port_fixed_ips,
            port_id, fixed_ips)
        # Verify results
        self.neutron_api.client.update_port.assert_called_once_with(
            port_id, {'port': fixed_ips})
        self.assertTrue(clientv20.Client.called)

    def test_router_update_routes(self):
        # Set up test data
        router_id = 'test_router'
        routes = {
            'routes': [
                {'destination': '0.0.0.0/0', 'nexthop': '8.8.8.8', },
            ],
        }
        # Execute method 'router_update_routes'
        router = self.neutron_api.router_update_routes(router_id, routes)
        # Verify results
        self.assertEqual(routes, router)
        self.assertTrue(clientv20.Client.called)

    def test_router_update_routes_exception(self):
        # Set up test data
        router_id = 'test_router'
        routes = {
            'routes': [
                {'destination': '0.0.0.0/0', 'nexthop': '8.8.8.8', },
            ],
        }
        self.mock_object(
            self.neutron_api.client, 'update_router',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))
        # Execute method 'router_update_routes'
        self.assertRaises(
            exception.NetworkException,
            self.neutron_api.router_update_routes,
            router_id, routes)
        # Verify results
        self.neutron_api.client.update_router.assert_called_once_with(
            router_id, {'router': routes})
        self.assertTrue(clientv20.Client.called)

    def test_show_router(self):
        # Set up test data
        router_id = 'test router id'
        fake_router = {'fake router': 'fake router info'}
        self.mock_object(
            self.neutron_api.client, 'show_router',
            mock.Mock(return_value={'router': fake_router}))
        # Execute method 'show_router'
        port = self.neutron_api.show_router(router_id)
        # Verify results
        self.assertEqual(fake_router, port)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.show_router.assert_called_once_with(router_id)

    def test_router_add_interface(self):
        # Set up test data. Distinct router and port ids: the previous
        # version used the same string for both, which masked a wrong
        # first argument in the assertion below.
        router_id = 'test router id'
        subnet_id = 'test subnet id'
        port_id = 'test port id'
        self.mock_object(self.neutron_api.client, 'add_interface_router')
        # Execute method 'router_add_interface'
        self.neutron_api.router_add_interface(router_id, subnet_id, port_id)
        # Verify results: the client must be called with the *router* id
        # (the old assertion passed `port_id` as the first argument).
        self.neutron_api.client.add_interface_router.assert_called_once_with(
            router_id, {'subnet_id': subnet_id, 'port_id': port_id})
        self.assertTrue(clientv20.Client.called)

    def test_router_add_interface_exception(self):
        # Set up test data
        router_id = 'test port id'
        subnet_id = 'test subnet id'
        port_id = 'test port id'
        self.mock_object(
            self.neutron_api.client, 'add_interface_router',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))
        # Execute method 'router_add_interface'
        self.assertRaises(
            exception.NetworkException,
            self.neutron_api.router_add_interface,
            router_id, subnet_id, port_id)
        # Verify results
        self.neutron_api.client.add_interface_router.assert_called_once_with(
            router_id, {'subnet_id': subnet_id, 'port_id': port_id})
        self.assertTrue(clientv20.Client.called)

    def test_admin_project_id_exist(self):
        fake_admin_project_id = 'fake_admin_project_id_value'
        self.neutron_api.client.httpclient = mock.Mock()
        self.neutron_api.client.httpclient.auth_token = mock.Mock()
        self.neutron_api.client.httpclient.auth_tenant_id = (
            fake_admin_project_id)
        admin_project_id = self.neutron_api.admin_project_id
        # (The former bare `...auth_token.called` expression statement
        # asserted nothing and was removed.)
        self.assertEqual(fake_admin_project_id, admin_project_id)

    def test_admin_project_id_not_exist(self):
        fake_admin_project_id = 'fake_admin_project_id_value'
        self.neutron_api.client.httpclient = mock.Mock()
        self.neutron_api.client.httpclient.auth_token = mock.Mock(
            return_value=None)
        self.neutron_api.client.httpclient.authenticate = mock.Mock()
        self.neutron_api.client.httpclient.auth_tenant_id = (
            fake_admin_project_id)
        admin_project_id = self.neutron_api.admin_project_id
        # (Former bare `.called` expression statements removed -- they were
        # no-op attribute accesses, not assertions.)
        self.assertEqual(fake_admin_project_id, admin_project_id)

    def test_admin_project_id_not_exist_with_failure(self):
        self.neutron_api.client.httpclient = mock.Mock()
        self.neutron_api.client.httpclient.auth_token = None
        self.neutron_api.client.httpclient.authenticate = mock.Mock(
            side_effect=neutron_client_exc.NeutronClientException)
        self.neutron_api.client.httpclient.auth_tenant_id = mock.Mock()
        # Property access must raise NetworkException; assertRaises via
        # getattr replaces the hand-rolled try/except/else block.
        self.assertRaises(
            exception.NetworkException,
            getattr, self.neutron_api, 'admin_project_id')
        self.assertTrue(self.neutron_api.client.httpclient.authenticate.called)
        self.assertFalse(
            self.neutron_api.client.httpclient.auth_tenant_id.called)

    def test_get_all_admin_project_networks(self):
        fake_networks = {'networks': ['fake_net_1', 'fake_net_2']}
        self.mock_object(
            self.neutron_api.client, 'list_networks',
            mock.Mock(return_value=fake_networks))
        self.neutron_api.client.httpclient = mock.Mock()
        self.neutron_api.client.httpclient.auth_token = mock.Mock()
        self.neutron_api.client.httpclient.auth_tenant_id = mock.Mock()
        networks = self.neutron_api.get_all_admin_project_networks()
        # (Former bare `.called` expression statements removed.)
        self.assertEqual(fake_networks['networks'], networks)
        self.neutron_api.client.list_networks.assert_called_once_with(
            tenant_id=self.neutron_api.admin_project_id, shared=False)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import re
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from oslo_versionedobjects import exception as ovoo_exc
import six
from nova.db.sqlalchemy import api as db
from nova.db.sqlalchemy import api_models
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
@base.NovaObjectRegistry.register
class BuildRequest(base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added block_device_mappings
    # Version 1.2: Added save() method
    # Version 1.3: Added tags
    VERSION = '1.3'
    fields = {
        'id': fields.IntegerField(),
        'instance_uuid': fields.UUIDField(),
        'project_id': fields.StringField(),
        # Full Instance object; stored serialized and deserialized by
        # _load_instance via jsonutils.
        'instance': fields.ObjectField('Instance'),
        'block_device_mappings': fields.ObjectField('BlockDeviceMappingList'),
        # NOTE(alaski): Normally these would come from the NovaPersistentObject
        # mixin but they're being set explicitly because we only need
        # created_at/updated_at. There is no soft delete for this object.
        'created_at': fields.DateTimeField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'tags': fields.ObjectField('TagList'),
    }
def obj_make_compatible(self, primitive, target_version):
    """Strip fields newer than target_version from the primitive.

    Both version checks must run independently: a downgrade below (1, 1)
    has to drop 'block_device_mappings' (added in 1.1) AND 'tags' (added
    in 1.3).  The previous `elif` skipped the tags check whenever the
    block_device_mappings branch fired, leaving a 1.3-only field in a
    primitive destined for a pre-1.1 consumer.
    """
    super(BuildRequest, self).obj_make_compatible(primitive,
                                                  target_version)
    target_version = versionutils.convert_version_to_tuple(target_version)
    if target_version < (1, 1) and 'block_device_mappings' in primitive:
        del primitive['block_device_mappings']
    if target_version < (1, 3) and 'tags' in primitive:
        del primitive['tags']
def _load_instance(self, db_instance):
# NOTE(alaski): Be very careful with instance loading because it
# changes more than most objects.
try:
self.instance = objects.Instance.obj_from_primitive(
jsonutils.loads(db_instance))
except TypeError:
LOG.debug('Failed to load instance from BuildRequest with uuid '
'%s because it is None', self.instance_uuid)
raise exception.BuildRequestNotFound(uuid=self.instance_uuid)
except ovoo_exc.IncompatibleObjectVersion:
# This should only happen if proper service upgrade strategies are
# not followed. Log the exception and raise BuildRequestNotFound.
# If the instance can't be loaded this object is useless and may
# as well not exist.
LOG.debug('Could not deserialize instance store in BuildRequest '
'with uuid %(instance_uuid)s. Found version %(version)s '
'which is not supported here.',
dict(instance_uuid=self.instance_uuid,
version=jsonutils.loads(
db_instance)["nova_object.version"]))
LOG.exception('Could not deserialize instance in BuildRequest')
raise exception.BuildRequestNotFound(uuid=self.instance_uuid)
# NOTE(sbauza): The instance primitive should already have the deleted
# field being set, so when hydrating it back here, we should get the
# right value but in case we don't have it, let's suppose that the
# instance is not deleted, which is the default value for that field.
# NOTE(mriedem): Same for the "hidden" field.
self.instance.obj_set_defaults('deleted', 'hidden')
# NOTE(alaski): Set some fields on instance that are needed by the api,
# not lazy-loadable, and don't change.
self.instance.disable_terminate = False
self.instance.terminated_at = None
self.instance.host = None
self.instance.node = None
self.instance.launched_at = None
self.instance.launched_on = None
self.instance.cell_name = None
# The fields above are not set until the instance is in a cell at
# which point this BuildRequest will be gone. locked_by could
# potentially be set by an update so it should not be overwritten.
if not self.instance.obj_attr_is_set('locked_by'):
self.instance.locked_by = None
# created_at/updated_at are not on the serialized instance because it
# was never persisted.
self.instance.created_at = self.created_at
self.instance.updated_at = self.updated_at
self.instance.tags = self.tags
def _load_block_device_mappings(self, db_bdms):
# 'db_bdms' is a serialized BlockDeviceMappingList object. If it's None
# we're in a mixed version nova-api scenario and can't retrieve the
# actual list. Set it to an empty list here which will cause a
# temporary API inconsistency that will be resolved as soon as the
# instance is scheduled and on a compute.
if db_bdms is None:
LOG.debug('Failed to load block_device_mappings from BuildRequest '
'for instance %s because it is None', self.instance_uuid)
self.block_device_mappings = objects.BlockDeviceMappingList()
return
self.block_device_mappings = (
objects.BlockDeviceMappingList.obj_from_primitive(
jsonutils.loads(db_bdms)))
def _load_tags(self, db_tags):
# 'db_tags' is a serialized TagList object. If it's None
# we're in a mixed version nova-api scenario and can't retrieve the
# actual list. Set it to an empty list here which will cause a
# temporary API inconsistency that will be resolved as soon as the
# instance is scheduled and on a compute.
if db_tags is None:
LOG.debug('Failed to load tags from BuildRequest '
'for instance %s because it is None', self.instance_uuid)
self.tags = objects.TagList()
return
self.tags = (
objects.TagList.obj_from_primitive(
jsonutils.loads(db_tags)))
@staticmethod
def _from_db_object(context, req, db_req):
# Set this up front so that it can be pulled for error messages or
# logging at any point.
req.instance_uuid = db_req['instance_uuid']
for key in req.fields:
if key == 'instance':
continue
elif isinstance(req.fields[key], fields.ObjectField):
try:
getattr(req, '_load_%s' % key)(db_req[key])
except AttributeError:
LOG.exception('No load handler for %s', key)
else:
setattr(req, key, db_req[key])
# Load instance last because other fields on req may be referenced
req._load_instance(db_req['instance'])
req.obj_reset_changes(recursive=True)
req._context = context
return req
@staticmethod
@db.api_context_manager.reader
def _get_by_instance_uuid_from_db(context, instance_uuid):
db_req = context.session.query(api_models.BuildRequest).filter_by(
instance_uuid=instance_uuid).first()
if not db_req:
raise exception.BuildRequestNotFound(uuid=instance_uuid)
return db_req
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_req = cls._get_by_instance_uuid_from_db(context, instance_uuid)
return cls._from_db_object(context, cls(), db_req)
@staticmethod
@db.api_context_manager.writer
def _create_in_db(context, updates):
db_req = api_models.BuildRequest()
db_req.update(updates)
db_req.save(context.session)
return db_req
def _get_update_primitives(self):
updates = self.obj_get_changes()
for key, value in updates.items():
if isinstance(self.fields[key], fields.ObjectField):
updates[key] = jsonutils.dumps(value.obj_to_primitive())
return updates
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
if not self.obj_attr_is_set('instance_uuid'):
# We can't guarantee this is not null in the db so check here
raise exception.ObjectActionError(action='create',
reason='instance_uuid must be set')
updates = self._get_update_primitives()
db_req = self._create_in_db(self._context, updates)
self._from_db_object(self._context, self, db_req)
@staticmethod
@db.api_context_manager.writer
def _destroy_in_db(context, instance_uuid):
result = context.session.query(api_models.BuildRequest).filter_by(
instance_uuid=instance_uuid).delete()
if not result:
raise exception.BuildRequestNotFound(uuid=instance_uuid)
@base.remotable
def destroy(self):
self._destroy_in_db(self._context, self.instance_uuid)
@db.api_context_manager.writer
def _save_in_db(self, context, req_id, updates):
db_req = context.session.query(
api_models.BuildRequest).filter_by(id=req_id).first()
if not db_req:
raise exception.BuildRequestNotFound(uuid=self.instance_uuid)
db_req.update(updates)
context.session.add(db_req)
return db_req
@base.remotable
def save(self):
updates = self._get_update_primitives()
db_req = self._save_in_db(self._context, self.id, updates)
self._from_db_object(self._context, self, db_req)
def get_new_instance(self, context):
# NOTE(danms): This is a hack to make sure that the returned
# instance has all dirty fields. There are probably better
# ways to do this, but they kinda involve o.vo internals
# so this is okay for the moment.
instance = objects.Instance(context)
for field in self.instance.obj_fields:
# NOTE(danms): Don't copy the defaulted tags field
# as instance.create() won't handle it properly.
# TODO(zhengzhenyu): Handle this when the API supports creating
# servers with tags.
if field == 'tags':
continue
if self.instance.obj_attr_is_set(field):
setattr(instance, field, getattr(self.instance, field))
return instance
@base.NovaObjectRegistry.register
class BuildRequestList(base.ObjectListBase, base.NovaObject):
    """A list of BuildRequest objects with API-side filtering and sorting.

    Because the instance is stored serialized in the build_requests
    table, filtering/sorting cannot be pushed into SQL; it is done in
    Python over all (short-lived) build requests.
    """
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'objects': fields.ListOfObjectsField('BuildRequest'),
    }
    @staticmethod
    @db.api_context_manager.reader
    def _get_all_from_db(context):
        """Return all build_requests rows visible to this context."""
        query = context.session.query(api_models.BuildRequest)
        # Non-admin callers only see their own project's build requests.
        if not context.is_admin:
            query = query.filter_by(project_id=context.project_id)
        db_reqs = query.all()
        return db_reqs
    @base.remotable_classmethod
    def get_all(cls, context):
        """Return a BuildRequestList of everything visible to context."""
        db_build_reqs = cls._get_all_from_db(context)
        return base.obj_make_list(context, cls(context), objects.BuildRequest,
                                  db_build_reqs)
    @staticmethod
    def _pass_exact_filters(instance, filters):
        """Apply exact-match filters to one instance.

        Returns True if the instance passes every filter, False if it
        fails one, and None when a filter value is an empty collection,
        meaning nothing at all can match (caller bails out early).
        """
        for filter_key, filter_val in filters.items():
            if filter_key in ('metadata', 'system_metadata'):
                # NOTE(review): both the 'metadata' and 'system_metadata'
                # filters compare against instance.metadata here; the
                # system_metadata branch looks like it should read
                # instance.system_metadata — confirm intent before
                # relying on it.
                if isinstance(filter_val, list):
                    for item in filter_val:
                        for k, v in item.items():
                            if (k not in instance.metadata or
                                    v != instance.metadata[k]):
                                return False
                else:
                    for k, v in filter_val.items():
                        if (k not in instance.metadata or
                                v != instance.metadata[k]):
                            return False
            elif filter_key in (
                    'tags', 'tags-any', 'not-tags', 'not-tags-any'):
                # Get the list of simple string tags first.
                tags = ([tag.tag for tag in instance.tags]
                        if instance.tags else [])
                if filter_key == 'tags':
                    # All requested tags must be present.
                    for item in filter_val:
                        if item not in tags:
                            return False
                elif filter_key == 'tags-any':
                    # At least one requested tag must be present.
                    found = []
                    for item in filter_val:
                        if item in tags:
                            found.append(item)
                    if not found:
                        return False
                elif filter_key == 'not-tags':
                    # Fails only if *all* the listed tags are present.
                    found = []
                    for item in filter_val:
                        if item in tags:
                            found.append(item)
                    if len(found) == len(filter_val):
                        return False
                elif filter_key == 'not-tags-any':
                    # Fails if *any* of the listed tags is present.
                    for item in filter_val:
                        if item in tags:
                            return False
            elif isinstance(filter_val, (list, tuple, set, frozenset)):
                if not filter_val:
                    # Special value to indicate that nothing will match.
                    return None
                if instance.get(filter_key, None) not in filter_val:
                    return False
            else:
                if instance.get(filter_key, None) != filter_val:
                    return False
        return True
    @staticmethod
    def _pass_regex_filters(instance, filters):
        """Apply regex filters; attributes the instance lacks are skipped."""
        for filter_name, filter_val in filters.items():
            try:
                instance_attr = getattr(instance, filter_name)
            except AttributeError:
                continue
            # Sometimes the REGEX filter value is not a string
            if not isinstance(filter_val, six.string_types):
                filter_val = str(filter_val)
            filter_re = re.compile(filter_val)
            if instance_attr and not filter_re.search(str(instance_attr)):
                return False
        return True
    @staticmethod
    def _sort_build_requests(build_req_list, sort_keys, sort_dirs):
        """Sort build_req_list in place by sort_keys/sort_dirs; return it."""
        # build_req_list is a [] of build_reqs
        # Sort by the least significant key first; each subsequent stable
        # sort on a more significant key then preserves earlier ordering.
        sort_keys.reverse()
        sort_dirs.reverse()
        def sort_attr(sort_key, build_req):
            if sort_key == 'id':
                # 'id' is not set on the instance yet. Use the BuildRequest
                # 'id' instead.
                return build_req.id
            return getattr(build_req.instance, sort_key)
        for sort_key, sort_dir in zip(sort_keys, sort_dirs):
            reverse = False if sort_dir.lower().startswith('asc') else True
            build_req_list.sort(key=functools.partial(sort_attr, sort_key),
                                reverse=reverse)
        return build_req_list
    @base.remotable_classmethod
    def get_by_filters(cls, context, filters, limit=None, marker=None,
                       sort_keys=None, sort_dirs=None):
        """Return a filtered, sorted and paginated BuildRequestList.

        :raises: MarkerNotFound if 'marker' is given but no build request
            with that instance uuid is present in the filtered results.
        """
        # Short-circuit on anything that will not yield results.
        # 'deleted' records can not be returned from here since build_requests
        # are not soft deleted.
        # 'cleaned' records won't exist as they would need to be deleted.
        if (limit == 0 or
                filters.get('deleted', False) or
                filters.get('cleaned', False)):
            # If we have a marker honor the MarkerNotFound semantics.
            if marker:
                raise exception.MarkerNotFound(marker=marker)
            return cls(context, objects=[])
        # Because the build_requests table stores an instance as a serialized
        # versioned object it is not feasible to do the filtering and sorting
        # in the database. Just get all potentially relevant records and
        # process them here. It should be noted that build requests are short
        # lived so there should not be a lot of results to deal with.
        build_requests = cls.get_all(context)
        # Fortunately some filters do not apply here.
        # 'changes-since' works off of the updated_at field which has not yet
        # been set at the point in the boot process where build_request still
        # exists. So it can be ignored.
        # 'deleted' and 'cleaned' are handled above.
        sort_keys, sort_dirs = db.process_sort_params(sort_keys, sort_dirs,
                                                      default_dir='desc')
        # For other filters that don't match this, we will do regexp matching
        # Taken from db/sqlalchemy/api.py
        exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
                                    'vm_state', 'instance_type_id', 'uuid',
                                    'metadata', 'host', 'task_state',
                                    'system_metadata', 'tags', 'tags-any',
                                    'not-tags', 'not-tags-any']
        exact_filters = {}
        regex_filters = {}
        for key, value in filters.items():
            if key in exact_match_filter_names:
                exact_filters[key] = value
            else:
                regex_filters[key] = value
        # As much as possible this copies the logic from db/sqlalchemy/api.py
        # instance_get_all_by_filters_sort. The main difference is that method
        # builds a sql query and this filters in python.
        filtered_build_reqs = []
        for build_req in build_requests:
            instance = build_req.instance
            filter_result = cls._pass_exact_filters(instance, exact_filters)
            if filter_result is None:
                # The filter condition is such that nothing will match.
                # Bail early.
                return cls(context, objects=[])
            if filter_result is False:
                continue
            if not cls._pass_regex_filters(instance, regex_filters):
                continue
            filtered_build_reqs.append(build_req)
        if (((len(filtered_build_reqs) < 2) or (not sort_keys)) and
                not marker):
            # No need to sort
            return cls(context, objects=filtered_build_reqs)
        sorted_build_reqs = cls._sort_build_requests(filtered_build_reqs,
                                                     sort_keys, sort_dirs)
        marker_index = 0
        if marker:
            for i, build_req in enumerate(sorted_build_reqs):
                if build_req.instance.uuid == marker:
                    # The marker is the last seen item in the last page, so
                    # we increment the index to the next item immediately
                    # after the marker so the marker is not returned.
                    marker_index = i + 1
                    break
            else:
                raise exception.MarkerNotFound(marker=marker)
        len_build_reqs = len(sorted_build_reqs)
        limit_index = len_build_reqs
        if limit:
            limit_index = marker_index + limit
            if limit_index > len_build_reqs:
                limit_index = len_build_reqs
        return cls(context,
                   objects=sorted_build_reqs[marker_index:limit_index])
| |
"""IntegerPartitions.py
Generate and manipulate partitions of integers into sums of integers.
D. Eppstein, August 2005.
"""
def mckay(n):
    """
    Integer partitions of n, in reverse lexicographic order.
    Note that the generated output consists of the same list object,
    repeated the correct number of times; the caller must leave this
    list unchanged, and must make a copy of any partition that is
    intended to last longer than the next call into the generator.
    The algorithm follows Knuth v4 fasc3 p38 in rough outline.
    """
    if n == 0:
        yield []
    if n <= 0:
        return
    partition = [n]
    # Index of the rightmost part that is > 1, or -1 when every part
    # is 1.  ((n > 1) - 1) evaluates to 0 for n > 1 and -1 for n == 1.
    last_nonunit = (n > 1) - 1
    while True:
        yield partition
        if last_nonunit < 0:
            # All parts are 1: [1]*n is the revlex-last partition.
            return
        if partition[last_nonunit] == 2:
            # Splitting a 2 simply turns it into two 1's.
            partition[last_nonunit] = 1
            partition.append(1)
            last_nonunit -= 1
            continue
        # Decrement the last non-1 part and repack its old value plus all
        # trailing 1's into as many copies of the decremented value as fit,
        # with any remainder appended as one final smaller part.
        replacement = partition[last_nonunit] - 1
        # total_replaced equals the sum of the removed tail:
        # (old part) + (number of trailing 1's).
        total_replaced = replacement + len(partition) - last_nonunit
        reps, rest = divmod(total_replaced, replacement)
        partition[last_nonunit:] = reps * [replacement]
        if rest:
            partition.append(rest)
        # Recompute the rightmost non-1 index (the final part may be 1).
        last_nonunit = len(partition) - (partition[-1] == 1) - 1
def revlex_partitions(n):
    """Generate the integer partitions of n in reverse lexicographic order.

    Each yielded value is the same list object, mutated in place between
    yields; callers must copy any partition they want to keep.  The
    output and asymptotic runtime match mckay(n), but this version works
    by recursion on the partitions of n - 1: it uses no division, at the
    cost of O(n) recursion stack.
    """
    if n == 0:
        yield []
    if n <= 0:
        return
    for stem in revlex_partitions(n - 1):
        # Bumping the final part keeps the list non-increasing only when
        # it is the sole part, or strictly smaller than its left neighbor.
        can_grow = len(stem) == 1 or (len(stem) > 1 and stem[-1] < stem[-2])
        if can_grow:
            stem[-1] += 1
            yield stem
            stem[-1] -= 1
        # The other extension: tack a trailing 1 onto the stem.
        stem.append(1)
        yield stem
        stem.pop()
def lex_partitions(n):
    """Generate the integer partitions of n in lexicographic order.

    Mirror image of revlex_partitions: the two recursive extensions are
    taken in the opposite order.  The same list object is reused between
    yields, so callers must copy partitions they wish to retain.
    """
    if n == 0:
        yield []
    if n <= 0:
        return
    for stem in lex_partitions(n - 1):
        # Smaller extension first: append a trailing 1.
        stem.append(1)
        yield stem
        stem.pop()
        # Larger extension second: bump the last part, which is legal
        # only while the partition stays non-increasing.
        tail_can_grow = (
            len(stem) == 1 or (len(stem) > 1 and stem[-1] < stem[-2])
        )
        if tail_can_grow:
            stem[-1] += 1
            yield stem
            stem[-1] -= 1
partitions = revlex_partitions # default partition generating algorithm
def binary_partitions(n):
    """
    Generate partitions of n into powers of two, in revlex order.
    Knuth exercise 7.2.1.4.64.
    The average time per output is constant.
    But this doesn't really solve the exercise, because it isn't loopless...
    """
    # Generate the binary representation of n
    if n < 0:
        return
    # NOTE: 'pow' and 'sum' intentionally shadow the builtins within
    # this function; neither builtin is needed here.
    pow = 1
    sum = 0
    while pow <= n:
        pow <<= 1
    # Greedy scan from the highest power down: this produces the binary
    # representation of n, which is also its revlex-first binary partition.
    partition = []
    while pow:
        if sum + pow <= n:
            partition.append(pow)
            sum += pow
        pow >>= 1
    # Find all partitions of numbers up to n into powers of two > 1,
    # in revlex order, by repeatedly splitting the smallest nonunit power,
    # and replacing the following sequence of 1's by the first revlex
    # partition with maximum power less than the result of the split.
    # Time analysis:
    #
    # Each outer iteration increases len(partition) by at most one
    # (only if the power being split is a 2) and each inner iteration
    # in which some ones are replaced by x decreases len(partition),
    # so the number of those inner iterations is less than one per
    # output.
    #
    # Each time a power 2^k is split, it creates two or more 2^{k-1}'s,
    # all of which must eventually be split as well. So, if S_k denotes
    # the number of times a 2^k is split, and X denotes the total
    # number of outputs generated, then S_k <= X/2^{k-1}.
    # On an outer iteration in which 2^k is split, there will be k
    # inner iterations in which x is halved, so the total number
    # of such inner iterations is <= sum_k k*X/2^{k-1} = O(X).
    #
    # Therefore the overall average time per output is constant.
    # Index of the rightmost part > 1; (n & 1) accounts for a trailing 1
    # when n is odd.
    last_nonunit = len(partition) - 1 - (n & 1)
    while True:
        yield partition
        if last_nonunit < 0:
            # All parts are 1: the revlex-last binary partition.
            return
        if partition[last_nonunit] == 2:
            # Splitting a 2 yields two 1's.
            partition[last_nonunit] = 1
            partition.append(1)
            last_nonunit -= 1
            continue
        partition.append(1)
        x = partition[last_nonunit] = partition[last_nonunit + 1] = \
            partition[last_nonunit] >> 1  # make the split!
        last_nonunit += 1
        # Absorb runs of trailing 1's into copies of x, halving x until
        # the remaining 1's no longer cover a full copy.
        while x > 1:
            if len(partition) - last_nonunit - 1 >= x:
                del partition[-x + 1:]
                last_nonunit += 1
                partition[last_nonunit] = x
            else:
                x >>= 1
def fixed_length_partitions(n, L):
    """
    Integer partitions of n into L parts, in colex order.
    The algorithm follows Knuth v4 fasc3 p38 in rough outline;
    Knuth credits it to Hindenburg, 1779.
    """
    # guard against special cases
    if L == 0:
        if n == 0:
            yield []
        return
    if L == 1:
        if n > 0:
            yield [n]
        return
    if n < L:
        # n cannot be written as L positive parts.
        return
    # Colex-first partition: one large part followed by L-1 ones.
    partition = [n - L + 1] + (L - 1) * [1]
    while True:
        yield partition
        # Easy case: shift one unit from the first part to the second
        # while the result stays non-increasing.
        if partition[0] - 1 > partition[1]:
            partition[0] -= 1
            partition[1] += 1
            continue
        # Otherwise scan right for the first part that can absorb a unit
        # (one that is at least 2 below partition[0]), accumulating in s
        # the total that will be redistributed to its left.
        j = 2
        s = partition[0] + partition[1] - 1
        while j < L and partition[j] >= partition[0] - 1:
            s += partition[j]
            j += 1
        if j >= L:
            # No such position: the colex-last partition was just yielded.
            return
        partition[j] = x = partition[j] + 1
        j -= 1
        # Flatten everything left of j to the new value x, dumping the
        # remaining total into the first part.
        while j > 0:
            partition[j] = x
            s -= x
            j -= 1
        partition[0] = s
def conjugate(p):
    """
    Find the conjugate of a partition.
    E.g. len(p) = max(conjugate(p)) and vice versa.

    Expects p in standard form (positive parts, non-increasing order).
    """
    if not p:
        return []
    # Row i of the transposed Ferrers diagram contains one cell for each
    # part of p that extends past column i; p[0] (the largest part) is
    # the number of rows in the conjugate.
    return [sum(1 for part in p if part > i) for i in range(p[0])]
| |
#!/usr/bin/python
# JabberBot: A simple jabber/xmpp bot framework
# Copyright (c) 2007-2009 Thomas Perl <thpinfo.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Homepage: http://thpinfo.com/2007/python-jabberbot/
#
import sys
try:
import xmpp
except ImportError:
print >>sys.stderr, 'You need to install xmpppy from http://xmpppy.sf.net/.'
sys.exit(-1)
import inspect
"""A simple jabber/xmpp bot framework
This is a simple bot framework around the "xmpppy" framework.
Copyright (c) 2007-2009 Thomas Perl <thpinfo.com>
To use, subclass the "JabberBot" class and implement "bot_" methods
(or whatever you set the command_prefix to), like this:
class StupidEchoBot(JabberBot):
def bot_echo( self, mess, args):
"The command description goes here"
return 'You said: ' + args
def bot_subscribe( self, mess, args):
"HIDDEN (Authorize the presence subscription request)"
# The docstring for this command has "HIDDEN" in it, so
# the help output does not show this command.
f = mess.getFrom()
self.conn.Roster.Authorize( f)
return 'Authorized.'
def unknown_command( self, mess, cmd, args):
        """This optional method, if present, gets called if the
        command is not recognized."""
if args.split()[0].startswith( 'cheese'):
return 'Sorry, cheesy commands not available.'
else:
# if we return None, the default 'unknown command' text will get printed.
return None
username = 'jid@server.example.com'
password = 'mypassword'
bot = StupidEchoBot( username, password)
bot.serve_forever()
"""
__author__ = 'Thomas Perl <thp@thpinfo.com>'
__version__ = '0.6'
class JabberBot(object):
    """Base class for XMPP bots.

    Subclasses define commands as methods named with command_prefix
    (default 'bot_'); __init__ discovers them via introspection and a
    built-in 'help' command lists them.
    """
    # Methods whose names start with this prefix become bot commands.
    command_prefix = 'bot_'
    def __init__( self, jid, password, res = None):
        """Initializes the jabber bot and sets up commands."""
        self.jid = xmpp.JID( jid)
        self.password = password
        # XMPP resource: defaults to the subclass name.
        self.res = (res or self.__class__.__name__)
        self.conn = None
        # Name-mangled stop flag; set by quit() and polled by serve_forever().
        self.__finished = False
        self.commands = { 'help': self.help_callback, }
        # Auto-register every bot_* method as a command, keyed without
        # the prefix (bot_echo -> 'echo').
        for (name, value) in inspect.getmembers( self):
            if inspect.ismethod( value) and name.startswith( self.command_prefix):
                self.commands[name[len(self.command_prefix):]] = value
    def log( self, s):
        """Logging facility, can be overridden in subclasses to log to file, etc.."""
        print '%s: %s' % ( self.__class__.__name__, s, )
    def connect( self):
        """Connect and authenticate lazily; cache and return the connection.

        Returns None (without caching) if connect or auth fails.
        """
        if not self.conn:
            conn = xmpp.Client( self.jid.getDomain(), debug = [])
            if not conn.connect():
                self.log( 'unable to connect to server.')
                return None
            if not conn.auth( self.jid.getNode(), self.password, self.res):
                self.log( 'unable to authorize with server.')
                return None
            conn.RegisterHandler( 'message', self.callback_message)
            # NOTE(review): subscribe_handler and unsubscribed_handler are
            # not defined in this class as shown — presumably supplied by a
            # subclass or elsewhere; confirm before instantiating directly.
            conn.RegisterHandler('presence', self.subscribe_handler, 'subscribe')
            conn.RegisterHandler('presence', self.unsubscribed_handler, 'unsubscribed')
            conn.sendInitPresence()
            self.conn = conn
        return self.conn
    def quit( self):
        """Stop serving messages and exit.
        I find it is handy for development to run the
        jabberbot in a 'while true' loop in the shell, so
        whenever I make a code change to the bot, I send
        the 'reload' command, which I have mapped to call
        self.quit(), and my shell script relaunches the
        new version.
        """
        self.__finished = True
    def send( self, user, text, in_reply_to = None):
        """Sends a simple message to the specified user."""
        mess = xmpp.Message( user, text)
        # Keep replies in the same conversation thread/type as the original.
        if in_reply_to:
            mess.setThread( in_reply_to.getThread())
            mess.setType( in_reply_to.getType())
        self.connect().send( mess)
    def callback_message( self, conn, mess):
        """Messages sent to the bot will arrive here. Command handling + routing is done in this function."""
        text = mess.getBody()
        # If a message format is not supported (eg. encrypted), txt will be None
        if not text:
            return
        # First whitespace-separated word is the command, rest is args.
        if ' ' in text:
            command, args = text.split(' ',1)
        else:
            command, args = text,''
        cmd = command.lower()
        if self.commands.has_key(cmd):
            reply = self.commands[cmd]( mess, args)
        else:
            unk_str = 'Unknown command: "%s". Type "help" for available commands.' % cmd
            reply = self.unknown_command( mess, cmd, args) or unk_str
        if reply:
            self.send( mess.getFrom(), reply, mess)
    def unknown_command( self, mess, cmd, args):
        """Default handler for unknown commands
        Override this method in derived class if you
        want to trap some unrecognized commands. If
        'cmd' is handled, you must return some non-false
        value, else some helpful text will be sent back
        to the sender.
        """
        return None
    def help_callback( self, mess, args):
        """Returns a help string listing available options. Automatically assigned to the "help" command."""
        # One line per command; commands whose docstring starts with
        # 'HIDDEN' (or is missing) are excluded, as is 'help' itself.
        usage = '\n'.join(sorted(['%s: %s' % (name, command.__doc__ or '(undocumented)') for (name, command) in self.commands.items() if name != 'help' and (not command.__doc__ or not command.__doc__.startswith('HIDDEN'))]))
        if self.__doc__:
            description = self.__doc__.strip()
        else:
            description = 'Available commands:'
        return '%s\n\n%s' % ( description, usage, )
    def idle_proc( self):
        """This function will be called in the main loop."""
        pass
    def serve_forever( self, connect_callback = None, disconnect_callback = None):
        """Connects to the server and handles messages."""
        conn = self.connect()
        if conn:
            self.log('bot connected. serving forever.')
        else:
            self.log('could not connect to server - aborting.')
            return
        if connect_callback:
            connect_callback()
        # Pump the XMPP event loop until quit() sets the finished flag
        # or the user interrupts.
        while not self.__finished:
            try:
                conn.Process(1)
                self.idle_proc()
            except KeyboardInterrupt:
                self.log('bot stopped by user request. shutting down.')
                break
        if disconnect_callback:
            disconnect_callback()
| |
import numpy as np
import pytest
from pandas.core.dtypes.concat import union_categoricals
import pandas as pd
from pandas import Categorical, CategoricalIndex, Series
from pandas.util import testing as tm
class TestUnionCategoricals:
    def test_union_categorical(self):
        """Union across value dtypes and container boxes; new categories
        appear in order of appearance; mismatched category dtypes raise."""
        # GH 13361
        data = [
            (list("abc"), list("abd"), list("abcabd")),
            ([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
            ([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
            (
                ["b", "b", np.nan, "a"],
                ["a", np.nan, "c"],
                ["b", "b", np.nan, "a", "a", np.nan, "c"],
            ),
            (
                pd.date_range("2014-01-01", "2014-01-05"),
                pd.date_range("2014-01-06", "2014-01-07"),
                pd.date_range("2014-01-01", "2014-01-07"),
            ),
            (
                pd.date_range("2014-01-01", "2014-01-05", tz="US/Central"),
                pd.date_range("2014-01-06", "2014-01-07", tz="US/Central"),
                pd.date_range("2014-01-01", "2014-01-07", tz="US/Central"),
            ),
            (
                pd.period_range("2014-01-01", "2014-01-05"),
                pd.period_range("2014-01-06", "2014-01-07"),
                pd.period_range("2014-01-01", "2014-01-07"),
            ),
        ]
        for a, b, combined in data:
            for box in [Categorical, CategoricalIndex, Series]:
                result = union_categoricals([box(Categorical(a)), box(Categorical(b))])
                expected = Categorical(combined)
                tm.assert_categorical_equal(result, expected, check_category_order=True)
        # new categories ordered by appearance
        s = Categorical(["x", "y", "z"])
        s2 = Categorical(["a", "b", "c"])
        result = union_categoricals([s, s2])
        expected = Categorical(
            ["x", "y", "z", "a", "b", "c"], categories=["x", "y", "z", "a", "b", "c"]
        )
        tm.assert_categorical_equal(result, expected)
        s = Categorical([0, 1.2, 2], ordered=True)
        s2 = Categorical([0, 1.2, 2], ordered=True)
        result = union_categoricals([s, s2])
        expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
        tm.assert_categorical_equal(result, expected)
        # must exactly match types
        s = Categorical([0, 1.2, 2])
        s2 = Categorical([2, 3, 4])
        msg = "dtype of categories must be the same"
        with pytest.raises(TypeError, match=msg):
            union_categoricals([s, s2])
        msg = "No Categoricals to union"
        with pytest.raises(ValueError, match=msg):
            union_categoricals([])
    def test_union_categoricals_nan(self):
        """NaN/NaT values pass through the union without becoming categories."""
        # GH 13759
        res = union_categoricals(
            [pd.Categorical([1, 2, np.nan]), pd.Categorical([3, 2, np.nan])]
        )
        exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
        tm.assert_categorical_equal(res, exp)
        res = union_categoricals(
            [pd.Categorical(["A", "B"]), pd.Categorical(["B", "B", np.nan])]
        )
        exp = Categorical(["A", "B", "B", "B", np.nan])
        tm.assert_categorical_equal(res, exp)
        val1 = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-03-01"), pd.NaT]
        val2 = [pd.NaT, pd.Timestamp("2011-01-01"), pd.Timestamp("2011-02-01")]
        res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)])
        exp = Categorical(
            val1 + val2,
            categories=[
                pd.Timestamp("2011-01-01"),
                pd.Timestamp("2011-03-01"),
                pd.Timestamp("2011-02-01"),
            ],
        )
        tm.assert_categorical_equal(res, exp)
        # all NaN
        res = union_categoricals(
            [
                pd.Categorical(np.array([np.nan, np.nan], dtype=object)),
                pd.Categorical(["X"]),
            ]
        )
        exp = Categorical([np.nan, np.nan, "X"])
        tm.assert_categorical_equal(res, exp)
        res = union_categoricals(
            [pd.Categorical([np.nan, np.nan]), pd.Categorical([np.nan, np.nan])]
        )
        exp = Categorical([np.nan, np.nan, np.nan, np.nan])
        tm.assert_categorical_equal(res, exp)
    def test_union_categoricals_empty(self):
        """Unions involving empty categoricals yield the non-empty content."""
        # GH 13759
        res = union_categoricals([pd.Categorical([]), pd.Categorical([])])
        exp = Categorical([])
        tm.assert_categorical_equal(res, exp)
        res = union_categoricals([Categorical([]), Categorical(["1"])])
        exp = Categorical(["1"])
        tm.assert_categorical_equal(res, exp)
    def test_union_categorical_same_category(self):
        """Identical categories on both inputs take the fastpath and are kept."""
        # check fastpath
        c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
        c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4])
        res = union_categoricals([c1, c2])
        exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan], categories=[1, 2, 3, 4])
        tm.assert_categorical_equal(res, exp)
        c1 = Categorical(["z", "z", "z"], categories=["x", "y", "z"])
        c2 = Categorical(["x", "x", "x"], categories=["x", "y", "z"])
        res = union_categoricals([c1, c2])
        exp = Categorical(["z", "z", "z", "x", "x", "x"], categories=["x", "y", "z"])
        tm.assert_categorical_equal(res, exp)
    def test_union_categorical_same_categories_different_order(self):
        """Same category sets in different orders still union correctly."""
        # https://github.com/pandas-dev/pandas/issues/19096
        c1 = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
        c2 = Categorical(["a", "b", "c"], categories=["b", "a", "c"])
        result = union_categoricals([c1, c2])
        expected = Categorical(
            ["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"]
        )
        tm.assert_categorical_equal(result, expected)
    def test_union_categoricals_ordered(self):
        """Ordered categoricals union only when orderings and categories agree."""
        c1 = Categorical([1, 2, 3], ordered=True)
        c2 = Categorical([1, 2, 3], ordered=False)
        # Mixing ordered and unordered inputs is rejected.
        msg = "Categorical.ordered must be the same"
        with pytest.raises(TypeError, match=msg):
            union_categoricals([c1, c2])
        res = union_categoricals([c1, c1])
        exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True)
        tm.assert_categorical_equal(res, exp)
        c1 = Categorical([1, 2, 3, np.nan], ordered=True)
        c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
        res = union_categoricals([c1, c2])
        exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True)
        tm.assert_categorical_equal(res, exp)
        c1 = Categorical([1, 2, 3], ordered=True)
        c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
        # Ordered inputs must share an identical category order.
        msg = "to union ordered Categoricals, all categories must be the same"
        with pytest.raises(TypeError, match=msg):
            union_categoricals([c1, c2])
    def test_union_categoricals_ignore_order(self):
        """ignore_order=True drops orderedness and lifts the ordered-input
        restrictions that would otherwise raise."""
        # GH 15219
        c1 = Categorical([1, 2, 3], ordered=True)
        c2 = Categorical([1, 2, 3], ordered=False)
        res = union_categoricals([c1, c2], ignore_order=True)
        exp = Categorical([1, 2, 3, 1, 2, 3])
        tm.assert_categorical_equal(res, exp)
        msg = "Categorical.ordered must be the same"
        with pytest.raises(TypeError, match=msg):
            union_categoricals([c1, c2], ignore_order=False)
        res = union_categoricals([c1, c1], ignore_order=True)
        exp = Categorical([1, 2, 3, 1, 2, 3])
        tm.assert_categorical_equal(res, exp)
        res = union_categoricals([c1, c1], ignore_order=False)
        exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3], ordered=True)
        tm.assert_categorical_equal(res, exp)
        c1 = Categorical([1, 2, 3, np.nan], ordered=True)
        c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
        res = union_categoricals([c1, c2], ignore_order=True)
        exp = Categorical([1, 2, 3, np.nan, 3, 2])
        tm.assert_categorical_equal(res, exp)
        c1 = Categorical([1, 2, 3], ordered=True)
        c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
        res = union_categoricals([c1, c2], ignore_order=True)
        exp = Categorical([1, 2, 3, 1, 2, 3])
        tm.assert_categorical_equal(res, exp)
        res = union_categoricals([c2, c1], ignore_order=True, sort_categories=True)
        exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
        tm.assert_categorical_equal(res, exp)
        c1 = Categorical([1, 2, 3], ordered=True)
        c2 = Categorical([4, 5, 6], ordered=True)
        result = union_categoricals([c1, c2], ignore_order=True)
        expected = Categorical([1, 2, 3, 4, 5, 6])
        tm.assert_categorical_equal(result, expected)
        # Without ignore_order, differing ordered categories still raise.
        msg = "to union ordered Categoricals, all categories must be the same"
        with pytest.raises(TypeError, match=msg):
            union_categoricals([c1, c2], ignore_order=False)
        with pytest.raises(TypeError, match=msg):
            union_categoricals([c1, c2])
def test_union_categoricals_sort(self):
    # GH 13846
    # With sort_categories=True the result categories are lexically sorted.
    c1 = Categorical(["x", "y", "z"])
    c2 = Categorical(["a", "b", "c"])
    result = union_categoricals([c1, c2], sort_categories=True)
    expected = Categorical(
        ["x", "y", "z", "a", "b", "c"], categories=["a", "b", "c", "x", "y", "z"]
    )
    tm.assert_categorical_equal(result, expected)

    # fastpath
    c1 = Categorical(["a", "b"], categories=["b", "a", "c"])
    c2 = Categorical(["b", "c"], categories=["b", "a", "c"])
    result = union_categoricals([c1, c2], sort_categories=True)
    expected = Categorical(["a", "b", "b", "c"], categories=["a", "b", "c"])
    tm.assert_categorical_equal(result, expected)

    c1 = Categorical(["a", "b"], categories=["c", "a", "b"])
    c2 = Categorical(["b", "c"], categories=["c", "a", "b"])
    result = union_categoricals([c1, c2], sort_categories=True)
    expected = Categorical(["a", "b", "b", "c"], categories=["a", "b", "c"])
    tm.assert_categorical_equal(result, expected)

    # fastpath - skip resort
    c1 = Categorical(["a", "b"], categories=["a", "b", "c"])
    c2 = Categorical(["b", "c"], categories=["a", "b", "c"])
    result = union_categoricals([c1, c2], sort_categories=True)
    expected = Categorical(["a", "b", "b", "c"], categories=["a", "b", "c"])
    tm.assert_categorical_equal(result, expected)

    # NaNs are passed through and never become categories.
    c1 = Categorical(["x", np.nan])
    c2 = Categorical([np.nan, "b"])
    result = union_categoricals([c1, c2], sort_categories=True)
    expected = Categorical(["x", np.nan, np.nan, "b"], categories=["b", "x"])
    tm.assert_categorical_equal(result, expected)

    c1 = Categorical([np.nan])
    c2 = Categorical([np.nan])
    result = union_categoricals([c1, c2], sort_categories=True)
    expected = Categorical([np.nan, np.nan])
    tm.assert_categorical_equal(result, expected)

    c1 = Categorical([])
    c2 = Categorical([])
    result = union_categoricals([c1, c2], sort_categories=True)
    expected = Categorical([])
    tm.assert_categorical_equal(result, expected)

    # sort_categories=True is incompatible with ordered inputs.
    # Pin the error message instead of a bare ``pytest.raises(TypeError)``,
    # matching the style of the other raises checks in this module.
    c1 = Categorical(["b", "a"], categories=["b", "a", "c"], ordered=True)
    c2 = Categorical(["a", "c"], categories=["b", "a", "c"], ordered=True)
    msg = "Cannot use sort_categories=True with ordered Categoricals"
    with pytest.raises(TypeError, match=msg):
        union_categoricals([c1, c2], sort_categories=True)
def test_union_categoricals_sort_false(self):
    # GH 13846
    # With sort_categories=False the result categories keep first-seen order.
    c1 = Categorical(["x", "y", "z"])
    c2 = Categorical(["a", "b", "c"])
    result = union_categoricals([c1, c2], sort_categories=False)
    expected = Categorical(
        ["x", "y", "z", "a", "b", "c"], categories=["x", "y", "z", "a", "b", "c"]
    )
    tm.assert_categorical_equal(result, expected)

    # fastpath
    c1 = Categorical(["a", "b"], categories=["b", "a", "c"])
    c2 = Categorical(["b", "c"], categories=["b", "a", "c"])
    result = union_categoricals([c1, c2], sort_categories=False)
    expected = Categorical(["a", "b", "b", "c"], categories=["b", "a", "c"])
    tm.assert_categorical_equal(result, expected)

    # fastpath - skip resort
    c1 = Categorical(["a", "b"], categories=["a", "b", "c"])
    c2 = Categorical(["b", "c"], categories=["a", "b", "c"])
    result = union_categoricals([c1, c2], sort_categories=False)
    expected = Categorical(["a", "b", "b", "c"], categories=["a", "b", "c"])
    tm.assert_categorical_equal(result, expected)

    # NaNs are passed through and never become categories.
    c1 = Categorical(["x", np.nan])
    c2 = Categorical([np.nan, "b"])
    result = union_categoricals([c1, c2], sort_categories=False)
    expected = Categorical(["x", np.nan, np.nan, "b"], categories=["x", "b"])
    tm.assert_categorical_equal(result, expected)

    c1 = Categorical([np.nan])
    c2 = Categorical([np.nan])
    result = union_categoricals([c1, c2], sort_categories=False)
    expected = Categorical([np.nan, np.nan])
    tm.assert_categorical_equal(result, expected)

    c1 = Categorical([])
    c2 = Categorical([])
    result = union_categoricals([c1, c2], sort_categories=False)
    expected = Categorical([])
    tm.assert_categorical_equal(result, expected)

    # Ordered inputs with identical categories are allowed when not sorting.
    c1 = Categorical(["b", "a"], categories=["b", "a", "c"], ordered=True)
    c2 = Categorical(["a", "c"], categories=["b", "a", "c"], ordered=True)
    result = union_categoricals([c1, c2], sort_categories=False)
    expected = Categorical(
        ["b", "a", "a", "c"], categories=["b", "a", "c"], ordered=True
    )
    tm.assert_categorical_equal(result, expected)
def test_union_categorical_unwrap(self):
    # GH 14173
    # union_categoricals unwraps Series/CategoricalIndex inputs to their
    # underlying Categorical data.
    c1 = Categorical(["a", "b"])
    c2 = pd.Series(["b", "c"], dtype="category")
    result = union_categoricals([c1, c2])
    expected = Categorical(["a", "b", "b", "c"])
    tm.assert_categorical_equal(result, expected)

    c2 = CategoricalIndex(c2)
    result = union_categoricals([c1, c2])
    tm.assert_categorical_equal(result, expected)

    c1 = Series(c1)
    result = union_categoricals([c1, c2])
    tm.assert_categorical_equal(result, expected)

    # Plain lists are rejected; pin the error message instead of a bare
    # ``pytest.raises(TypeError)``, matching the rest of this module.
    msg = "all components to combine must be Categorical"
    with pytest.raises(TypeError, match=msg):
        union_categoricals([c1, ["a", "b", "c"]])
| |
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import logging
import datetime
from io import BytesIO
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .exceptions import HTTPError, RequestException, MissingSchema, InvalidURL
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len)
from .compat import (
cookielib, urlparse, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
log = logging.getLogger(__name__)
class RequestEncodingMixin(object):
    """Mixin providing URL-path extraction and form/multipart body encoding."""

    @property
    def path_url(self):
        """Build the path URL to use (path plus query string, no host)."""

        url = []

        p = urlsplit(self.url)

        path = p.path
        if not path:
            # A bare domain still needs a path component.
            path = '/'

        url.append(path)

        query = p.query
        if query:
            url.append('?')
            url.append(query)

        return ''.join(url)

    @staticmethod
    def _encode_params(data):
        """Encode parameters in a piece of data.

        Will successfully encode parameters when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.

        Strings/bytes and file-like objects are returned unchanged.
        """

        if isinstance(data, (str, bytes)):
            return data
        elif hasattr(data, 'read'):
            # File-like object: pass through untouched for streaming.
            return data
        elif hasattr(data, '__iter__'):
            result = []
            for k, vs in to_key_val_list(data):
                # A scalar value is treated as a one-element list so that
                # multi-valued keys (k=[v1, v2]) and single values share a path.
                if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
                    vs = [vs]
                for v in vs:
                    if v is not None:
                        # Encode text to UTF-8 bytes; leave bytes as-is.
                        result.append(
                            (k.encode('utf-8') if isinstance(k, str) else k,
                             v.encode('utf-8') if isinstance(v, str) else v))
            return urlencode(result, doseq=True)
        else:
            return data

    @staticmethod
    def _encode_files(files, data):
        """Build the body for a multipart/form-data request.

        Will successfully encode files when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.

        Returns a ``(body, content_type)`` tuple, or ``None`` when there is
        nothing to encode.
        """
        if (not files) or isinstance(data, str):
            return None

        new_fields = []
        fields = to_key_val_list(data or {})
        files = to_key_val_list(files or {})

        # Plain form fields go in first, normalised to text keys/bytes values.
        for field, val in fields:
            if isinstance(val, basestring) or not hasattr(val, '__iter__'):
                val = [val]
            for v in val:
                if v is not None:
                    new_fields.append(
                        (field.decode('utf-8') if isinstance(field, bytes) else field,
                         v.encode('utf-8') if isinstance(v, str) else v))

        for (k, v) in files:
            # support for explicit filename (2-tuple) and explicit
            # content-type (3-tuple); otherwise guess the filename.
            ft = None
            if isinstance(v, (tuple, list)):
                if len(v) == 2:
                    fn, fp = v
                else:
                    fn, fp, ft = v
            else:
                fn = guess_filename(v) or k
                fp = v
            # Wrap raw string/bytes payloads in a file-like object so the
            # uniform fp.read() below works.
            if isinstance(fp, str):
                fp = StringIO(fp)
            if isinstance(fp, bytes):
                fp = BytesIO(fp)

            if ft:
                new_v = (fn, fp.read(), ft)
            else:
                new_v = (fn, fp.read())
            new_fields.append((k, new_v))

        body, content_type = encode_multipart_formdata(new_fields)

        return body, content_type
class RequestHooksMixin(object):
    """Mixin providing hook registration/deregistration on ``self.hooks``."""

    def register_hook(self, event, hook):
        """Properly register a hook.

        ``hook`` may be a single callable or an iterable of callables;
        non-callable items in an iterable are silently skipped.
        """
        # ``collections.Callable`` was removed from the ``collections``
        # namespace in Python 3.10; the builtin ``callable()`` performs the
        # same __call__ check on every supported version.
        if callable(hook):
            self.hooks[event].append(hook)
        elif hasattr(hook, '__iter__'):
            self.hooks[event].extend(h for h in hook if callable(h))

    def deregister_hook(self, event, hook):
        """Deregister a previously registered hook.

        Returns True if the hook existed, False if not.
        """

        try:
            self.hooks[event].remove(hook)
            return True
        except ValueError:
            # ``hook`` was never registered for this event.
            return False
class Request(RequestHooksMixin):
    """A user-created :class:`Request <Request>` object.

    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.

    :param method: HTTP method to use.
    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach the request. If a dictionary is provided, form-encoding will take place.
    :param params: dictionary of URL parameters to append to the URL.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.
    :param hooks: dictionary of callback hooks, for internal usage.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'http://httpbin.org/get')
      >>> req.prepare()
      <PreparedRequest [GET]>
    """
    def __init__(self,
                 method=None,
                 url=None,
                 headers=None,
                 files=None,
                 data=None,
                 params=None,
                 auth=None,
                 cookies=None,
                 hooks=None):

        # Use ``None`` sentinels instead of mutable default arguments
        # (``data=dict()``/``params=dict()`` were shared across all calls).
        # Empty list/dict are interchangeable downstream: both are falsy and
        # never reach the encoders.
        data = [] if data is None else data
        files = [] if files is None else files
        headers = {} if headers is None else headers
        params = {} if params is None else params
        hooks = {} if hooks is None else hooks

        # Start from the default hook registry and merge caller hooks into it.
        # (The registry must NOT be re-assigned to the raw ``hooks`` dict
        # afterwards -- doing so discarded everything register_hook just did.)
        self.hooks = default_hooks()
        for (k, v) in list(hooks.items()):
            self.register_hook(event=k, hook=v)

        self.method = method
        self.url = url
        self.headers = headers
        self.files = files
        self.data = data
        self.params = params
        self.auth = auth
        self.cookies = cookies

    def __repr__(self):
        return '<Request [%s]>' % (self.method)

    def prepare(self):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
        p = PreparedRequest()

        p.prepare_method(self.method)
        p.prepare_url(self.url, self.params)
        p.prepare_headers(self.headers)
        p.prepare_cookies(self.cookies)
        p.prepare_body(self.data, self.files)
        p.prepare_auth(self.auth, self.url)
        # Note that prepare_auth must be last to enable authentication schemes
        # such as OAuth to work on a fully prepared request.

        # This MUST go after prepare_auth. Authenticators could add a hook
        p.prepare_hooks(self.hooks)

        return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
    """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
    containing the exact bytes that will be sent to the server.

    Generated from either a :class:`Request <Request>` object or manually.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'http://httpbin.org/get')
      >>> r = req.prepare()
      <PreparedRequest [GET]>

      >>> s = requests.Session()
      >>> s.send(r)
      <Response [200]>
    """

    def __init__(self):
        #: HTTP verb to send to the server.
        self.method = None
        #: HTTP URL to send the request to.
        self.url = None
        #: dictionary of HTTP headers.
        self.headers = None
        #: request body to send to the server.
        self.body = None
        #: dictionary of callback hooks, for internal usage.
        self.hooks = default_hooks()

    def __repr__(self):
        return '<PreparedRequest [%s]>' % (self.method)

    def prepare_method(self, method):
        """Prepares the given HTTP method (upper-cased per RFC convention)."""
        self.method = method
        if self.method is not None:
            self.method = self.method.upper()

    def prepare_url(self, url, params):
        """Prepares the given HTTP URL: IDNA-encode the host, re-assemble the
        netloc, merge ``params`` into the query string and requote the result.
        """
        #: Accept objects that have string representations.
        try:
            url = unicode(url)
        except NameError:
            # We're on Python 3.
            url = str(url)
        except UnicodeDecodeError:
            pass

        # Support for unicode domain names and paths.
        scheme, auth, host, port, path, query, fragment = parse_url(url)

        if not scheme:
            raise MissingSchema("Invalid URL %r: No schema supplied" % url)

        if not host:
            raise InvalidURL("Invalid URL %r: No host supplied" % url)

        # Only want to apply IDNA to the hostname
        try:
            host = host.encode('idna').decode('utf-8')
        except UnicodeError:
            raise InvalidURL('URL has an invalid label.')

        # Carefully reconstruct the network location
        netloc = auth or ''
        if netloc:
            netloc += '@'
        netloc += host
        if port:
            netloc += ':' + str(port)

        # Bare domains aren't valid URLs.
        if not path:
            path = '/'

        if is_py2:
            # urlunparse on Python 2 mishandles mixed str/unicode parts, so
            # normalise everything to UTF-8 byte strings first.
            if isinstance(scheme, str):
                scheme = scheme.encode('utf-8')
            if isinstance(netloc, str):
                netloc = netloc.encode('utf-8')
            if isinstance(path, str):
                path = path.encode('utf-8')
            if isinstance(query, str):
                query = query.encode('utf-8')
            if isinstance(fragment, str):
                fragment = fragment.encode('utf-8')

        enc_params = self._encode_params(params)
        if enc_params:
            if query:
                query = '%s&%s' % (query, enc_params)
            else:
                query = enc_params

        url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
        self.url = url

    def prepare_headers(self, headers):
        """Prepares the given HTTP headers.

        Header names are forced to ASCII; the result is case-insensitive.
        """

        if headers:
            headers = dict((name.encode('ascii'), value) for name, value in headers.items())
            self.headers = CaseInsensitiveDict(headers)
        else:
            self.headers = CaseInsensitiveDict()

    def prepare_body(self, data, files):
        """Prepares the given HTTP body data.

        Streams (file-like objects/generators) are sent as-is, with either a
        Content-Length or chunked Transfer-Encoding; other data is encoded as
        multipart (when ``files``) or urlencoded form data.
        """

        # Check if file, fo, generator, iterator.
        # If not, run through normal process.

        # Nottin' on you.
        body = None
        content_type = None
        length = None
        is_stream = False

        # A "stream" is any iterable that is not text, a list or a dict.
        is_stream = all([
            hasattr(data, '__iter__'),
            not isinstance(data, basestring),
            not isinstance(data, list),
            not isinstance(data, dict)
        ])

        try:
            length = super_len(data)
        except (TypeError, AttributeError):
            length = False

        if is_stream:
            body = data

            if files:
                raise NotImplementedError('Streamed bodies and files are mutually exclusive.')

            if length:
                self.headers['Content-Length'] = str(length)
            else:
                # Unknown length: fall back to chunked transfer encoding.
                self.headers['Transfer-Encoding'] = 'chunked'
        # Check if file, fo, generator, iterator.
        # If not, run through normal process.
        else:
            # Multi-part file uploads.
            if files:
                (body, content_type) = self._encode_files(files, data)
            else:
                if data:
                    body = self._encode_params(data)
                    # Pre-encoded strings and file-like data carry no implicit
                    # content type; dict/list data is form-urlencoded.
                    if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
                        content_type = None
                    else:
                        content_type = 'application/x-www-form-urlencoded'

            self.prepare_content_length(body)

            # Add content-type if it wasn't explicitly provided.
            if (content_type) and (not 'content-type' in self.headers):
                self.headers['Content-Type'] = content_type

        self.body = body

    def prepare_content_length(self, body):
        """Set the Content-Length header from ``body`` (seeks files to measure
        them, then rewinds); GET/HEAD without a body get no header."""
        if hasattr(body, 'seek') and hasattr(body, 'tell'):
            body.seek(0, 2)
            self.headers['Content-Length'] = str(body.tell())
            body.seek(0, 0)
        elif body is not None:
            l = super_len(body)
            if l:
                self.headers['Content-Length'] = str(l)
        elif self.method not in ('GET', 'HEAD'):
            self.headers['Content-Length'] = '0'

    def prepare_auth(self, auth, url=''):
        """Prepares the given HTTP auth data."""

        # If no Auth is explicitly provided, extract it from the URL first.
        if auth is None:
            url_auth = get_auth_from_url(self.url)
            auth = url_auth if any(url_auth) else None

        if auth:
            if isinstance(auth, tuple) and len(auth) == 2:
                # special-case basic HTTP auth
                auth = HTTPBasicAuth(*auth)

            # Allow auth to make its changes.
            r = auth(self)

            # Update self to reflect the auth changes.
            self.__dict__.update(r.__dict__)

            # Recompute Content-Length (auth may have modified the body).
            self.prepare_content_length(self.body)

    def prepare_cookies(self, cookies):
        """Prepares the given HTTP cookie data.

        Accepts a CookieJar or a plain dict; only sets the Cookie header if
        one is not already present.
        """

        if isinstance(cookies, cookielib.CookieJar):
            cookies = cookies
        else:
            cookies = cookiejar_from_dict(cookies)

        if 'cookie' not in self.headers:
            cookie_header = get_cookie_header(cookies, self)
            if cookie_header is not None:
                self.headers['Cookie'] = cookie_header

    def prepare_hooks(self, hooks):
        """Prepares the given hooks by registering each event's callbacks."""
        for event in hooks:
            self.register_hook(event, hooks[event])
class Response(object):
    """The :class:`Response <Response>` object, which contains a
    server's response to an HTTP request.
    """

    def __init__(self):
        super(Response, self).__init__()

        # ``False`` means "not yet read"; ``None`` means "no content".
        self._content = False
        self._content_consumed = False

        #: Integer Code of responded HTTP Status.
        self.status_code = None

        #: Case-insensitive Dictionary of Response Headers.
        #: For example, ``headers['content-encoding']`` will return the
        #: value of a ``'Content-Encoding'`` response header.
        self.headers = CaseInsensitiveDict()

        #: File-like object representation of response (for advanced usage).
        #: Requires that ``stream=True`` on the request.
        # This requirement does not apply for use internally to Requests.
        self.raw = None

        #: Final URL location of Response.
        self.url = None

        #: Encoding to decode with when accessing r.text.
        self.encoding = None

        #: A list of :class:`Response <Response>` objects from
        #: the history of the Request. Any redirect responses will end
        #: up here. The list is sorted from the oldest to the most recent request.
        self.history = []

        #: Textual reason for the status code, e.g. "Not Found".
        self.reason = None

        #: A CookieJar of Cookies the server sent back.
        self.cookies = cookiejar_from_dict({})

        #: The amount of time elapsed between sending the request
        #: and the arrival of the response (as a timedelta)
        self.elapsed = datetime.timedelta(0)

    def __repr__(self):
        return '<Response [%s]>' % (self.status_code)

    def __bool__(self):
        """Returns true if :attr:`status_code` is 'OK'."""
        return self.ok

    def __nonzero__(self):
        """Returns true if :attr:`status_code` is 'OK'. (Python 2 spelling.)"""
        return self.ok

    def __iter__(self):
        """Allows you to use a response as an iterator."""
        return self.iter_content(128)

    @property
    def ok(self):
        """True when :meth:`raise_for_status` would not raise."""
        try:
            self.raise_for_status()
        except RequestException:
            return False
        return True

    @property
    def apparent_encoding(self):
        """The apparent encoding, provided by the lovely Charade library
        (Thanks, Ian!)."""
        return chardet.detect(self.content)['encoding']

    def iter_content(self, chunk_size=1, decode_unicode=False):
        """Iterates over the response data.  When stream=True is set on the
        request, this avoids reading the content at once into memory for
        large responses.  The chunk size is the number of bytes it should
        read into memory.  This is not necessarily the length of each item
        returned as decoding can take place.
        """
        if self._content_consumed:
            # simulate reading small chunks of the content
            return iter_slices(self._content, chunk_size)

        def generate():
            # Stream from the raw urllib3 response until exhausted, then
            # mark the content as consumed.
            while 1:
                chunk = self.raw.read(chunk_size, decode_content=True)
                if not chunk:
                    break
                yield chunk
            self._content_consumed = True

        gen = generate()

        if decode_unicode:
            gen = stream_decode_response_unicode(gen, self)

        return gen

    def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None):
        """Iterates over the response data, one line at a time.  When
        stream=True is set on the request, this avoids reading the
        content at once into memory for large responses.
        """

        pending = None

        for chunk in self.iter_content(chunk_size=chunk_size,
                                       decode_unicode=decode_unicode):

            if pending is not None:
                chunk = pending + chunk
            lines = chunk.splitlines()

            # If the chunk did not end on a line boundary, hold the last
            # (partial) line back and prepend it to the next chunk.
            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None

            for line in lines:
                yield line

        if pending is not None:
            yield pending

    @property
    def content(self):
        """Content of the response, in bytes."""

        if self._content is False:
            # Read the contents.
            try:
                if self._content_consumed:
                    raise RuntimeError(
                        'The content for this response was already consumed')

                if self.status_code == 0:
                    self._content = None
                else:
                    self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()

            except AttributeError:
                # No ``raw`` object to read from.
                self._content = None

        self._content_consumed = True
        # don't need to release the connection; that's been handled by urllib3
        # since we exhausted the data.
        return self._content

    @property
    def text(self):
        """Content of the response, in unicode.

        if Response.encoding is None and chardet module is available, encoding
        will be guessed.
        """

        # Try charset from content-type
        content = None
        encoding = self.encoding

        if not self.content:
            return str('')

        # Fallback to auto-detected encoding.
        if self.encoding is None:
            encoding = self.apparent_encoding

        # Decode unicode from given encoding.
        try:
            content = str(self.content, encoding, errors='replace')
        except (LookupError, TypeError):
            # A LookupError is raised if the encoding was not found which could
            # indicate a misspelling or similar mistake.
            #
            # A TypeError can be raised if encoding is None
            #
            # So we try blindly encoding.
            content = str(self.content, errors='replace')

        return content

    def json(self, **kwargs):
        """Returns the json-encoded content of a response, if any.

        :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
        """

        if not self.encoding and len(self.content) > 3:
            # No encoding set. JSON RFC 4627 section 3 states we should expect
            # UTF-8, -16 or -32. Detect which one to use; If the detection or
            # decoding fails, fall back to `self.text` (using chardet to make
            # a best guess).
            encoding = guess_json_utf(self.content)
            if encoding is not None:
                return json.loads(self.content.decode(encoding), **kwargs)
        return json.loads(self.text or self.content, **kwargs)

    @property
    def links(self):
        """Returns the parsed header links of the response, if any."""

        header = self.headers.get('link')

        # l = MultiDict()
        l = {}

        if header:
            links = parse_header_links(header)

            for link in links:
                # Prefer the 'rel' value as the key; fall back to the URL.
                key = link.get('rel') or link.get('url')
                l[key] = link

        return l

    def raise_for_status(self):
        """Raises stored :class:`HTTPError`, if one occurred."""

        http_error_msg = ''

        if 400 <= self.status_code < 500:
            http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)

        elif 500 <= self.status_code < 600:
            http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)

        if http_error_msg:
            raise HTTPError(http_error_msg, response=self)

    def close(self):
        """Release the underlying urllib3 connection back to the pool."""
        return self.raw.release_conn()
| |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
# Add python-bitcoinrpc to module search path:
import os
import sys
import time # MVF-Core
import random # MVF-Core
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
assert_equal,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
wait_bitcoinds,
enable_coverage,
check_json_precision,
initialize_chain_clean,
)
from authproxy import AuthServiceProxy, JSONRPCException
class BitcoinTestFramework(object):
    """Base class for RPC regression tests (Python 2).

    Subclasses override run_test/add_options/setup_chain/setup_network;
    ``main()`` drives option parsing, node setup, test execution and cleanup.
    """

    # These may be over-ridden by subclasses:
    def run_test(self):
        # Default smoke test: fresh chain of 200 blocks, matured balance.
        for node in self.nodes:
            assert_equal(node.getblockcount(), 200)
            assert_equal(node.getbalance(), 25*50)

    def add_options(self, parser):
        # Hook for subclasses to add extra command-line options.
        pass

    def setup_chain(self):
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain(self.options.tmpdir)

    def setup_nodes(self):
        return start_nodes(4, self.options.tmpdir)

    def setup_network(self, split = False):
        self.nodes = self.setup_nodes()

        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.

        # If we joined network halves, connect the nodes from the joint
        # on outward. This ensures that chains are properly reorganised.
        if not split:
            connect_nodes_bi(self.nodes, 1, 2)
            sync_blocks(self.nodes[1:3])
            sync_mempools(self.nodes[1:3])

        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 2, 3)
        self.is_network_split = split
        self.sync_all()

    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        assert not self.is_network_split
        # Restart all nodes with the split topology.
        stop_nodes(self.nodes)
        wait_bitcoinds()
        self.setup_network(True)

    def sync_all(self):
        # Sync within each half when split, otherwise across all nodes.
        if self.is_network_split:
            sync_blocks(self.nodes[:2])
            sync_blocks(self.nodes[2:])
            sync_mempools(self.nodes[:2])
            sync_mempools(self.nodes[2:])
        else:
            sync_blocks(self.nodes)
            sync_mempools(self.nodes)

    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        assert self.is_network_split
        stop_nodes(self.nodes)
        wait_bitcoinds()
        self.setup_network(False)

    def main(self):
        """Parse options, run the test, then shut down and clean up."""
        import optparse

        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave bitcoinds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop bitcoinds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default="../../src",
                          help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
        parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                          help="Root directory for datadirs")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        # MVF-Core begin added for tests using randomness (e.g. mvf-core-retarget.py)
        parser.add_option("--randomseed", dest="randomseed",
                          help="Set RNG seed for tests that use randomness (ignored otherwise)")
        # MVF-Core end
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()

        # MVF-Core begin added for tests using randomness (e.g. mvf-bu-retarget.py)
        # Seed the RNG explicitly so failures are reproducible from the seed.
        if self.options.randomseed:
            self.randomseed = int(self.options.randomseed)
        else:
            self.randomseed = time.time()
        random.seed(self.randomseed)
        # MVF-Core end

        if self.options.trace_rpc:
            import logging
            logging.basicConfig(level=logging.DEBUG)

        if self.options.coveragedir:
            enable_coverage(self.options.coveragedir)

        # Make the locally built binaries take precedence on PATH.
        os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']

        check_json_precision()

        success = False
        try:
            if not os.path.isdir(self.options.tmpdir):
                os.makedirs(self.options.tmpdir)
            self.setup_chain()

            self.setup_network()

            self.run_test()

            success = True

        except JSONRPCException as e:
            print("JSONRPC error: "+e.error['message'])
            traceback.print_tb(sys.exc_info()[2])
        except AssertionError as e:
            # NOTE: ``e.message`` is Python-2-only (this file targets py2).
            print("Assertion failed: "+e.message)
            traceback.print_tb(sys.exc_info()[2])
        except Exception as e:
            print("Unexpected exception caught during testing: "+str(e))
            traceback.print_tb(sys.exc_info()[2])

        if not self.options.noshutdown:
            print("Stopping nodes")
            stop_nodes(self.nodes)
            wait_bitcoinds()
        else:
            print("Note: bitcoinds were not stopped and may still be running")

        if not self.options.nocleanup and not self.options.noshutdown:
            print("Cleaning up")
            shutil.rmtree(self.options.tmpdir)

        if success:
            print("Tests successful")
            sys.exit(0)
        else:
            print("Failed")
            sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
    """Framework for p2p comparison testing: node 0 runs the binary under
    test, the remaining nodes run a reference binary."""

    # Can override the num_nodes variable to indicate how many nodes to run.
    def __init__(self):
        self.num_nodes = 2

    def add_options(self, parser):
        # Let the user point at specific test/reference binaries.
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "bitcoind"),
                          help="bitcoind binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=os.getenv("BITCOIND", "bitcoind"),
                          help="bitcoind binary to use for reference nodes (if any)")

    def setup_chain(self):
        # Use print() call syntax for consistency with the rest of this
        # module (a single parenthesized argument behaves identically under
        # the Python 2 print statement).
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, self.num_nodes)

    def setup_network(self):
        self.nodes = start_nodes(
            self.num_nodes, self.options.tmpdir,
            extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
            binary=[self.options.testbinary] +
            [self.options.refbinary]*(self.num_nodes-1))
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.octavia import octavia_base
from heat.engine import support
from heat.engine import translation
class Pool(octavia_base.OctaviaBase):
"""A resource for managing Octavia Pools.
This resources manages octavia LBaaS Pools, which represent a group
of nodes. Pools define the subnet where nodes reside, balancing algorithm,
and the nodes themselves.
"""
PROPERTIES = (
ADMIN_STATE_UP, DESCRIPTION, SESSION_PERSISTENCE, NAME,
LB_ALGORITHM, LISTENER, LOADBALANCER, PROTOCOL,
SESSION_PERSISTENCE_TYPE, SESSION_PERSISTENCE_COOKIE_NAME,
TLS_ENABLED,
) = (
'admin_state_up', 'description', 'session_persistence', 'name',
'lb_algorithm', 'listener', 'loadbalancer', 'protocol',
'type', 'cookie_name', 'tls_enabled',
)
SESSION_PERSISTENCE_TYPES = (
SOURCE_IP, HTTP_COOKIE, APP_COOKIE
) = (
'SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'
)
SUPPORTED_PROTOCOLS = (TCP, HTTP, HTTPS, TERMINATED_HTTPS, PROXY, UDP) = (
'TCP', 'HTTP', 'HTTPS', 'TERMINATED_HTTPS', 'PROXY', 'UDP')
ATTRIBUTES = (
HEALTHMONITOR_ID_ATTR, LISTENERS_ATTR, MEMBERS_ATTR
) = (
'healthmonitor_id', 'listeners', 'members'
)
properties_schema = {
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of this pool.'),
default=True,
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of this pool.'),
update_allowed=True,
default=''
),
SESSION_PERSISTENCE: properties.Schema(
properties.Schema.MAP,
_('Configuration of session persistence.'),
schema={
SESSION_PERSISTENCE_TYPE: properties.Schema(
properties.Schema.STRING,
_('Method of implementation of session '
'persistence feature.'),
required=True,
constraints=[constraints.AllowedValues(
SESSION_PERSISTENCE_TYPES
)]
),
SESSION_PERSISTENCE_COOKIE_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the cookie, '
'required if type is APP_COOKIE.')
)
},
update_allowed=True,
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of this pool.'),
update_allowed=True
),
LB_ALGORITHM: properties.Schema(
properties.Schema.STRING,
_('The algorithm used to distribute load between the members of '
'the pool.'),
required=True,
constraints=[
constraints.AllowedValues(['ROUND_ROBIN', 'LEAST_CONNECTIONS',
'SOURCE_IP', 'SOURCE_IP_PORT']),
],
update_allowed=True,
),
LISTENER: properties.Schema(
properties.Schema.STRING,
_('Listener name or ID to be associated with this pool.'),
constraints=[
constraints.CustomConstraint('octavia.listener')
]
),
LOADBALANCER: properties.Schema(
properties.Schema.STRING,
_('Loadbalancer name or ID to be associated with this pool.'),
constraints=[
constraints.CustomConstraint('octavia.loadbalancer')
],
),
PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('Protocol of the pool.'),
required=True,
constraints=[
constraints.AllowedValues(SUPPORTED_PROTOCOLS),
]
),
TLS_ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('Enable backend member re-encryption.'),
default=False,
update_allowed=True,
support_status=support.SupportStatus(version='14.0.0'),
),
}
attributes_schema = {
HEALTHMONITOR_ID_ATTR: attributes.Schema(
_('ID of the health monitor associated with this pool.'),
type=attributes.Schema.STRING
),
LISTENERS_ATTR: attributes.Schema(
_('Listener associated with this pool.'),
type=attributes.Schema.STRING
),
MEMBERS_ATTR: attributes.Schema(
_('Members associated with this pool.'),
cache_mode=attributes.Schema.CACHE_NONE,
type=attributes.Schema.LIST
),
}
def translation_rules(self, props):
return [
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.LISTENER],
client_plugin=self.client_plugin(),
finder='get_listener',
),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.LOADBALANCER],
client_plugin=self.client_plugin(),
finder='get_loadbalancer',
),
]
def _prepare_args(self, properties):
props = dict((k, v) for k, v in properties.items() if v is not None)
if self.NAME not in props:
props[self.NAME] = self.physical_resource_name()
if self.LISTENER in props:
props['listener_id'] = props.pop(self.LISTENER)
if self.LOADBALANCER in props:
props['loadbalancer_id'] = props.pop(self.LOADBALANCER)
self._prepare_session_persistence(props)
return props
def _prepare_session_persistence(self, props):
session_p = props.get(self.SESSION_PERSISTENCE)
if session_p is not None:
session_props = dict(
(k, v) for k, v in session_p.items() if v is not None)
props[self.SESSION_PERSISTENCE] = session_props
def validate(self):
super(Pool, self).validate()
if (self.properties[self.LISTENER] is None and
self.properties[self.LOADBALANCER] is None):
raise exception.PropertyUnspecifiedError(self.LISTENER,
self.LOADBALANCER)
if self.properties[self.SESSION_PERSISTENCE] is not None:
session_p = self.properties[self.SESSION_PERSISTENCE]
persistence_type = session_p[self.SESSION_PERSISTENCE_TYPE]
if persistence_type == self.APP_COOKIE:
if not session_p.get(self.SESSION_PERSISTENCE_COOKIE_NAME):
msg = (_('Property %(cookie)s is required when %(sp)s '
'type is set to %(app)s.') %
{'cookie': self.SESSION_PERSISTENCE_COOKIE_NAME,
'sp': self.SESSION_PERSISTENCE,
'app': self.APP_COOKIE})
raise exception.StackValidationFailed(message=msg)
elif persistence_type == self.SOURCE_IP:
if session_p.get(self.SESSION_PERSISTENCE_COOKIE_NAME):
msg = (_('Property %(cookie)s must NOT be specified when '
'%(sp)s type is set to %(ip)s.') %
{'cookie': self.SESSION_PERSISTENCE_COOKIE_NAME,
'sp': self.SESSION_PERSISTENCE,
'ip': self.SOURCE_IP})
raise exception.StackValidationFailed(message=msg)
    def _resource_create(self, properties):
        # POST the pool definition; Octavia wraps/unwraps it in a 'pool' key.
        return self.client().pool_create(json={'pool': properties})['pool']
    def _resource_update(self, prop_diff):
        # Only send changed properties that are set to a non-None value.
        props = dict((k, v) for k, v in prop_diff.items() if v is not None)
        self._prepare_session_persistence(props)
        self.client().pool_set(self.resource_id, json={'pool': props})
    def _resource_delete(self):
        # Delete the backing Octavia pool by its stored resource id.
        self.client().pool_delete(self.resource_id)
    def _show_resource(self):
        # Fetch the current Octavia representation of this pool.
        return self.client().pool_show(self.resource_id)
def resource_mapping():
    # Map the Heat resource type name to its implementing class.
    return {
        'OS::Octavia::Pool': Pool,
    }
| |
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
from glue.core import Data
from glue.core.message import SettingsChangeMessage
from glue.core.client import Client
from glue.core.layer_artist import LayerArtistContainer
from glue.utils.matplotlib import freeze_margins
__all__ = ['VizClient', 'GenericMplClient']
class VizClient(Client):
    """
    The VizClient class provides an interface (and minimal
    implementation) for a generic client that creates
    visualizations. The goal of VizClient is to provide a reusable way
    to organize client plotting code.

    Clients which extend VizClient should override the following methods
    to perform specific visualization tasks

    * _update_axis_labels
    * _update_data_plot
    * _update_subset_single
    * _redraw
    * init_layer

    VizClient provides a public refresh() method that calls all of
    these methods.

    Attributes
    ----------
    options: A dictionary of global plot options, to be handled by
             subclasses.
    """

    def __init__(self, data, options=None):
        Client.__init__(self, data)
        # A falsy options value (None or {}) yields a fresh dict, matching
        # the original `if not options` behavior.
        self.options = options or {}

    def _add_data(self, message):
        pass

    def _remove_data(self, message):
        pass

    def _update_data(self, message):
        """
        Method to handle messages sent by the dataset. Refreshes the display.
        """
        self._update_data_plot()
        self.refresh()

    def _add_subset(self, message):
        """
        Method to handle messages sent when subsets are created.
        """
        self.init_layer(message.subset)
        self._redraw()

    def _update_subset(self, message):
        """
        Method to handle messages sent when subsets are modified.
        The plot properties of the modified subset are refreshed.
        """
        self._update_subset_single(message.subset, redraw=True)

    def refresh(self):
        """
        Update and redraw all plot information.
        """
        self._update_data_plot()
        self._update_subset_plots()
        self._update_axis_labels()
        self._redraw()

    def _redraw(self):
        """
        Redraw, but do not update, plot information
        """
        raise NotImplementedError("VizClient cannot draw!")

    def _update_axis_labels(self):
        """
        Sync the axis labels to reflect which components are
        currently being plotted
        """
        raise NotImplementedError("VizClient cannot draw!")

    def _update_data_plot(self):
        """
        Sync the location of the scatter points to
        reflect what components are being plotted
        """
        raise NotImplementedError("VizClient cannot draw!")

    def _update_subset_plots(self, redraw=False):
        """
        Sync the location and visual properties
        of each point in each subset
        """
        # Plain loop for side effects instead of building a throwaway list.
        for d in self.data:
            for s in d.subsets:
                self._update_subset_single(s)
        if redraw:
            self._redraw()

    def _update_subset_single(self, s, redraw=False):
        """
        Update the properties of a subset

        Parameters
        ----------
        s: A subset instance
            The subset to refresh.
        """
        raise NotImplementedError("VizClient Cannot Draw!")

    def init_layer(self, layer):
        """Initialize a plot of a data or subset object for the first time.

        Parameters
        ----------
        layer: Data or subset instance
        """
        raise NotImplementedError()
def set_background_color(axes, color):
    """Paint both the figure and the axes patch with *color*."""
    for target in (axes.figure, axes.patch):
        target.set_facecolor(color)
def set_foreground_color(axes, color):
    """Color the frame, ticks, tick labels and axis labels of *axes*.

    Handles both WCS-style axes (which expose a ``coords`` attribute) and
    plain matplotlib axes.
    """
    if hasattr(axes, 'coords'):
        axes.coords.frame.set_color(color)
        for coord in axes.coords:
            coord.set_ticks(color=color)
            coord.set_ticklabel(color=color)
            coord.axislabels.set_color(color)
        return
    for spine in axes.spines.values():
        spine.set_color(color)
    axes.tick_params(color=color, labelcolor=color)
    for axis in (axes.xaxis, axes.yaxis):
        axis.label.set_color(color)
def update_appearance_from_settings(axes):
    """Re-apply the global glue background/foreground colors to *axes*."""
    # Imported lazily so glue.config is only touched when actually needed.
    from glue.config import settings
    set_background_color(axes, settings.BACKGROUND_COLOR)
    set_foreground_color(axes, settings.FOREGROUND_COLOR)
def init_mpl(figure=None, axes=None, wcs=False, axes_factory=None):
    """Return a ``(figure, axes)`` pair, creating either as needed.

    Parameters
    ----------
    figure : matplotlib Figure, optional
        Existing figure to draw into; a new one is created if omitted.
    axes : matplotlib Axes, optional
        Existing axes to use; if both *axes* and *figure* are given they
        must belong together.
    wcs : bool, optional
        If `True` and WCSAxes is importable, create a WCSAxesSubplot.
    axes_factory : callable, optional
        Callable taking the figure and returning new axes; overrides the
        default ``add_subplot(1, 1, 1)`` when no axes are supplied.

    Raises
    ------
    ValueError
        If *axes* and *figure* are both given but inconsistent.
    """
    if (axes is not None and figure is not None and
            axes.figure is not figure):
        raise ValueError("Axes and figure are incompatible")
    # WCS support is optional; fall back to plain axes if unavailable.
    try:
        from glue.external.wcsaxes import WCSAxesSubplot
    except ImportError:
        WCSAxesSubplot = None
    if axes is not None:
        _axes = axes
        _figure = axes.figure
    else:
        _figure = figure or plt.figure()
        if wcs and WCSAxesSubplot is not None:
            _axes = WCSAxesSubplot(_figure, 111)
            _figure.add_axes(_axes)
        else:
            if axes_factory is not None:
                _axes = axes_factory(_figure)
            else:
                _axes = _figure.add_subplot(1, 1, 1)
    # Fix the margins so canvas resizes do not change them.
    freeze_margins(_axes, margins=[1, 0.25, 0.50, 0.25])
    update_appearance_from_settings(_axes)
    return _figure, _axes
class GenericMplClient(Client):
    """
    This client base class handles the logic of adding, removing,
    and updating layers.
    Subsets are auto-added and removed with datasets.
    New subsets are auto-added iff the data has already been added
    """
    def __init__(self, data=None, figure=None, axes=None,
                 layer_artist_container=None, axes_factory=None):
        super(GenericMplClient, self).__init__(data=data)
        if axes_factory is None:
            axes_factory = self.create_axes
        figure, self.axes = init_mpl(figure, axes, axes_factory=axes_factory)
        # Container tracking one artist per plotted layer.
        self.artists = layer_artist_container
        if self.artists is None:
            self.artists = LayerArtistContainer()
        self._connect()
    def create_axes(self, figure):
        # Default axes factory used when none is supplied to __init__.
        return figure.add_subplot(1, 1, 1)
    def _connect(self):
        # Hook for subclasses to wire up extra signals; no-op here.
        pass
    @property
    def collect(self):
        # a better name for the data collection
        return self.data
    def _redraw(self):
        self.axes.figure.canvas.draw()
    def new_layer_artist(self, layer):
        raise NotImplementedError
    def apply_roi(self, roi):
        raise NotImplementedError
    def _update_layer(self, layer):
        raise NotImplementedError
    def add_layer(self, layer):
        """
        Add a new Data or Subset layer to the plot.
        Returns the created layer artist
        :param layer: The layer to add
        :type layer: :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
        """
        if layer.data not in self.collect:
            return
        if layer in self.artists:
            return self.artists[layer][0]
        result = self.new_layer_artist(layer)
        self.artists.append(result)
        self._update_layer(layer)
        # Recursively ensure the parent data and all sibling subsets are
        # plotted as well (recursion stops at the `layer in self.artists`
        # check above).
        self.add_layer(layer.data)
        for s in layer.data.subsets:
            self.add_layer(s)
        if layer.data is layer: # Added Data object. Relimit view
            self.axes.autoscale_view(True, True, True)
        return result
    def remove_layer(self, layer):
        # Removing a Data layer also removes all of its subset layers.
        if layer not in self.artists:
            return
        self.artists.pop(layer)
        if isinstance(layer, Data):
            list(map(self.remove_layer, layer.subsets))
        self._redraw()
    def set_visible(self, layer, state):
        """
        Toggle a layer's visibility
        :param layer: which layer to modify
        :param state: True or False
        """
        # NOTE(review): no-op in this base class; subclasses presumably
        # implement the actual toggle — confirm before relying on it.
    def _update_all(self):
        # Refresh every currently-plotted layer.
        for layer in self.artists.layers:
            self._update_layer(layer)
    def __contains__(self, layer):
        return layer in self.artists
    # Hub message handling
    def _add_subset(self, message):
        self.add_layer(message.sender)
    def _remove_subset(self, message):
        self.remove_layer(message.sender)
    def _update_subset(self, message):
        self._update_layer(message.sender)
    def _update_data(self, message):
        self._update_layer(message.sender)
    def _remove_data(self, message):
        self.remove_layer(message.data)
    def register_to_hub(self, hub):
        super(GenericMplClient, self).register_to_hub(hub)
        # Only react to settings changes that affect plot colors.
        def is_appearance_settings(msg):
            return ('BACKGROUND_COLOR' in msg.settings
                    or 'FOREGROUND_COLOR' in msg.settings)
        hub.subscribe(self, SettingsChangeMessage,
                      self._update_appearance_from_settings,
                      filter=is_appearance_settings)
    def _update_appearance_from_settings(self, message):
        update_appearance_from_settings(self.axes)
        self._redraw()
    def restore_layers(self, layers, context):
        """ Re-generate plot layers from a glue-serialized list"""
        for l in layers:
            l.pop('_type')
            props = dict((k, context.object(v)) for k, v in l.items())
            layer = self.add_layer(props['layer'])
            layer.properties = props
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for Residual Networks.
Residual networks ('v1' ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant was introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer
rather than after.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Batch-normalization hyperparameters (momentum/epsilon) shared by all layers.
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-5
# Pre-activation ('v2') ResNet is the default variant.
DEFAULT_VERSION = 2
DEFAULT_DTYPE = tf.float32
# Low-precision dtypes whose variables are stored in fp32 and cast on use.
CASTABLE_TYPES = (tf.float16,)
ALLOWED_TYPES = (DEFAULT_DTYPE,) + CASTABLE_TYPES
################################################################################
# Convenience functions for building the ResNet model.
################################################################################
def batch_norm(inputs, training, data_format):
  """Performs a batch normalization using a standard set of parameters."""
  # fused=True gives a significant performance boost. See
  # https://www.tensorflow.org/performance/performance_guide#common_fused_ops
  channel_axis = 1 if data_format == 'channels_first' else 3
  return tf.compat.v1.layers.batch_normalization(
      inputs=inputs, axis=channel_axis, momentum=_BATCH_NORM_DECAY,
      epsilon=_BATCH_NORM_EPSILON, center=True, scale=True,
      training=training, fused=True)
def fixed_padding(inputs, kernel_size, data_format):
  """Pads the input along the spatial dimensions independently of input size.

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
      Should be a positive integer.
    data_format: The input format ('channels_last' or 'channels_first').

  Returns:
    A tensor with the same format as the input with the data either intact
    (if kernel_size == 1) or padded (if kernel_size > 1).
  """
  pad_total = kernel_size - 1
  pad_beg = pad_total // 2
  spatial = [pad_beg, pad_total - pad_beg]
  none = [0, 0]
  # Batch and channel dimensions are never padded.
  if data_format == 'channels_first':
    paddings = [none, none, spatial, spatial]
  else:
    paddings = [none, spatial, spatial, none]
  return tf.pad(tensor=inputs, paddings=paddings)
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
  """Strided 2-D convolution with explicit padding.

  The padding is consistent and based only on `kernel_size`, not on the
  dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
  """
  if strides > 1:
    inputs = fixed_padding(inputs, kernel_size, data_format)
  pad_scheme = 'SAME' if strides == 1 else 'VALID'
  return tf.compat.v1.layers.conv2d(
      inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
      padding=pad_scheme, use_bias=False,
      kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
      data_format=data_format)
################################################################################
# ResNet block definitions.
################################################################################
def _building_block_v1(inputs, filters, training, projection_shortcut, strides,
                       data_format):
  """A single block for ResNet v1, without a bottleneck.

  Layout: conv -> BN -> ReLU -> conv -> BN, then add the (possibly
  projected) shortcut and apply a final ReLU, as described by:
    Deep Residual Learning for Image Recognition
    https://arxiv.org/pdf/1512.03385.pdf
    by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015.

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    filters: The number of filters for the convolutions.
    training: A Boolean for whether the model is in training or inference
      mode. Needed for batch normalization.
    projection_shortcut: The function to use for projection shortcuts
      (typically a 1x1 convolution when downsampling the input).
    strides: The block's stride. If greater than 1, this block will ultimately
      downsample the input.
    data_format: The input format ('channels_last' or 'channels_first').

  Returns:
    The output tensor of the block; shape should match inputs.
  """
  shortcut = inputs
  if projection_shortcut is not None:
    # Project (and batch-normalize) the shortcut so shapes match.
    shortcut = batch_norm(inputs=projection_shortcut(inputs),
                          training=training, data_format=data_format)
  net = conv2d_fixed_padding(inputs=inputs, filters=filters, kernel_size=3,
                             strides=strides, data_format=data_format)
  net = tf.nn.relu(batch_norm(net, training, data_format))
  net = conv2d_fixed_padding(inputs=net, filters=filters, kernel_size=3,
                             strides=1, data_format=data_format)
  net = batch_norm(net, training, data_format)
  return tf.nn.relu(net + shortcut)
def _building_block_v2(inputs, filters, training, projection_shortcut, strides,
                       data_format):
  """A single block for ResNet v2, without a bottleneck.

  Layout: BN -> ReLU -> conv -> BN -> ReLU -> conv, then add the
  (possibly projected) shortcut, as described by:
    Identity Mappings in Deep Residual Networks
    https://arxiv.org/pdf/1603.05027.pdf
    by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016.

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    filters: The number of filters for the convolutions.
    training: A Boolean for whether the model is in training or inference
      mode. Needed for batch normalization.
    projection_shortcut: The function to use for projection shortcuts
      (typically a 1x1 convolution when downsampling the input).
    strides: The block's stride. If greater than 1, this block will ultimately
      downsample the input.
    data_format: The input format ('channels_last' or 'channels_first').

  Returns:
    The output tensor of the block; shape should match inputs.
  """
  preact = tf.nn.relu(batch_norm(inputs, training, data_format))
  # The projection shortcut comes after the first batch norm and ReLU
  # since it performs a 1x1 convolution.
  if projection_shortcut is not None:
    shortcut = projection_shortcut(preact)
  else:
    shortcut = inputs
  net = conv2d_fixed_padding(inputs=preact, filters=filters, kernel_size=3,
                             strides=strides, data_format=data_format)
  net = tf.nn.relu(batch_norm(net, training, data_format))
  net = conv2d_fixed_padding(inputs=net, filters=filters, kernel_size=3,
                             strides=1, data_format=data_format)
  return net + shortcut
def _bottleneck_block_v1(inputs, filters, training, projection_shortcut,
                         strides, data_format):
  """A single block for ResNet v1, with a bottleneck.
  Similar to _building_block_v1(), except using the "bottleneck" blocks
  described in:
  Convolution then batch normalization then ReLU as described by:
  Deep Residual Learning for Image Recognition
  https://arxiv.org/pdf/1512.03385.pdf
  by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015.
  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    filters: The number of filters for the convolutions.
    training: A Boolean for whether the model is in training or inference
      mode. Needed for batch normalization.
    projection_shortcut: The function to use for projection shortcuts
      (typically a 1x1 convolution when downsampling the input).
    strides: The block's stride. If greater than 1, this block will ultimately
      downsample the input.
    data_format: The input format ('channels_last' or 'channels_first').
  Returns:
    The output tensor of the block; shape should match inputs.
  """
  shortcut = inputs
  if projection_shortcut is not None:
    shortcut = projection_shortcut(inputs)
    shortcut = batch_norm(inputs=shortcut, training=training,
                          data_format=data_format)
  # 1x1 convolution reducing to `filters` channels.
  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=1, strides=1,
      data_format=data_format)
  inputs = batch_norm(inputs, training, data_format)
  inputs = tf.nn.relu(inputs)
  # 3x3 convolution; the only (possibly) strided conv in the block.
  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=3, strides=strides,
      data_format=data_format)
  inputs = batch_norm(inputs, training, data_format)
  inputs = tf.nn.relu(inputs)
  # 1x1 convolution expanding back out to 4 * filters channels.
  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,
      data_format=data_format)
  inputs = batch_norm(inputs, training, data_format)
  inputs += shortcut
  inputs = tf.nn.relu(inputs)
  return inputs
def _bottleneck_block_v2(inputs, filters, training, projection_shortcut,
                         strides, data_format):
  """A single block for ResNet v2, with a bottleneck.
  Similar to _building_block_v2(), except using the "bottleneck" blocks
  described in:
  Convolution then batch normalization then ReLU as described by:
  Deep Residual Learning for Image Recognition
  https://arxiv.org/pdf/1512.03385.pdf
  by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015.
  Adapted to the ordering conventions of:
  Batch normalization then ReLu then convolution as described by:
  Identity Mappings in Deep Residual Networks
  https://arxiv.org/pdf/1603.05027.pdf
  by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016.
  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    filters: The number of filters for the convolutions.
    training: A Boolean for whether the model is in training or inference
      mode. Needed for batch normalization.
    projection_shortcut: The function to use for projection shortcuts
      (typically a 1x1 convolution when downsampling the input).
    strides: The block's stride. If greater than 1, this block will ultimately
      downsample the input.
    data_format: The input format ('channels_last' or 'channels_first').
  Returns:
    The output tensor of the block; shape should match inputs.
  """
  shortcut = inputs
  inputs = batch_norm(inputs, training, data_format)
  inputs = tf.nn.relu(inputs)
  # The projection shortcut should come after the first batch norm and ReLU
  # since it performs a 1x1 convolution.
  if projection_shortcut is not None:
    shortcut = projection_shortcut(inputs)
  # 1x1 reduce -> 3x3 (possibly strided) -> 1x1 expand to 4 * filters.
  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=1, strides=1,
      data_format=data_format)
  inputs = batch_norm(inputs, training, data_format)
  inputs = tf.nn.relu(inputs)
  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=3, strides=strides,
      data_format=data_format)
  inputs = batch_norm(inputs, training, data_format)
  inputs = tf.nn.relu(inputs)
  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,
      data_format=data_format)
  return inputs + shortcut
def block_layer(inputs, filters, bottleneck, block_fn, blocks, strides,
                training, name, data_format):
  """Creates one layer of blocks for the ResNet model.

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    filters: The number of filters for the first convolution of the layer.
    bottleneck: Is the block created a bottleneck block.
    block_fn: The block to use within the model, either `building_block` or
      `bottleneck_block`.
    blocks: The number of blocks contained in the layer.
    strides: The stride to use for the first convolution of the layer. If
      greater than 1, this layer will ultimately downsample the input.
    training: Either True or False, whether we are currently training the
      model. Needed for batch norm.
    name: A string name for the tensor output of the block layer.
    data_format: The input format ('channels_last' or 'channels_first').

  Returns:
    The output tensor of the block layer.
  """
  # Bottleneck blocks end with 4x the number of filters as they start with.
  filters_out = 4 * filters if bottleneck else filters

  def projection_shortcut(inputs):
    return conv2d_fixed_padding(
        inputs=inputs, filters=filters_out, kernel_size=1, strides=strides,
        data_format=data_format)

  # Only the first block per block_layer uses projection_shortcut and strides.
  net = block_fn(inputs, filters, training, projection_shortcut, strides,
                 data_format)
  for _ in range(blocks - 1):
    net = block_fn(net, filters, training, None, 1, data_format)
  return tf.identity(net, name)
class Model(object):
  """Base class for building the Resnet Model."""
  def __init__(self, resnet_size, bottleneck, num_classes, num_filters,
               kernel_size,
               conv_stride, first_pool_size, first_pool_stride,
               block_sizes, block_strides,
               resnet_version=DEFAULT_VERSION, data_format=None,
               dtype=DEFAULT_DTYPE):
    """Creates a model for classifying an image.
    Args:
      resnet_size: A single integer for the size of the ResNet model.
      bottleneck: Use regular blocks or bottleneck blocks.
      num_classes: The number of classes used as labels.
      num_filters: The number of filters to use for the first block layer
        of the model. This number is then doubled for each subsequent block
        layer.
      kernel_size: The kernel size to use for convolution.
      conv_stride: stride size for the initial convolutional layer
      first_pool_size: Pool size to be used for the first pooling layer.
        If none, the first pooling layer is skipped.
      first_pool_stride: stride size for the first pooling layer. Not used
        if first_pool_size is None.
      block_sizes: A list containing n values, where n is the number of sets of
        block layers desired. Each value should be the number of blocks in the
        i-th set.
      block_strides: List of integers representing the desired stride size for
        each of the sets of block layers. Should be same length as block_sizes.
      resnet_version: Integer representing which version of the ResNet network
        to use. See README for details. Valid values: [1, 2]
      data_format: Input format ('channels_last', 'channels_first', or None).
        If set to None, the format is dependent on whether a GPU is available.
      dtype: The TensorFlow dtype to use for calculations. If not specified
        tf.float32 is used.
    Raises:
      ValueError: if invalid version is selected.
    """
    self.resnet_size = resnet_size
    if not data_format:
      # NOTE(review): this checks whether TF was *built* with CUDA, not
      # whether a GPU is actually present at runtime — confirm intent.
      data_format = (
          'channels_first' if tf.test.is_built_with_cuda() else 'channels_last')
    self.resnet_version = resnet_version
    if resnet_version not in (1, 2):
      raise ValueError(
          'Resnet version should be 1 or 2. See README for citations.')
    self.bottleneck = bottleneck
    # Pick the block implementation from the (bottleneck, version) pair.
    if bottleneck:
      if resnet_version == 1:
        self.block_fn = _bottleneck_block_v1
      else:
        self.block_fn = _bottleneck_block_v2
    else:
      if resnet_version == 1:
        self.block_fn = _building_block_v1
      else:
        self.block_fn = _building_block_v2
    if dtype not in ALLOWED_TYPES:
      raise ValueError('dtype must be one of: {}'.format(ALLOWED_TYPES))
    self.data_format = data_format
    self.num_classes = num_classes
    self.num_filters = num_filters
    self.kernel_size = kernel_size
    self.conv_stride = conv_stride
    self.first_pool_size = first_pool_size
    self.first_pool_stride = first_pool_stride
    self.block_sizes = block_sizes
    self.block_strides = block_strides
    self.dtype = dtype
    # v2 needs one final BN+ReLU after the last block (see __call__).
    self.pre_activation = resnet_version == 2
  def _custom_dtype_getter(self, getter, name, shape=None, dtype=DEFAULT_DTYPE,
                           *args, **kwargs):
    """Creates variables in fp32, then casts to fp16 if necessary.
    This function is a custom getter. A custom getter is a function with the
    same signature as tf.get_variable, except it has an additional getter
    parameter. Custom getters can be passed as the `custom_getter` parameter of
    tf.variable_scope. Then, tf.get_variable will call the custom getter,
    instead of directly getting a variable itself. This can be used to change
    the types of variables that are retrieved with tf.get_variable.
    The `getter` parameter is the underlying variable getter, that would have
    been called if no custom getter was used. Custom getters typically get a
    variable with `getter`, then modify it in some way.
    This custom getter will create an fp32 variable. If a low precision
    (e.g. float16) variable was requested it will then cast the variable to the
    requested dtype. The reason we do not directly create variables in low
    precision dtypes is that applying small gradients to such variables may
    cause the variable not to change.
    Args:
      getter: The underlying variable getter, that has the same signature as
        tf.get_variable and returns a variable.
      name: The name of the variable to get.
      shape: The shape of the variable to get.
      dtype: The dtype of the variable to get. Note that if this is a low
        precision dtype, the variable will be created as a tf.float32 variable,
        then cast to the appropriate dtype
      *args: Additional arguments to pass unmodified to getter.
      **kwargs: Additional keyword arguments to pass unmodified to getter.
    Returns:
      A variable which is cast to fp16 if necessary.
    """
    if dtype in CASTABLE_TYPES:
      var = getter(name, shape, tf.float32, *args, **kwargs)
      return tf.cast(var, dtype=dtype, name=name + '_cast')
    else:
      return getter(name, shape, dtype, *args, **kwargs)
  def _model_variable_scope(self):
    """Returns a variable scope that the model should be created under.
    If self.dtype is a castable type, model variable will be created in fp32
    then cast to self.dtype before being used.
    Returns:
      A variable scope for the model.
    """
    return tf.compat.v1.variable_scope('resnet_model',
                                       custom_getter=self._custom_dtype_getter)
  def __call__(self, inputs, training):
    """Add operations to classify a batch of input images.
    Args:
      inputs: A Tensor representing a batch of input images.
      training: A boolean. Set to True to add operations required only when
        training the classifier.
    Returns:
      A logits Tensor with shape [<batch_size>, self.num_classes].
    """
    with self._model_variable_scope():
      if self.data_format == 'channels_first':
        # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).
        # This provides a large performance boost on GPU. See
        # https://www.tensorflow.org/performance/performance_guide#data_formats
        inputs = tf.transpose(a=inputs, perm=[0, 3, 1, 2])
      inputs = conv2d_fixed_padding(
          inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size,
          strides=self.conv_stride, data_format=self.data_format)
      inputs = tf.identity(inputs, 'initial_conv')
      # We do not include batch normalization or activation functions in V2
      # for the initial conv1 because the first ResNet unit will perform these
      # for both the shortcut and non-shortcut paths as part of the first
      # block's projection. Cf. Appendix of [2].
      if self.resnet_version == 1:
        inputs = batch_norm(inputs, training, self.data_format)
        inputs = tf.nn.relu(inputs)
      if self.first_pool_size:
        inputs = tf.compat.v1.layers.max_pooling2d(
            inputs=inputs, pool_size=self.first_pool_size,
            strides=self.first_pool_stride, padding='SAME',
            data_format=self.data_format)
        inputs = tf.identity(inputs, 'initial_max_pool')
      # Filter count doubles with each set of block layers.
      for i, num_blocks in enumerate(self.block_sizes):
        num_filters = self.num_filters * (2**i)
        inputs = block_layer(
            inputs=inputs, filters=num_filters, bottleneck=self.bottleneck,
            block_fn=self.block_fn, blocks=num_blocks,
            strides=self.block_strides[i], training=training,
            name='block_layer{}'.format(i + 1), data_format=self.data_format)
      # Only apply the BN and ReLU for model that does pre_activation in each
      # building/bottleneck block, eg resnet V2.
      if self.pre_activation:
        inputs = batch_norm(inputs, training, self.data_format)
        inputs = tf.nn.relu(inputs)
      # The current top layer has shape
      # `batch_size x pool_size x pool_size x final_size`.
      # ResNet does an Average Pooling layer over pool_size,
      # but that is the same as doing a reduce_mean. We do a reduce_mean
      # here because it performs better than AveragePooling2D.
      axes = [2, 3] if self.data_format == 'channels_first' else [1, 2]
      inputs = tf.reduce_mean(input_tensor=inputs, axis=axes, keepdims=True)
      inputs = tf.identity(inputs, 'final_reduce_mean')
      inputs = tf.squeeze(inputs, axes)
      inputs = tf.compat.v1.layers.dense(inputs=inputs, units=self.num_classes)
      inputs = tf.identity(inputs, 'final_dense')
      return inputs
| |
#!/usr/bin/env python
__author__ = 'Dave Foster <dfoster@asascience.com>'
from nose.plugins.attrib import attr
from mock import Mock, sentinel, patch, ANY, call, MagicMock
from gevent import event, spawn
import unittest
from zope.interface.declarations import implements
from zope.interface.interface import Interface
from gevent import sleep
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.unit_test import PyonTestCase
from pyon.core import exception
from pyon.core.exception import BadRequest
from pyon.core.bootstrap import get_sys_name, CFG
from pyon.container.cc import Container
from pyon.core.interceptor.interceptor import Invocation
from pyon.net.channel import BaseChannel, SendChannel, BidirClientChannel, SubscriberChannel, ChannelClosedError, ServerChannel, RecvChannel, ListenChannel
from pyon.net.endpoint import EndpointUnit, BaseEndpoint, RPCServer, Subscriber, Publisher, RequestResponseClient, RequestEndpointUnit, RPCRequestEndpointUnit, RPCClient, RPCResponseEndpointUnit, EndpointError, SendingBaseEndpoint, ListeningBaseEndpoint
from pyon.net.messaging import NodeB
from pyon.ion.service import BaseService
from pyon.net.transport import NameTrio, BaseTransport
# NO INTERCEPTORS - we use these mock-like objects up top here which deliver received messages that don't go through the interceptor stack.
# Empty interceptor stacks for every direction, passed to endpoints under test.
no_interceptors = {'message_incoming': [],
                   'message_outgoing': [],
                   'process_incoming': [],
                   'process_outgoing': []}
def assert_called_once_with_header(test, mock, expected):
    """Assert *mock* was called exactly once with headers equal to *expected*.

    Only the header argument (second positional arg) is compared: responses
    may carry stacks, so a plain mock.assert_called_once_with would fail.
    """
    test.assertEqual(1, mock.call_count)
    args, _kwargs = mock.call_args
    test.assertEqual(expected, args[1])
# NOTE: StandardError is Python 2 only, consistent with the rest of this file.
class TestError(StandardError):
    """
    Newly defined error, used for side effects in Mock tests.
    """
    pass
@attr('UNIT')
class TestEndpointUnit(PyonTestCase):
    """Unit tests for EndpointUnit using mocked channels (no messaging)."""
    def setUp(self):
        self._endpoint_unit = EndpointUnit(interceptors={})
    def test_attach_channel(self):
        ch = Mock(spec=BaseChannel)
        self._endpoint_unit.attach_channel(ch)
        self.assertTrue(self._endpoint_unit.channel is not None)
        self.assertEquals(self._endpoint_unit.channel, ch)
    @patch('pyon.net.endpoint.get_ion_ts', Mock(return_value=sentinel.ts))
    def test_send(self):
        # need a channel to send on
        self.assertRaises(AttributeError, self._endpoint_unit.send, "fake")
        ch = Mock(spec=SendChannel)
        self._endpoint_unit.attach_channel(ch)
        # send() adds a 'ts' header (patched above to a sentinel).
        self._endpoint_unit.send("hi", {'header':'value'})
        ch.send.assert_called_once_with('hi', {'header':'value', 'ts':sentinel.ts})
    def test_close(self):
        ch = Mock(spec=BaseChannel)
        self._endpoint_unit.attach_channel(ch)
        self._endpoint_unit.close()
        ch.close.assert_called_once_with()
    def test_build_header(self):
        head = self._endpoint_unit._build_header({'fake': 'content'}, {})
        self.assertTrue(isinstance(head, dict))
    def test_build_payload(self):
        # Base implementation passes the message through unchanged.
        fakemsg = {'fake':'content'}
        msg = self._endpoint_unit._build_payload(fakemsg, {'fake':'header'})
        self.assertEquals(msg, fakemsg)
    def test_build_msg(self):
        fakemsg = {'fake':'content'}
        msg, headers = self._endpoint_unit._build_msg(fakemsg, {})
        self.assertEquals(msg, fakemsg)
        self.assertEquals(headers, {'ts':ANY})
    def test_intercept_in(self):
        # intercept_in should build an inbound invocation and run it.
        self._endpoint_unit._build_invocation = Mock()
        self._endpoint_unit._intercept_msg_in = Mock()
        self._endpoint_unit.intercept_in(sentinel.msg, sentinel.headers)
        self._endpoint_unit._build_invocation.assert_called_once_with(path=Invocation.PATH_IN,
                                                                      message=sentinel.msg,
                                                                      headers=sentinel.headers)
        self.assertTrue(self._endpoint_unit._intercept_msg_in.called)
    def test__message_received(self):
        # _message_received should delegate to message_received and return
        # its value.
        self._endpoint_unit.message_received = Mock()
        self._endpoint_unit.message_received.return_value = sentinel.msg_return
        retval = self._endpoint_unit._message_received(sentinel.msg, sentinel.headers)
        self.assertEquals(retval, sentinel.msg_return)
        self.assertTrue(self._endpoint_unit.message_received.called)
@attr('UNIT')
class TestBaseEndpoint(PyonTestCase):
    """Unit tests for BaseEndpoint creation, node resolution and shutdown."""

    def setUp(self):
        self._node = Mock(spec=NodeB)
        self._ef = BaseEndpoint(node=self._node)
        self._ch = Mock(spec=SendChannel)
        # any channel request made against the node returns our mocked channel
        self._node.channel.return_value = self._ch

    def test_create_endpoint(self):
        e = self._ef.create_endpoint()
        # check attrs
        self.assertTrue(hasattr(e, 'channel'))
        # make sure we can shut it down
        e.close()
        self._ch.close.assert_any_call()

    def test_create_endpoint_existing_channel(self):
        ch = Mock(spec=SendChannel)
        e = self._ef.create_endpoint(existing_channel=ch)
        self.assertEquals(e.channel, ch)
        # an existing channel must not be re-connected by create_endpoint
        self.assertEquals(ch.connect.call_count, 0)
        ch.connect("exist")
        ch.connect.assert_called_once_with('exist')
        e.close()

    def test_create_endpoint_kwarg(self):
        """
        Make sure our kwarg gets set.
        """
        class OptEndpointUnit(EndpointUnit):
            def __init__(self, opt=None, **kwargs):
                self._opt = opt
                EndpointUnit.__init__(self, **kwargs)
        self._ef.endpoint_unit_type = OptEndpointUnit
        # extra kwargs to create_endpoint flow into the endpoint unit ctor
        e = self._ef.create_endpoint(opt="stringer")
        self.assertTrue(hasattr(e, "_opt"))
        self.assertEquals(e._opt, "stringer")

    def test__ensure_node_errors(self):
        # no node on the endpoint and no container instance -> must raise
        bep = BaseEndpoint()
        gcimock = Mock()
        gcimock.return_value = None
        with patch('pyon.net.endpoint.BaseEndpoint._get_container_instance', gcimock):
            self.assertRaises(EndpointError, bep._ensure_node)

    @patch('pyon.net.endpoint.BaseEndpoint._get_container_instance')
    def test__ensure_node_existing_node(self, gcimock):
        # a node set at construction time short-circuits container lookup
        self._ef._ensure_node()
        self.assertFalse(gcimock.called)

    @patch('pyon.net.endpoint.BaseEndpoint._get_container_instance')
    def test__ensure_node(self, gcimock):
        bep = BaseEndpoint()
        self.assertIsNone(bep.node)
        bep._ensure_node()
        # the node is pulled off the (mocked) container instance
        self.assertEquals(bep.node, gcimock().node)

    def test__get_container_instance(self):
        c = Container() # ensure we've got an instance in Container.instance
        self.assertEquals(BaseEndpoint._get_container_instance(), c)

    def test_close(self):
        bep = BaseEndpoint()
        bep.close()
        # well, it's just a pass, so nothing happens/there for us to test
@attr('UNIT')
class TestSendingBaseEndpoint(PyonTestCase):
    """Unit tests for SendingBaseEndpoint naming and channel creation."""

    def test_init(self):
        ep = SendingBaseEndpoint(node=sentinel.node)
        self.assertEquals(ep.node, sentinel.node)
        # without an explicit to_name a default NameTrio is still constructed
        self.assertIsInstance(ep._send_name, NameTrio)

    def test_init_with_to_name(self):
        # an (exchange, routing-key) tuple is converted into a NameTrio
        ep = SendingBaseEndpoint(to_name=(sentinel.xp, sentinel.rkey))
        self.assertEquals(ep._send_name.exchange, sentinel.xp)
        self.assertEquals(ep._send_name.queue, sentinel.rkey)

    def test_init_with_old_name_gives_warn(self):
        # NOTE(review): identical to test_init_with_to_name — the deprecated
        # 'name' kwarg path the title suggests is not actually exercised here.
        ep = SendingBaseEndpoint(to_name=(sentinel.xp, sentinel.rkey))
        self.assertEquals(ep._send_name.exchange, sentinel.xp)
        self.assertEquals(ep._send_name.queue, sentinel.rkey)

    def test_init_with_to_name_namepair(self):
        class MyNameTrio(NameTrio):
            def __init__(self):
                self._exchange = sentinel.my_exchange
                self._queue = sentinel.my_queue
        # a NameTrio-derived instance is taken as-is
        ep = SendingBaseEndpoint(to_name=MyNameTrio())
        self.assertEquals(ep._send_name.exchange, sentinel.my_exchange)
        self.assertEquals(ep._send_name.queue, sentinel.my_queue)

    def test_create_endpoint_calls_connect(self):
        np = NameTrio(sentinel.xp, sentinel.queue)
        ep = SendingBaseEndpoint(node=Mock(spec=NodeB), to_name=np)
        e = ep.create_endpoint()
        e.channel.connect.assert_called_once_with(np)

    def test_create_endpoint_with_tuple(self):
        ep = SendingBaseEndpoint(node=Mock(spec=NodeB))
        # tuples passed at create_endpoint time are converted to NameTrio too
        e = ep.create_endpoint(to_name=(sentinel.ex, sentinel.name))
        self.assertIsInstance(e.channel.connect.call_args[0][0], NameTrio)

    def test__create_channel_sets_transport_kwarg(self):
        # if send_name is a transport, it makes sure that kwarg is passed in to node's channel (and therefore the channel)
        class FakeSendName(NameTrio, BaseTransport):
            pass
        fn = FakeSendName()
        ep = SendingBaseEndpoint(node=Mock(spec=NodeB), to_name=fn)
        ch = ep._create_channel()
        self.assertIn('transport', ep.node.channel.call_args[1])
        self.assertIn(fn, ep.node.channel.call_args[1].itervalues())
@attr('UNIT')
class TestListeningBaseEndpoint(PyonTestCase):
    """Unit tests for ListeningBaseEndpoint channel setup, listen loop and stats."""

    def test__create_channel_sets_transport_kwarg(self):
        # if from_name is a transport, it makes sure that kwarg is passed in
        # to node's channel (and therefore the channel)
        class FakeSendName(NameTrio, BaseTransport):
            pass
        fn = FakeSendName()
        ep = ListeningBaseEndpoint(node=Mock(spec=NodeB), from_name=fn)
        ch = ep._create_channel()
        self.assertIn('transport', ep.node.channel.call_args[1])
        self.assertIn(fn, ep.node.channel.call_args[1].itervalues())

    def test_get_ready_event(self):
        ep = ListeningBaseEndpoint(node=Mock(spec=NodeB))
        self.assertEquals(ep.get_ready_event(), ep._ready_event)

    def test_close(self):
        # BUGFIX: the original used Mock(soec=NodeB) — 'soec' is a typo for
        # 'spec', which silently created an unspecced mock.
        ep = ListeningBaseEndpoint(node=Mock(spec=NodeB))
        ep._chan = Mock()
        ep.close()
        # closing the endpoint must close its underlying channel
        ep._chan.close.assert_called_once_with()

    def test_listen_with_base_transport_for_name(self):
        # make a listen loop that will exit right away
        chmock = Mock(spec=ListenChannel)
        chmock.accept.side_effect = ChannelClosedError
        nodemock = Mock(spec=NodeB)
        nodemock.channel.return_value = chmock

        class FakeRecvName(BaseTransport, NameTrio):
            pass
        recv_name = FakeRecvName()
        recv_name.setup_listener = Mock()
        ep = ListeningBaseEndpoint(node=nodemock, from_name=recv_name)
        ep.listen(binding=sentinel.binding)
        # readiness must be flagged even when the loop exits immediately
        self.assertTrue(ep.get_ready_event().is_set())
        # the transport-typed recv_name is forwarded as the transport kwarg
        self.assertIn('transport', nodemock.channel.call_args[1])
        self.assertIn(recv_name, nodemock.channel.call_args[1].itervalues())

    def test_listen(self):
        # make a listen loop that will exit right away
        chmock = Mock(spec=ListenChannel)
        chmock.accept.side_effect = ChannelClosedError
        nodemock = Mock(spec=NodeB)
        nodemock.channel.return_value = chmock
        ep = ListeningBaseEndpoint(node=nodemock, from_name=NameTrio(sentinel.ex, sentinel.queue))
        ep.listen()
        # with no explicit binding, the queue part of the recv name is used
        chmock.setup_listener.assert_called_once_with(ep._recv_name, binding=sentinel.queue)

    def test_listen_exception_in_handling(self):
        # make a listen loop that will return one message (to blow up in processing)
        chmock = MagicMock(spec=ListenChannel)
        chmock.accept.return_value = Mock()
        chmock.accept.return_value.recv = Mock(return_value=(sentinel.msg, sentinel.headers, sentinel.delivery_tag))
        chmock.accept.return_value._recv_queue.qsize.return_value = 1
        nodemock = Mock(spec=NodeB)
        nodemock.channel.return_value = chmock
        recv_name = NameTrio(sentinel.ex, sentinel.queue)
        ep = ListeningBaseEndpoint(node=nodemock, from_name=recv_name)
        # make msg received error out!
        ep.create_endpoint = Mock(return_value=Mock(spec=EndpointUnit))
        ep.create_endpoint.return_value._message_received.side_effect = TestError
        ep.create_endpoint.return_value.intercept_in.return_value = (sentinel.msg, sentinel.headers)
        # the handler error must propagate out of listen()
        self.assertRaises(TestError, ep.listen)
        # and the full setup/consume/accept/recv sequence must have run once
        chmock.setup_listener.assert_called_once_with(recv_name, binding=sentinel.queue)
        chmock.start_consume.assert_called_once_with()
        chmock.accept.assert_called_once_with(n=1, timeout=None)
        chmock.accept.return_value.recv.assert_called_once_with()
        ep.create_endpoint.assert_called_once_with(existing_channel=chmock.accept.return_value)

    def test_get_stats_no_channel(self):
        # stats require an established channel
        ep = ListeningBaseEndpoint()
        self.assertRaises(EndpointError, ep.get_stats)

    def test_get_stats(self):
        ep = ListeningBaseEndpoint()
        ep._chan = Mock(spec=ListenChannel)
        ep.get_stats()
        # the call is delegated straight to the channel
        ep._chan.get_stats.assert_called_once_with()
@attr('INT', group='COI')
class TestListeningBaseEndpointInt(IonIntegrationTestCase):
    """Integration tests for queue stats on live listening endpoints."""

    def setUp(self):
        self._start_container()

    def test_get_stats(self):
        ep = ListeningBaseEndpoint(node=self.container.node)
        gl = spawn(ep.listen, binding="test_get_stats")
        # wait until the listener is actually consuming before asking for stats
        ep.get_ready_event().wait(timeout=5)
        gs_res = ep.get_stats()
        self.assertEquals(gs_res, (0, 1)) # num of messages, num listeners
        ep.close()
        gl.join(timeout=5)

    def test_get_stats_multiple_on_named_queue(self):
        # two listeners on the same named queue: both should report 2 consumers
        ep1 = ListeningBaseEndpoint(node=self.container.node, from_name="test_get_stats_multi")
        gl1 = spawn(ep1.listen)
        ep2 = ListeningBaseEndpoint(node=self.container.node, from_name="test_get_stats_multi")
        gl2 = spawn(ep2.listen)
        ep1.get_ready_event().wait(timeout=5)
        ep2.get_ready_event().wait(timeout=5)
        gs_res1 = ep1.get_stats()
        self.assertEquals(gs_res1, (0, 2)) # num of messages, num listeners
        gs_res2 = ep2.get_stats()
        self.assertEquals(gs_res2, (0, 2)) # num of messages, num listeners
        ep1.close()
        ep2.close()
        gl1.join(timeout=5)
        gl2.join(timeout=5)
@attr('INT', group='COI')
class TestListeningBaseEndpointIntWithLocal(TestListeningBaseEndpointInt):
    """Re-runs the inherited integration stats tests with a locally started container."""
    def setUp(self):
        self._start_container()
@attr('UNIT')
class TestPublisher(PyonTestCase):
    """Unit tests for Publisher channel reuse and close behavior."""

    def setUp(self):
        self._node = Mock(spec=NodeB)
        self._pub = Publisher(node=self._node, to_name="testpub")
        self._ch = Mock(spec=SendChannel)
        self._node.channel.return_value = self._ch
        self._node.interceptors = {}

    def test_publish(self):
        self.assertEquals(self._node.channel.call_count, 0)
        self._pub.publish("pub")
        self._node.channel.assert_called_once_with(self._pub.channel_type, transport=None)
        self.assertEquals(self._ch.send.call_count, 1)
        # the second publish reuses the already-created channel
        # (node.channel call count stays at one)
        self._pub.publish("pub2")
        self._node.channel.assert_called_once_with(self._pub.channel_type, transport=None)
        self.assertEquals(self._ch.send.call_count, 2)

    def test_publish_with_new_name(self):
        self.assertEquals(self._node.channel.call_count, 0)
        self._pub.publish(sentinel.msg, to_name=sentinel.to_name)
        self.assertEquals(self._ch.send.call_count, 1)
        self._pub.publish(sentinel.msg, to_name=sentinel.to_name)
        self.assertEquals(self._ch.send.call_count, 2)

    def test_close(self):
        # publish once so the internal pub endpoint exists before closing
        self._pub.publish(sentinel.msg)
        self._pub._pub_ep.close = Mock()
        self._pub.close()
        self._pub._pub_ep.close.assert_called_once_with()
class RecvMockMixin(object):
    """
    Helper mixin to get a properly mocked receiving channel into several tests.
    """
    def _setup_mock_channel(self, ch_type=BidirClientChannel, status_code=200, error_message="no problem", value="bidirmsg", op=None):
        """
        Sets up a mocked channel, ready for fake bidir communication.

        @param ch_type        Channel type the mock should spec to.
        @param status_code    The status code of the operation, relevant only for RR comms.
        @param error_message  The error message of the operation, relevant only for RR comms.
        @param value          The msg body to be returned.
        @param op             The op name, relevant only for RR comms.
        """
        # spec against an *instance* so instance attributes are mocked too
        ch = MagicMock(spec=ch_type())
        # set a return value for recv so we get an immediate response
        vals = [(value, {'status_code':status_code, 'error_message':error_message, 'op': op, 'conv-id':sentinel.conv_id}, sentinel.delivery_tag)]
        def _ret(*args, **kwargs):
            # deliver the single canned message, then behave like a closed channel
            if len(vals):
                return vals.pop()
            raise ChannelClosedError()
        ch.recv.side_effect = _ret
        # need to set a send_name for now
        ch._send_name = NameTrio('', '')
        return ch
@attr('UNIT')
class TestSubscriber(PyonTestCase, RecvMockMixin):
    """Unit tests for Subscriber endpoint creation and message dispatch."""

    def setUp(self):
        self._node = Mock(spec=NodeB)
        self._node.interceptors = {}

    def test_create_endpoint(self):
        def mycb(msg, headers):
            return "test"
        sub = Subscriber(node=self._node, from_name="testsub", callback=mycb)
        e = sub.create_endpoint()
        # the callback is carried into every created endpoint unit
        self.assertEquals(e._callback, mycb)

    def test_subscribe(self):
        #Test Subscriber.
        #The goal of this test is to get messages routed to the callback mock.
        cbmock = Mock()
        sub = Subscriber(node=self._node, from_name="testsub", callback=cbmock)
        # tell the subscriber to create this as the main listening channel
        listen_channel_mock = self._setup_mock_channel(ch_type=SubscriberChannel, value="subbed", error_message="")
        sub.node.channel.return_value = listen_channel_mock
        # tell our channel to return itself when accepted
        listen_channel_mock.accept.return_value = listen_channel_mock
        # we're ready! call listen
        sub.listen()
        # make sure we got our message
        cbmock.assert_called_once_with('subbed', {'conv-id': sentinel.conv_id, 'status_code':200, 'error_message':'', 'op': None})
@attr('UNIT')
@patch('pyon.net.endpoint.BidirectionalEndpointUnit._send', Mock(return_value=(sentinel.body, {'conv-id':sentinel.conv_id})))
class TestRequestResponse(PyonTestCase, RecvMockMixin):
    """Unit tests for request/response endpoint units and the RR client."""

    def setUp(self):
        self._node = Mock(spec=NodeB)

    def test_endpoint_send(self):
        e = RequestEndpointUnit(interceptors={})
        ch = self._setup_mock_channel()
        e.attach_channel(ch)
        retval, heads = e.send("msg")
        # the mocked channel answers with its canned body
        self.assertEquals(retval, "bidirmsg")
        # cleanup
        e.close()

    def test_endpoint_send_with_timeout(self):
        e = RequestEndpointUnit()
        e.channel = Mock()
        e.channel.recv = lambda: sleep(5) # simulate blocking when recv is called
        # a 1s timeout against a 5s block must raise exception.Timeout
        self.assertRaises(exception.Timeout, e._send, sentinel.msg, MagicMock(), timeout=1)

    def test_rr_client(self):
        rr = RequestResponseClient(node=self._node, to_name="rr")
        rr.node.channel.return_value = self._setup_mock_channel()
        rr.node.interceptors = {}
        ret = rr.request("request")
        self.assertEquals(ret, "bidirmsg")

    def test_rr_server(self):
        # Err, not defined at the moment.
        pass
class ISimpleInterface(Interface):
    """
    Defines a simple interface for testing rpc client/servers.
    """
    # zope.interface method declaration: no self parameter, no body
    def simple(one='', two=''):
        pass
class SimpleService(BaseService):
    """Minimal BaseService implementing ISimpleInterface, used by the RPC server tests."""
    implements(ISimpleInterface)
    name = "simple"
    dependencies = []
    def __init__(self):
        # NOTE(review): BaseService.__init__ is not invoked here — confirm this
        # is deliberate for the test double; only the AsyncResult used to
        # observe incoming calls is set up.
        self._ar = event.AsyncResult()
    def simple(self, named=None):
        # record the received argument so tests can wait on it via _ar.get()
        self._ar.set(named)
        return True
@attr('UNIT')
class TestRPCRequestEndpoint(PyonTestCase, RecvMockMixin):
    """Unit tests for RPCRequestEndpointUnit message building and error mapping."""

    def test_build_msg(self):
        e = RPCRequestEndpointUnit()
        fakemsg = {'fake':'content'}
        msg = e._build_msg(fakemsg, {})
        # er in json now, how to really check
        self.assertNotEquals(str(msg), str(fakemsg))

    @patch('pyon.net.endpoint.RPCRequestEndpointUnit._build_conv_id', Mock(return_value=sentinel.conv_id))
    def test_endpoint_send(self):
        e = RPCRequestEndpointUnit(interceptors={})
        ch = self._setup_mock_channel()
        e.attach_channel(ch)
        ret, heads = e.send("rpc call")
        self.assertEquals(ret, 'bidirmsg')      # we just get payload back due to success RPC code 200
        e.close()

    @patch('pyon.net.endpoint.RPCRequestEndpointUnit._build_conv_id', Mock(return_value=sentinel.conv_id))
    def test_endpoint_send_errors(self):
        # every mapped RPC status code must surface as its exception type
        errlist = [exception.BadRequest, exception.Unauthorized, exception.NotFound, exception.Timeout, exception.Conflict, exception.ServerError, exception.ServiceUnavailable]
        for err in errlist:
            e = RPCRequestEndpointUnit(interceptors={})
            ch = self._setup_mock_channel(status_code=err.status_code, error_message=str(err.status_code))
            e.attach_channel(ch)
            self.assertRaises(err, e.send, {})
@attr('UNIT')
class TestRPCClient(PyonTestCase, RecvMockMixin):
    """Unit tests for RPCClient methods derived from a zope interface."""

    @patch('pyon.net.endpoint.IonObject')
    @patch('pyon.net.endpoint.RPCRequestEndpointUnit._build_conv_id', Mock(return_value=sentinel.conv_id))
    def test_rpc_client(self, iomock):
        node = Mock(spec=NodeB)
        rpcc = RPCClient(node=node, to_name="simply", iface=ISimpleInterface)
        rpcc.node.channel.return_value = self._setup_mock_channel()
        rpcc.node.interceptors = {}
        # the client grows one method per interface operation
        self.assertTrue(hasattr(rpcc, 'simple'))
        ret = rpcc.simple(one="zap", two="zip")
        # kwargs are packed into the op's generated IonObject message type
        iomock.assert_called_once_with('SimpleInterface_simple_in', one='zap', two='zip')
        self.assertEquals(ret, "bidirmsg")

    def test_rpc_client_with_unnamed_args(self):
        # positional args are rejected: RPC calls must use keyword arguments
        rpcc = RPCClient(to_name="simply", iface=ISimpleInterface)
        self.assertRaises(BadRequest, rpcc.simple, "zap", "zip")
@attr('UNIT')
class TestRPCResponseEndpoint(PyonTestCase, RecvMockMixin):
    """Unit tests for RPCResponseEndpointUnit op routing and failure responses.

    The test case instance itself is used as the routing object, so `simple`
    and `error_op` below are the ops the endpoint dispatches into.
    """

    def simple(self, named=None):
        """
        The endpoint will fire its received message into here.
        """
        self._ar.set(named)

    def _do_listen(self, e):
        # drain the mocked channel until it reports closed
        while True:
            try:
                msg, headers, _ = e.channel.recv()
                nm, nh = e.intercept_in(msg, headers)
                e._message_received(nm, nh)
            except ChannelClosedError:
                break

    def test_endpoint_receive(self):
        self._ar = event.AsyncResult()

        # build a command object to be returned by the mocked channel
        class FakeMsg(object):
            def __init__(self):
                self.named = ["ein", "zwei"]
        cvalue = FakeMsg()

        e = RPCResponseEndpointUnit(routing_obj=self, interceptors={})
        ch = self._setup_mock_channel(value=cvalue, op="simple")
        e.attach_channel(ch)
        self._do_listen(e)
        # the message's 'named' attribute is delivered as the op's kwarg
        args = self._ar.get(timeout=10)
        self.assertEquals(args, ["ein", "zwei"])

    @patch('pyon.net.endpoint.get_ion_ts', Mock(return_value=sentinel.ts))
    def test_receive_bad_op(self):
        class FakeMsg(object):
            def __init__(self):
                self.named = ["ein", "zwei"]
        cvalue = FakeMsg()

        e = RPCResponseEndpointUnit(routing_obj=self, interceptors={})
        # 'no_exist' is not an op on the routing object -> 400 failure reply
        ch = self._setup_mock_channel(value=cvalue, op="no_exist")
        e.attach_channel(ch)
        self._do_listen(e)
        assert_called_once_with_header(self, ch.send, {'status_code': 400,
                                                       'error_message': 'Unknown op name: no_exist',
                                                       'error_id': '',
                                                       'conv-id': sentinel.conv_id,
                                                       'conv-seq': 2,
                                                       'protocol': '',
                                                       'performative': 'failure',
                                                       'language': 'scioncc',
                                                       'encoding': 'msgpack',
                                                       'format': 'list',
                                                       'receiver': ',',
                                                       'ts': sentinel.ts})

    @patch('pyon.net.endpoint.get_ion_ts', Mock(return_value=sentinel.ts))
    def test_recv_bad_kwarg(self):
        # we try to call simple with the kwarg "not_named" instead of the correct one
        class FakeMsg(object):
            def __init__(self):
                self.not_named = ["ein", "zwei"]
        cvalue = FakeMsg()

        e = RPCResponseEndpointUnit(routing_obj=self, interceptors={})
        ch = self._setup_mock_channel(value=cvalue, op="simple")
        e.attach_channel(ch)
        self._do_listen(e)
        # test to make sure send got called with our error
        assert_called_once_with_header(self, ch.send, {'status_code': 400,
                                                       'error_message': 'Argument not_named not present in op signature',
                                                       'error_id': '',
                                                       'conv-id': sentinel.conv_id,
                                                       'conv-seq': 2,
                                                       'protocol': '',
                                                       'performative': 'failure',
                                                       'language': 'scioncc',
                                                       'encoding': 'msgpack',
                                                       'format': 'NoneType',
                                                       'receiver': ',',
                                                       'msg-rcvd': ANY,
                                                       'ts': sentinel.ts})

    def test__message_received_interceptor_exception(self):
        e = RPCResponseEndpointUnit(routing_obj=self)
        e.send = Mock()
        e.send.return_value = sentinel.sent
        e.channel = Mock()
        # force the base _message_received to blow up with an IonException
        with patch('pyon.net.endpoint.ResponseEndpointUnit._message_received', new=Mock(side_effect=exception.IonException)):
            retval = e._message_received(sentinel.msg, {})
            self.assertEquals(retval, sentinel.sent)
            # a generic failure reply (status -1) must still be sent
            assert_called_once_with_header(self, e.send, {'status_code': -1,
                                                          'error_message':'',
                                                          'error_id': '',
                                                          'conv-id': '',
                                                          'conv-seq': 2,
                                                          'protocol':'',
                                                          'performative': 'failure'})

    def error_op(self):
        """
        Routing method for next test, raises an IonException.
        """
        raise exception.Unauthorized(sentinel.unauth)

    def test__message_received_error_in_op(self):
        # we want to make sure IonExceptions raised in business logic get a response, now that
        # _message_received sends the responses
        class FakeMsg(object):
            pass
        cvalue = FakeMsg()

        e = RPCResponseEndpointUnit(routing_obj=self, interceptors={})
        ch = self._setup_mock_channel(value=cvalue, op="error_op")
        e.attach_channel(ch)
        e.send = Mock()
        self._do_listen(e)
        # Unauthorized maps to status 401 in the failure reply
        assert_called_once_with_header(self, e.send, {'status_code': 401,
                                                      'error_message': str(sentinel.unauth),
                                                      'error_id': '',
                                                      'conv-id': sentinel.conv_id,
                                                      'conv-seq': 2,
                                                      'protocol':'',
                                                      'performative':'failure'})

    def test_message_received_dict(self):
        rout_obj = Mock()
        e = RPCResponseEndpointUnit(routing_obj=rout_obj)
        # dict messages are exploded into kwargs for the routed op
        msg_dict = {'iam':'adict'}
        e.message_received(msg_dict, {'op':'anyop'})
        rout_obj.anyop.assert_called_once_with(iam='adict')

    def test_message_received_unknown_msg_type(self):
        rout_obj = Mock()
        e = RPCResponseEndpointUnit(routing_obj=rout_obj)
        # non-dict/non-object payloads (here: an int) are rejected
        self.assertRaises(exception.BadRequest, e.message_received, 3, {})
@attr('UNIT')
class TestRPCServer(PyonTestCase, RecvMockMixin):
    """Unit test driving a full RPCServer listen/dispatch cycle against mocks."""

    def test_rpc_server(self):
        node = Mock(spec=NodeB)
        svc = SimpleService()
        rpcs = RPCServer(node=node, from_name="testrpc", service=svc)
        node.interceptors = {}

        # build a command object to be returned by the mocked channel
        class FakeMsg(object):
            def __init__(self):
                self.named = ["ein", "zwei"]
        cvalue = FakeMsg()

        listen_channel_mock = self._setup_mock_channel(ch_type=ServerChannel)
        rpcs.node.channel.return_value = listen_channel_mock
        # tell our channel to return a mocked handler channel when accepted (listen() implementation detail)
        listen_channel_mock.accept.return_value = self._setup_mock_channel(ch_type=ServerChannel.BidirAcceptChannel, value=cvalue, op="simple")
        rpcs.listen()
        # wait for first message to get passed in
        ret = svc._ar.get()
        # the 'simple' op on SimpleService received the message's 'named' list
        self.assertIsInstance(ret, list)
        self.assertEquals(ret, ["ein", "zwei"])
if __name__ == "__main__":
    # BUGFIX: unittest is never imported at module scope, so calling
    # unittest.main() directly raised NameError; import it locally here.
    import unittest
    unittest.main()
| |
#Python Tic-Tac-Toe Game
import random
import collections as c
import os
import time
#The game board
# Spaces hold their 1-9 label until claimed, then 'X' or 'O'.
# `b` is an alias of `board` used by the shorter helper expressions below.
board = b = list(range(1,10))
# Winning lines as strings of 0-based board indices.
rows = ['012', '345', '678']
columns = ['036', '147', '258']
X = ['048', '246']  # the two diagonals
def show_Board(): #Displays game board
    """Print the current 3x3 board with row separators and a trailing blank line."""
    for start in (0, 3, 6):
        print(board[start], '|', board[start + 1], '|', board[start + 2])
        if start != 6:
            print('---------')
    print('')
def getrow(space):
    """Return the index (0-2) of the row containing 0-based board index *space*."""
    for idx, row in enumerate(rows):
        if str(space) in row:
            return idx
def getcolumn(space):
    """Return the index (0-2) of the column containing 0-based board index *space*."""
    for idx, column in enumerate(columns):
        if str(space) in column:
            return idx
def getX(space):
    """Return the diagonal index of *space*: 0 or 1 for a diagonal, 2 for the
    center (which sits on both), or None when off-diagonal."""
    if space == 4:
        return 2
    for idx, diag in enumerate(X):
        if str(space) in diag:
            return idx
def check_Row(space): #Checks for X's and for empty spaces in row
    """If the row holding *space* contains no 'X', return the indices (as
    strings) of its non-'O' cells; otherwise return None."""
    line = rows[getrow(space)]
    if 'X' in (b[int(pos)] for pos in line):
        return None
    return [pos for pos in line if board[int(pos)] != 'O']
def check_Column(space): #Checks for X's and for empty spaces in column
    """If the column holding *space* contains no 'X', return the indices (as
    strings) of its non-'O' cells; otherwise return None."""
    line = columns[getcolumn(space)]
    if 'X' in (b[int(pos)] for pos in line):
        return None
    return [pos for pos in line if board[int(pos)] != 'O']
def check_Center(): #Checks for X's and for empty spaces in both diagonals
    """Collect indices (as strings) of non-'O' cells from each diagonal that
    contains no 'X'; both diagonals are scanned in order."""
    collected = []
    for diag in X:
        if 'X' not in (b[int(pos)] for pos in diag):
            collected.extend(pos for pos in diag if board[int(pos)] != 'O')
    return collected
def check_X(space): #Checks for X's and for empty spaces in diagonal
    """Diagonal scan for *space*: the center checks both diagonals; otherwise
    return the open (non-'O') cells of its diagonal, or None if it holds an 'X'."""
    diag_idx = getX(space)
    if diag_idx == 2:
        return list(check_Center())
    line = X[diag_idx]
    if 'X' in (b[int(pos)] for pos in line):
        return None
    return [pos for pos in line if board[int(pos)] != 'O']
def scan_Board(): #Checks for open spaces
    """Collect candidate spaces for the computer, weighted by opportunity.

    For every space already held by 'O', the open cells of its X-free row,
    column and (if applicable) diagonal are appended, so a space reachable
    through several lines appears several times and counts as stronger.
    While the board is untouched (all cells still ints), every space is
    appended once. Returns a list of 0-based indices, possibly with duplicates.
    """
    openspaces = []
    for space in range(9):
        if board[space] == 'O':
            # BUGFIX(perf): call each helper once and reuse the result — the
            # original re-evaluated every check_* helper twice per space.
            row_open = check_Row(space)
            if row_open is not None:
                openspaces.extend(int(i) for i in row_open)
            col_open = check_Column(space)
            if col_open is not None:
                openspaces.extend(int(i) for i in col_open)
            if getX(space) is not None:
                diag_open = check_X(space)
                if diag_open is not None:
                    openspaces.extend(int(i) for i in diag_open)
        # Empty board: no 'O' anywhere, so seed with every space once.
        if all(isinstance(cell, int) for cell in board):
            openspaces.append(space)
    return openspaces
def can_Win(player): #Checks if (player) has 2-in-a-row
    """Return the board index that completes 3-in-a-row for *player*, or None.

    Lines are examined rows first, then columns, then diagonals; the first
    line with two of *player*'s marks and an open cell wins.
    """
    for line in rows + columns + X:
        cells = [board[int(pos)] for pos in line]
        if cells.count(player) == 2:
            for pos in line:
                if isinstance(board[int(pos)], int):
                    return int(pos)
    return None
def player_Move(mark): #Player's turn
    """Prompt until the player picks a free spot (1-9), then place *mark* there."""
    while True:
        # keep asking until the input parses as an integer
        while True:
            try:
                move = int(input("Select a spot: "))
                break
            except ValueError:
                print("Input an available number from the board.")
        if move not in board:
            # taken spots lose their number label, so they also land here
            print("Number out of range. Choose an available spot on the board.")
            continue
        slot = move - 1
        if board[slot] in ('X', 'O'):
            print("Spot already taken. Select another.")
            continue
        board[slot] = mark
        return
def com_Move(): #Computer's turn. Computer scans board and picks from the best possible spaces.
    """Computer turn: win if possible, block the player, else take a weighted pick."""
    # BUGFIX(perf): can_Win was evaluated twice per branch in the original
    # (once for the None test, once for the index); compute it once.
    win_at = can_Win('O')
    if win_at is not None: #Go for the win if possible!
        board[win_at] = 'O'
        return
    block_at = can_Win('X')
    if block_at is not None: #Block player from winning!
        board[block_at] = 'O'
        return
    openspaces = scan_Board()
    counts = c.Counter(openspaces) #Counts how many 'points' each space has
    if not openspaces:
        # No 'O' on a non-empty board: just take the first free cell.
        for i in range(9):
            if isinstance(board[i], int):
                board[i] = 'O'
                return
        return
    # Prefer the highest-weighted spaces (3 points, then 2), random tie-break.
    for weight in (3, 2):
        if weight in counts.values():
            bestspaces = [space for space, v in counts.items() if v == weight]
            board[random.choice(bestspaces)] = 'O'
            return
    board[random.choice(openspaces)] = 'O'
def win_Check(): #Checks for 3-in-a-row
    """Return True when any row, column or diagonal holds three equal marks."""
    for line in rows + columns + X:
        first, second, third = (board[int(pos)] for pos in line)
        if first == second == third:
            return True
    return False
def player_Turn(mark, player):
    """Clear the screen, announce *player*, show the board, and take their move."""
    # NOTE(review): 'cls' is Windows-only; on other platforms the screen is not cleared.
    os.system('cls')
    print(player + "'s move...")
    show_Board()
    player_Move(mark)
def computer_Turn():
    """Announce and perform the computer's move with a short dramatic pause."""
    os.system('cls')
    print("Computer's move...")
    show_Board()
    # pause so the player can see the board state before it is replaced
    time.sleep(2)
    os.system('cls')
    com_Move()
#------------------------------------------------------------------------------
# Game driver: choose a mode, then alternate turns for at most 5 rounds
# (9 moves fills the 3x3 board, so each side moves at most 5 times).

# Prompt until a valid mode is entered.
while True:
    gamemode = input("PvP or PvC? ")
    if gamemode != "PvP" and gamemode != "PvC":
        print("Please choose a valid game mode.")
        continue
    break
if gamemode == 'PvC':
    # Randomly selects who starts. BUGFIX: the original used
    # random.randrange(0, 100) > 50 — a 49/51 biased coin; randrange(2)
    # is an exact 50/50 split.
    if random.randrange(2) == 0:
        # Player moves first.
        for i in range(5):
            player_Turn('X', 'Player')
            if win_Check() == True:
                os.system('cls')
                print("You won the game!")
                show_Board()
                break
            if i == 4:
                # 9th move made without a win: draw.
                os.system('cls')
                print("It's a cat's game!")
                show_Board()
                break
            computer_Turn()
            if win_Check() == True:
                os.system('cls')
                print("Computer won the game!")
                show_Board()
                break
    else:
        # Computer moves first.
        for i in range(5):
            computer_Turn()
            if win_Check() == True:
                os.system('cls')
                print("Computer won the game!")
                show_Board()
                break
            if i == 4:
                os.system('cls')
                print("It's a cat's game!")
                show_Board()
                break
            player_Turn('X', 'Player')
            if win_Check() == True:
                os.system('cls')
                print("You won the game!")
                show_Board()
                break
if gamemode == 'PvP':
    # Player 1 plays 'O', Player 2 plays 'X'.
    for i in range(5):
        player_Turn('O', 'Player 1')
        if win_Check() == True:
            os.system('cls')
            print("Player 1 won the game!")
            show_Board()
            break
        if i == 4:
            os.system('cls')
            print("It's a cat's game!")
            show_Board()
            break
        player_Turn('X', 'Player 2')
        if win_Check() == True:
            os.system('cls')
            print("Player 2 won the game!")
            show_Board()
            break
# Wait for Enter so the console window stays open after the game.
input()
| |
from __future__ import unicode_literals
import boto3
import json
import sure # noqa
from botocore.exceptions import ClientError
import pytest
from moto import mock_sns, mock_sqs
from moto.sns.models import (
DEFAULT_PAGE_SIZE,
DEFAULT_EFFECTIVE_DELIVERY_POLICY,
DEFAULT_ACCOUNT_ID,
)
@mock_sns
def test_subscribe_sms():
    """SMS subscriptions are accepted for plain and punctuated phone numbers."""
    sns = boto3.client("sns", region_name="us-east-1")
    sns.create_topic(Name="some-topic")
    topic_arn = sns.create_topic(Name="some-topic")["TopicArn"]
    for endpoint in ("+15551234567", "+15/55-123.4567"):
        result = sns.subscribe(TopicArn=topic_arn, Protocol="sms", Endpoint=endpoint)
        result.should.have.key("SubscriptionArn")
@mock_sns
def test_double_subscription():
    """Subscribing the same SQS endpoint twice yields the same subscription ARN."""
    client = boto3.client("sns", region_name="us-east-1")
    client.create_topic(Name="some-topic")
    arn = client.create_topic(Name="some-topic")["TopicArn"]

    def subscribe_sqs(sqs_arn):
        return client.subscribe(TopicArn=arn, Protocol="sqs", Endpoint=sqs_arn)

    first = subscribe_sqs("arn:aws:sqs:elasticmq:000000000000:foo")
    second = subscribe_sqs("arn:aws:sqs:elasticmq:000000000000:foo")
    first["SubscriptionArn"].should.equal(second["SubscriptionArn"])
@mock_sns
def test_subscribe_bad_sms():
    """Invalid SMS endpoints are rejected with InvalidParameter ClientErrors."""
    client = boto3.client("sns", region_name="us-east-1")
    client.create_topic(Name="some-topic")
    resp = client.create_topic(Name="some-topic")
    arn = resp["TopicArn"]

    # Test invalid number. BUGFIX: the original wrapped this in a bare
    # try/except, which silently passed when no exception was raised;
    # pytest.raises turns a missing error into a test failure.
    with pytest.raises(ClientError) as err:
        client.subscribe(TopicArn=arn, Protocol="sms", Endpoint="NAA+15551234567")
    err.value.response["Error"]["Code"].should.equal("InvalidParameter")

    client.subscribe.when.called_with(
        TopicArn=arn, Protocol="sms", Endpoint="+15--551234567"
    ).should.throw(ClientError, "Invalid SMS endpoint: +15--551234567")
    client.subscribe.when.called_with(
        TopicArn=arn, Protocol="sms", Endpoint="+15551234567."
    ).should.throw(ClientError, "Invalid SMS endpoint: +15551234567.")
    client.subscribe.when.called_with(
        TopicArn=arn, Protocol="sms", Endpoint="/+15551234567"
    ).should.throw(ClientError, "Invalid SMS endpoint: /+15551234567")
@mock_sns
def test_creating_subscription():
    """An HTTP subscription appears in list_subscriptions and vanishes on unsubscribe."""
    client = boto3.client("sns", region_name="us-east-1")
    client.create_topic(Name="some-topic")
    topic_arn = client.list_topics()["Topics"][0]["TopicArn"]
    client.subscribe(TopicArn=topic_arn, Protocol="http", Endpoint="http://example.com/")

    subscriptions = client.list_subscriptions()["Subscriptions"]
    subscriptions.should.have.length_of(1)
    subscription = subscriptions[0]
    subscription["TopicArn"].should.equal(topic_arn)
    subscription["Protocol"].should.equal("http")
    subscription["SubscriptionArn"].should.contain(topic_arn)
    subscription["Endpoint"].should.equal("http://example.com/")

    # Removing the subscription must leave none behind.
    client.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"])
    client.list_subscriptions()["Subscriptions"].should.have.length_of(0)
@mock_sns
def test_unsubscribe_from_deleted_topic():
    """Deleting a topic removes its subscriptions; unsubscribing the stale ARN is harmless."""
    client = boto3.client("sns", region_name="us-east-1")
    client.create_topic(Name="some-topic")
    response = client.list_topics()
    topic_arn = response["Topics"][0]["TopicArn"]
    client.subscribe(
        TopicArn=topic_arn, Protocol="http", Endpoint="http://example.com/"
    )
    subscriptions = client.list_subscriptions()["Subscriptions"]
    subscriptions.should.have.length_of(1)
    subscription = subscriptions[0]
    subscription_arn = subscription["SubscriptionArn"]
    subscription["TopicArn"].should.equal(topic_arn)
    subscription["Protocol"].should.equal("http")
    subscription_arn.should.contain(topic_arn)
    subscription["Endpoint"].should.equal("http://example.com/")
    # Now delete the topic
    client.delete_topic(TopicArn=topic_arn)
    # And there should now be 0 topics
    topics_json = client.list_topics()
    topics = topics_json["Topics"]
    topics.should.have.length_of(0)
    # as per the documentation deleting a topic deletes all the subscriptions
    subscriptions = client.list_subscriptions()["Subscriptions"]
    subscriptions.should.have.length_of(0)
    # Now delete hanging subscription
    client.unsubscribe(SubscriptionArn=subscription_arn)
    subscriptions = client.list_subscriptions()["Subscriptions"]
    subscriptions.should.have.length_of(0)
    # Deleting it again should not result in any error
    client.unsubscribe(SubscriptionArn=subscription_arn)
@mock_sns
def test_getting_subscriptions_by_topic():
    """list_subscriptions_by_topic returns only the given topic's subscriptions."""
    client = boto3.client("sns", region_name="us-east-1")
    client.create_topic(Name="topic1")
    client.create_topic(Name="topic2")
    topics = client.list_topics()["Topics"]
    topic1_arn = topics[0]["TopicArn"]
    topic2_arn = topics[1]["TopicArn"]

    client.subscribe(
        TopicArn=topic1_arn, Protocol="http", Endpoint="http://example1.com/"
    )
    client.subscribe(
        TopicArn=topic2_arn, Protocol="http", Endpoint="http://example2.com/"
    )

    subs_for_topic1 = client.list_subscriptions_by_topic(TopicArn=topic1_arn)[
        "Subscriptions"
    ]
    subs_for_topic1.should.have.length_of(1)
    subs_for_topic1[0]["Endpoint"].should.equal("http://example1.com/")
@mock_sns
def test_subscription_paging():
    """Both listing APIs page at DEFAULT_PAGE_SIZE and expose/omit NextToken."""
    sns = boto3.client("sns", region_name="us-east-1")
    sns.create_topic(Name="topic1")
    topic1_arn = sns.list_topics()["Topics"][0]["TopicArn"]

    remainder = int(DEFAULT_PAGE_SIZE / 3)
    for index in range(DEFAULT_PAGE_SIZE + remainder):
        sns.subscribe(
            TopicArn=topic1_arn,
            Protocol="email",
            Endpoint="email_" + str(index) + "@test.com",
        )

    # Account-wide listing: first page is full and carries a NextToken.
    page = sns.list_subscriptions()
    page["Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE)
    token = page["NextToken"]
    token.should.equal(str(DEFAULT_PAGE_SIZE))

    # Second (final) page holds the remainder and no NextToken.
    page = sns.list_subscriptions(NextToken=token)
    page["Subscriptions"].should.have.length_of(remainder)
    page.shouldnt.have("NextToken")

    # Per-topic listing pages the same way.
    page = sns.list_subscriptions_by_topic(TopicArn=topic1_arn)
    page["Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE)
    token = page["NextToken"]
    token.should.equal(str(DEFAULT_PAGE_SIZE))

    page = sns.list_subscriptions_by_topic(
        TopicArn=topic1_arn, NextToken=token
    )
    page["Subscriptions"].should.have.length_of(remainder)
    page.shouldnt.have("NextToken")
@mock_sns
def test_subscribe_attributes():
    """A fresh HTTP subscription exposes the documented default attributes.

    Verifies confirmation flags, echo-back of endpoint/topic/protocol/ARN,
    the owning account id and the effective delivery policy.
    """
    client = boto3.client("sns", region_name="us-east-1")
    # A single create_topic call suffices; the previous duplicate call with
    # the same name was redundant (its response was discarded).
    resp = client.create_topic(Name="some-topic")
    arn = resp["TopicArn"]

    resp = client.subscribe(TopicArn=arn, Protocol="http", Endpoint="http://test.com")

    response = client.get_subscription_attributes(
        SubscriptionArn=resp["SubscriptionArn"]
    )

    response.should.contain("Attributes")
    attributes = response["Attributes"]
    # HTTP subscriptions are auto-confirmed by moto.
    attributes["PendingConfirmation"].should.equal("false")
    attributes["ConfirmationWasAuthenticated"].should.equal("true")
    attributes["Endpoint"].should.equal("http://test.com")
    attributes["TopicArn"].should.equal(arn)
    attributes["Protocol"].should.equal("http")
    attributes["SubscriptionArn"].should.equal(resp["SubscriptionArn"])
    attributes["Owner"].should.equal(str(DEFAULT_ACCOUNT_ID))
    attributes["RawMessageDelivery"].should.equal("false")
    json.loads(attributes["EffectiveDeliveryPolicy"]).should.equal(
        DEFAULT_EFFECTIVE_DELIVERY_POLICY
    )
@mock_sns
def test_creating_subscription_with_attributes():
    """Attributes passed to subscribe() are stored, and invalid names are rejected."""
    client = boto3.client("sns", region_name="us-east-1")
    client.create_topic(Name="some-topic")
    topic_arn = client.list_topics()["Topics"][0]["TopicArn"]

    delivery_policy = json.dumps(
        {
            "healthyRetryPolicy": {
                "numRetries": 10,
                "minDelayTarget": 1,
                "maxDelayTarget": 2,
            }
        }
    )

    filter_policy = json.dumps(
        {
            "store": ["example_corp"],
            "event": ["order_cancelled"],
            "encrypted": [False],
            "customer_interests": ["basketball", "baseball"],
            "price": [100, 100.12],
            "error": [None],
        }
    )

    client.subscribe(
        TopicArn=topic_arn,
        Protocol="http",
        Endpoint="http://example.com/",
        Attributes={
            "RawMessageDelivery": "true",
            "DeliveryPolicy": delivery_policy,
            "FilterPolicy": filter_policy,
        },
    )

    all_subs = client.list_subscriptions()["Subscriptions"]
    all_subs.should.have.length_of(1)
    created = all_subs[0]
    created["TopicArn"].should.equal(topic_arn)
    created["Protocol"].should.equal("http")
    created["SubscriptionArn"].should.contain(topic_arn)
    created["Endpoint"].should.equal("http://example.com/")

    # Test the subscription attributes have been set
    sub_arn = created["SubscriptionArn"]
    attrs = client.get_subscription_attributes(SubscriptionArn=sub_arn)
    attrs["Attributes"]["RawMessageDelivery"].should.equal("true")
    attrs["Attributes"]["DeliveryPolicy"].should.equal(delivery_policy)
    attrs["Attributes"]["FilterPolicy"].should.equal(filter_policy)

    # Now unsubscribe the subscription
    client.unsubscribe(SubscriptionArn=sub_arn)

    # And there should be zero subscriptions left
    client.list_subscriptions()["Subscriptions"].should.have.length_of(0)

    # invalid attr name
    with pytest.raises(ClientError):
        client.subscribe(
            TopicArn=topic_arn,
            Protocol="http",
            Endpoint="http://example.com/",
            Attributes={"InvalidName": "true"},
        )
@mock_sns
@mock_sqs
def test_delete_subscriptions_on_delete_topic():
    """SQS subscriptions disappear together with their deleted topic."""
    sqs_client = boto3.client("sqs", region_name="us-east-1")
    sns_client = boto3.client("sns", region_name="us-east-1")

    queue = sqs_client.create_queue(QueueName="test-queue")
    topic = sns_client.create_topic(Name="some-topic")

    sns_client.subscribe(
        TopicArn=topic.get("TopicArn"), Protocol="sqs", Endpoint=queue.get("QueueUrl")
    )
    sns_client.list_subscriptions()["Subscriptions"].should.have.length_of(1)

    sns_client.delete_topic(TopicArn=topic.get("TopicArn"))
    sns_client.list_subscriptions()["Subscriptions"].should.have.length_of(0)
@mock_sns
def test_set_subscription_attributes():
    """set_subscription_attributes stores values and rejects bad ARNs / names."""
    client = boto3.client("sns", region_name="us-east-1")
    client.create_topic(Name="some-topic")
    topic_arn = client.list_topics()["Topics"][0]["TopicArn"]

    client.subscribe(TopicArn=topic_arn, Protocol="http", Endpoint="http://example.com/")
    subs = client.list_subscriptions()["Subscriptions"]
    subs.should.have.length_of(1)
    sub = subs[0]
    sub["TopicArn"].should.equal(topic_arn)
    sub["Protocol"].should.equal("http")
    sub["SubscriptionArn"].should.contain(topic_arn)
    sub["Endpoint"].should.equal("http://example.com/")

    sub_arn = sub["SubscriptionArn"]
    attrs = client.get_subscription_attributes(SubscriptionArn=sub_arn)
    attrs.should.have.key("Attributes")

    delivery_policy = json.dumps(
        {
            "healthyRetryPolicy": {
                "numRetries": 10,
                "minDelayTarget": 1,
                "maxDelayTarget": 2,
            }
        }
    )
    filter_policy = json.dumps(
        {
            "store": ["example_corp"],
            "event": ["order_cancelled"],
            "encrypted": [False],
            "customer_interests": ["basketball", "baseball"],
            "price": [100, 100.12],
            "error": [None],
        }
    )

    # Set each supported attribute in turn.
    for attr_name, attr_value in (
        ("RawMessageDelivery", "true"),
        ("DeliveryPolicy", delivery_policy),
        ("FilterPolicy", filter_policy),
    ):
        client.set_subscription_attributes(
            SubscriptionArn=sub_arn,
            AttributeName=attr_name,
            AttributeValue=attr_value,
        )

    attrs = client.get_subscription_attributes(SubscriptionArn=sub_arn)
    attrs["Attributes"]["RawMessageDelivery"].should.equal("true")
    attrs["Attributes"]["DeliveryPolicy"].should.equal(delivery_policy)
    attrs["Attributes"]["FilterPolicy"].should.equal(filter_policy)

    # not existing subscription
    with pytest.raises(ClientError):
        client.set_subscription_attributes(
            SubscriptionArn="invalid",
            AttributeName="RawMessageDelivery",
            AttributeValue="true",
        )
    with pytest.raises(ClientError):
        attrs = client.get_subscription_attributes(SubscriptionArn="invalid")

    # invalid attr name
    with pytest.raises(ClientError):
        client.set_subscription_attributes(
            SubscriptionArn=sub_arn,
            AttributeName="InvalidName",
            AttributeValue="true",
        )
@mock_sns
def test_subscribe_invalid_filter_policy():
    """Each malformed FilterPolicy must be rejected with the documented error.

    The previous version used bare ``try/except ClientError`` blocks, which
    silently passed when no exception was raised at all; ``pytest.raises``
    makes the failure expectation explicit.
    """
    conn = boto3.client("sns", region_name="us-east-1")
    conn.create_topic(Name="some-topic")
    response = conn.list_topics()
    topic_arn = response["Topics"][0]["TopicArn"]

    def _subscribe_with_policy(policy):
        # Helper: subscribe with the given (already JSON-encoded) filter policy.
        conn.subscribe(
            TopicArn=topic_arn,
            Protocol="http",
            Endpoint="http://example.com/",
            Attributes={"FilterPolicy": policy},
        )

    # More than 150 total values -> policy is "too complex".
    with pytest.raises(ClientError) as err_info:
        _subscribe_with_policy(json.dumps({"store": [str(i) for i in range(151)]}))
    err_info.value.response["Error"]["Code"].should.equal("InvalidParameter")
    err_info.value.response["Error"]["Message"].should.equal(
        "Invalid parameter: FilterPolicy: Filter policy is too complex"
    )

    # Nested lists are not valid match values.
    with pytest.raises(ClientError) as err_info:
        _subscribe_with_policy(json.dumps({"store": [["example_corp"]]}))
    err_info.value.response["Error"]["Code"].should.equal("InvalidParameter")
    err_info.value.response["Error"]["Message"].should.equal(
        "Invalid parameter: FilterPolicy: Match value must be String, number, true, false, or null"
    )

    # "exists" patterns require a boolean.
    with pytest.raises(ClientError) as err_info:
        _subscribe_with_policy(json.dumps({"store": [{"exists": None}]}))
    err_info.value.response["Error"]["Code"].should.equal("InvalidParameter")
    err_info.value.response["Error"]["Message"].should.equal(
        "Invalid parameter: FilterPolicy: exists match pattern must be either true or false."
    )

    # Unknown match-type keys are rejected.
    with pytest.raises(ClientError) as err_info:
        _subscribe_with_policy(json.dumps({"store": [{"error": True}]}))
    err_info.value.response["Error"]["Code"].should.equal("InvalidParameter")
    err_info.value.response["Error"]["Message"].should.equal(
        "Invalid parameter: FilterPolicy: Unrecognized match type error"
    )

    # Numeric values outside the supported range surface as InternalFailure.
    with pytest.raises(ClientError) as err_info:
        _subscribe_with_policy(json.dumps({"store": [1000000001]}))
    err_info.value.response["Error"]["Code"].should.equal("InternalFailure")
@mock_sns
def test_check_not_opted_out():
    """A regular phone number reports isOptedOut == False."""
    client = boto3.client("sns", region_name="us-east-1")
    result = client.check_if_phone_number_is_opted_out(phoneNumber="+447428545375")
    result.should.contain("isOptedOut")
    result["isOptedOut"].should.be(False)
@mock_sns
def test_check_opted_out():
    # Phone number ends in 99 so is hardcoded in the endpoint to return opted
    # out status
    client = boto3.client("sns", region_name="us-east-1")
    result = client.check_if_phone_number_is_opted_out(phoneNumber="+447428545399")
    result.should.contain("isOptedOut")
    result["isOptedOut"].should.be(True)
@mock_sns
def test_check_opted_out_invalid():
    """A malformed phone number makes the opt-out check raise ClientError."""
    client = boto3.client("sns", region_name="us-east-1")
    with pytest.raises(ClientError):
        client.check_if_phone_number_is_opted_out(phoneNumber="+44742LALALA")
@mock_sns
def test_list_opted_out():
    """The opted-out listing is present and non-empty by default."""
    client = boto3.client("sns", region_name="us-east-1")
    listing = client.list_phone_numbers_opted_out()
    listing.should.contain("phoneNumbers")
    len(listing["phoneNumbers"]).should.be.greater_than(0)
@mock_sns
def test_opt_in():
    """Opting a number back in shrinks the opted-out list."""
    client = boto3.client("sns", region_name="us-east-1")
    opted_out = client.list_phone_numbers_opted_out()["phoneNumbers"]
    current_len = len(opted_out)
    assert current_len > 0

    client.opt_in_phone_number(phoneNumber=opted_out[0])

    remaining = client.list_phone_numbers_opted_out()["phoneNumbers"]
    len(remaining).should.be.greater_than(0)
    len(remaining).should.be.lower_than(current_len)
@mock_sns
def test_confirm_subscription():
    """confirm_subscription accepts a token for an existing topic without error."""
    client = boto3.client("sns", region_name="us-east-1")
    created = client.create_topic(Name="testconfirm")

    client.confirm_subscription(
        TopicArn=created["TopicArn"],
        Token=(
            "2336412f37fb687f5d51e6e241d59b68c4e583a5cee0be6f95bbf97ab8d2441cf47b99e848408adaadf4c197e65f03473d53c4ba398f6abbf38ce2e8ebf7b4ceceb2cd817959bcde1357e58a2861b05288c535822eb88cac3db04f592285249971efc6484194fc4a4586147f16916692"
        ),
        AuthenticateOnUnsubscribe="true",
    )
| |
"""Unit tests for AlertDefinition views."""
# Copyright 2016 Solinea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from rest_framework import status
from rest_framework.test import APITestCase
from goldstone.core.models import SavedSearch, AlertDefinition, Alert, \
EmailProducer
from goldstone.test_utils import CONTENT_NO_CREDENTIALS, \
AUTHORIZATION_PAYLOAD, BAD_TOKEN, CONTENT_BAD_TOKEN, create_and_login
PRODUCER_URL = '/core/producer/'
EMAIL_PRODUCER_URL = '/core/email_producer/'
class ProducerViewTests(APITestCase):
    """Test the Producer API.

    /core/producer/ is read-only: GET must succeed for an authenticated
    user, while POST, PUT, PATCH and DELETE must return 405, and every
    unauthenticated request must return 401.
    """

    fixtures = ['core_initial_data.yaml']

    def setUp(self):
        """Create a search, alert definition, alert and producer to test against."""
        self.saved_search = SavedSearch.objects.all()[0]
        self.alert_def = AlertDefinition(name='alert_def',
                                         search=self.saved_search)
        self.alert_def.save()
        self.alert = Alert(short_message='test',
                           long_message='test123',
                           alert_def=self.alert_def)
        self.alert.save()
        self.producer = EmailProducer(sender='me@localhost',
                                      receiver='you@localhost',
                                      alert_def=self.alert_def)
        self.producer.save()
        self.basic_post_body = {
            'sender': 'bell@xyz.com',
            'receiver': 'watson@xyz.com',
            'alert_def': self.alert_def.uuid
        }

    def test_not_logged_in(self):
        """All operations should fail when not logged in."""

        # Try getting resource with no token.
        response = self.client.get(PRODUCER_URL)
        self.assertContains(response,
                            CONTENT_NO_CREDENTIALS,
                            status_code=status.HTTP_401_UNAUTHORIZED)

        # Try getting resource a bogus token.
        response = self.client.get(
            PRODUCER_URL,
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN)
        self.assertContains(response,
                            CONTENT_BAD_TOKEN,
                            status_code=status.HTTP_401_UNAUTHORIZED)

        # Try creating resource with no token.
        response = self.client.post(PRODUCER_URL,
                                    json.dumps(self.basic_post_body),
                                    content_type="application/json")
        self.assertContains(response,
                            CONTENT_NO_CREDENTIALS,
                            status_code=status.HTTP_401_UNAUTHORIZED)

        # Try creating resource with a bogus token.
        response = self.client.post(
            PRODUCER_URL,
            json.dumps(self.basic_post_body),
            content_type="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN)
        self.assertContains(response,
                            CONTENT_BAD_TOKEN,
                            status_code=status.HTTP_401_UNAUTHORIZED)

        # Try updating resource with no token.
        response = self.client.put(
            PRODUCER_URL + self.alert_def.uuid + '/',
            json.dumps(self.basic_post_body),
            content_type="application/json")
        self.assertContains(response,
                            CONTENT_NO_CREDENTIALS,
                            status_code=status.HTTP_401_UNAUTHORIZED)

        # Try updating resource with a bogus token.
        response = self.client.put(
            PRODUCER_URL + self.alert_def.uuid + '/',
            json.dumps(self.basic_post_body),
            content_type="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN)
        self.assertContains(response,
                            CONTENT_BAD_TOKEN,
                            status_code=status.HTTP_401_UNAUTHORIZED)

    def test_post_not_allowed(self):
        """POST operation tests"""

        # Create a user and get the authorization token. Then do the test.
        token = create_and_login()

        # Try creating resource with a valid token.
        response = self.client.post(
            PRODUCER_URL,
            json.dumps(self.basic_post_body),
            content_type="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)

        # pylint: disable=E1101
        self.assertEqual(response.status_code,
                         status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_get(self):
        """GET operation tests"""

        # Create a user and get the authorization token. Then do the test.
        token = create_and_login()

        # We should have at least one result in our list, but could have more
        response = self.client.get(
            PRODUCER_URL,
            accept="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        content = json.loads(response.content)
        self.assertIn('count', content)
        self.assertIn('next', content)
        self.assertIn('previous', content)
        self.assertIn('results', content)
        self.assertIsInstance(content['results'], list)
        self.assertGreater(len(content['results']), 0)

        # test the structure of the one we loaded
        response = self.client.get(
            PRODUCER_URL + "%s/" % self.producer.uuid,
            accept="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        content = json.loads(response.content)
        self.assertIn('uuid', content)
        self.assertIn('alert_def', content)
        self.assertIn('created', content)
        self.assertIn('updated', content)
        self.assertIn('sender', content)
        self.assertIn('receiver', content)

    def test_delete_not_allowed(self):
        """DELETE operation tests"""

        # Create a user and get the authorization token. Then do the test.
        token = create_and_login()

        # Try deleting resource with a valid token.
        response = self.client.delete(
            PRODUCER_URL + '%s/' % self.alert_def.uuid,
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)

        self.assertEqual(response.status_code,
                         status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_put_not_allowed(self):
        """PUT operation tests"""

        # Create a user and get the authorization token. Then do the test.
        token = create_and_login()

        # Try updating resource with a valid token.
        response = self.client.put(
            PRODUCER_URL + '%s/' % self.alert_def.uuid,
            json.dumps(self.basic_post_body),
            content_type="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)

        self.assertEqual(response.status_code,
                         status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_patch_not_allowed(self):
        """PATCH operation tests"""

        # Create a user and get the authorization token. Then do the test.
        token = create_and_login()

        # Try patching resource with a valid token.
        # BUG FIX: this test previously issued a PUT (duplicating
        # test_put_not_allowed), leaving the PATCH verb unexercised.
        response = self.client.patch(
            PRODUCER_URL + '%s/' % self.alert_def.uuid,
            json.dumps(self.basic_post_body),
            content_type="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)

        self.assertEqual(response.status_code,
                         status.HTTP_405_METHOD_NOT_ALLOWED)
class EmailProducerViewTests(APITestCase):
    """Test the Email Producer API.

    /core/email_producer/ supports full CRUD for authenticated users and
    returns 401 for every unauthenticated request.
    """

    fixtures = ['core_initial_data.yaml']

    def setUp(self):
        # Build the object graph the endpoint operates on: a saved search,
        # an alert definition over it, one alert, and one existing producer.
        self.saved_search = SavedSearch.objects.all()[0]
        self.alert_def = AlertDefinition(name='alert_def',
                                         search=self.saved_search)
        self.alert_def.save()
        self.alert = Alert(short_message='test',
                           long_message='test123',
                           alert_def=self.alert_def)
        self.alert.save()
        self.producer = EmailProducer(sender='me', receiver='you',
                                      alert_def=self.alert_def)
        self.producer.save()
        # Payload used by the POST/PUT requests below.
        self.basic_post_body = {
            "sender": "bell@localhost",
            "receiver": "watson@localhost",
            "alert_def": self.alert_def.uuid
        }

    def test_not_logged_in(self):
        """All operations should fail when not logged in."""

        # Try getting resource with no token.
        response = self.client.get(EMAIL_PRODUCER_URL)
        self.assertContains(response,
                            CONTENT_NO_CREDENTIALS,
                            status_code=status.HTTP_401_UNAUTHORIZED)

        # Try getting resource a bogus token.
        response = self.client.get(
            EMAIL_PRODUCER_URL,
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN)
        self.assertContains(response,
                            CONTENT_BAD_TOKEN,
                            status_code=status.HTTP_401_UNAUTHORIZED)

        # Try creating resource with no token.
        response = self.client.post(EMAIL_PRODUCER_URL,
                                    json.dumps(self.basic_post_body),
                                    content_type="application/json")
        self.assertContains(response,
                            CONTENT_NO_CREDENTIALS,
                            status_code=status.HTTP_401_UNAUTHORIZED)

        # Try creating resource with a bogus token.
        response = self.client.post(
            EMAIL_PRODUCER_URL,
            json.dumps(self.basic_post_body),
            content_type="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN)
        self.assertContains(response,
                            CONTENT_BAD_TOKEN,
                            status_code=status.HTTP_401_UNAUTHORIZED)

        # Try updating resource with no token.
        response = self.client.put(
            EMAIL_PRODUCER_URL + self.alert_def.uuid + '/',
            json.dumps(self.basic_post_body),
            content_type="application/json")
        self.assertContains(response,
                            CONTENT_NO_CREDENTIALS,
                            status_code=status.HTTP_401_UNAUTHORIZED)

        # Try updating resource with a bogus token.
        response = self.client.put(
            EMAIL_PRODUCER_URL + self.alert_def.uuid + '/',
            json.dumps(self.basic_post_body),
            content_type="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN)
        self.assertContains(response,
                            CONTENT_BAD_TOKEN,
                            status_code=status.HTTP_401_UNAUTHORIZED)

    def test_crud(self):
        """Exercise the full create/read/update/patch/delete cycle in order.

        The steps are sequential and stateful: the uuid captured from the
        filtered GET is reused by every later step.
        """

        # Create a user and get the authorization token. Then do the test.
        token = create_and_login()

        # Try creating resource with a valid token.
        response = self.client.post(
            EMAIL_PRODUCER_URL,
            json.dumps(self.basic_post_body),
            content_type="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertEqual(response.status_code,
                         status.HTTP_201_CREATED)

        # Quick test of a filtered GET of the new resource
        response = self.client.get(
            EMAIL_PRODUCER_URL + "?sender=bell@localhost",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        content = json.loads(response.content)
        self.assertIn('count', content)
        self.assertIn('next', content)
        self.assertIn('previous', content)
        self.assertIn('results', content)
        self.assertIsInstance(content['results'], list)
        self.assertGreater(len(content['results']), 0)
        # Remember the created record's uuid for the remaining steps.
        self.bell_uuid = content['results'][0]['uuid']

        # test the structure of the record we posted
        response = self.client.get(
            EMAIL_PRODUCER_URL + "%s/" % self.bell_uuid,
            accept="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        content = json.loads(response.content)
        self.assertIn('uuid', content)
        self.assertIn('alert_def', content)
        self.assertIn('created', content)
        self.assertIn('updated', content)
        self.assertIn('sender', content)
        self.assertIn('receiver', content)
        self.assertEqual(content['sender'], 'bell@localhost')
        self.assertEqual(content['receiver'], 'watson@localhost')
        self.bell_content = content

        # Reuse the GET body as the PUT body with one field changed.
        put_body = self.bell_content
        put_body['receiver'] = 'howell@localhost'

        # Try updating resource with a valid token.
        response = self.client.put(
            EMAIL_PRODUCER_URL + '%s/' % self.bell_uuid,
            json.dumps(put_body),
            content_type="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertEqual(response.status_code,
                         status.HTTP_200_OK)

        # Try patching resource with a valid token.
        response = self.client.patch(
            EMAIL_PRODUCER_URL + '%s/' % self.bell_uuid,
            json.dumps({'receiver': 'watson@localhost'}),
            content_type="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertEqual(response.status_code,
                         status.HTTP_200_OK)

        # Try deleting resource with a valid token.
        response = self.client.delete(
            EMAIL_PRODUCER_URL + '%s/' % self.bell_uuid,
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertEqual(response.status_code,
                         status.HTTP_204_NO_CONTENT)
| |
# Copyright 2016 Mirantis Inc.
# Copyright 2016 IBM Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from debtcollector import removals
from oslo_config import cfg
from oslo_serialization import jsonutils
import six.moves.urllib.parse as parser
from osprofiler.drivers import base
from osprofiler import exc
class Redis(base.Driver):
    """OSProfiler storage driver backed by a single Redis server.

    Events are written under two key namespaces: the legacy scheme stores
    one key per event, the optimized scheme stores one Redis list per trace.
    Error traces are additionally indexed under a third namespace.
    """

    @removals.removed_kwarg("db", message="'db' parameter is deprecated "
                                          "and will be removed in future. "
                                          "Please specify 'db' in "
                                          "'connection_string' instead.")
    def __init__(self, connection_str, db=0, project=None,
                 service=None, host=None, conf=cfg.CONF, **kwargs):
        """Redis driver for OSProfiler."""

        super(Redis, self).__init__(connection_str, project=project,
                                    service=service, host=host,
                                    conf=conf, **kwargs)
        try:
            # Imported lazily so osprofiler works without redis installed
            # unless this driver is actually selected.
            from redis import StrictRedis
        except ImportError:
            raise exc.CommandError(
                "To use OSProfiler with Redis driver, "
                "please install `redis` library. "
                "To install with pip:\n `pip install redis`.")

        # only connection over network is supported with schema
        # redis://[:password]@host[:port][/db]
        self.db = StrictRedis.from_url(self.connection_str)

        # Key prefixes for the three storage namespaces (see class docstring).
        self.namespace_opt = "osprofiler_opt:"
        self.namespace = "osprofiler:"  # legacy
        self.namespace_error = "osprofiler_error:"

    @classmethod
    def get_name(cls):
        # Driver name used in connection-string scheme selection.
        return "redis"

    def notify(self, info):
        """Send notifications to Redis.

        :param info: Contains information about trace element.
                     In payload dict there are always 3 ids:
                     "base_id" - uuid that is common for all notifications
                                 related to one trace. Used to simplify
                                 retrieving of all trace elements from
                                 Redis.
                     "parent_id" - uuid of parent element in trace
                     "trace_id" - uuid of current element in trace

                     With parent_id and trace_id it's quite simple to build
                     tree of trace elements, which simplify analyze of trace.
        """
        data = info.copy()
        data["project"] = self.project
        data["service"] = self.service
        # All events of one trace share a single list keyed by base_id.
        key = self.namespace_opt + data["base_id"]
        self.db.lpush(key, jsonutils.dumps(data))

        # NOTE(review): self.filter_error_trace is presumably set by
        # base.Driver from configuration -- confirm against base class.
        if (self.filter_error_trace
                and data.get("info", {}).get("etype") is not None):
            self.notify_error_trace(data)

    def notify_error_trace(self, data):
        """Store base_id and timestamp of error trace to a separate key."""
        key = self.namespace_error + data["base_id"]
        value = jsonutils.dumps({
            "base_id": data["base_id"],
            "timestamp": data["timestamp"]
        })
        self.db.set(key, value)

    def list_traces(self, fields=None):
        """Query all traces from the storage.

        :param fields: Set of trace fields to return. Defaults to 'base_id'
                       and 'timestamp'
        :return List of traces, where each trace is a dictionary containing
                at least `base_id` and `timestamp`.
        """
        fields = set(fields or self.default_trace_fields)

        # first get legacy events
        result = self._list_traces_legacy(fields)

        # with optimized schema trace events are stored in a list
        ids = self.db.scan_iter(match=self.namespace_opt + "*")
        for i in ids:
            # for each trace query the first event to have a timestamp
            # NOTE(review): lindex(i, 1) reads list index 1, not 0 --
            # presumably intentional (index 0 is the most recently
            # lpush'ed event), but confirm; a single-element list would
            # make lindex return None and loads() fail.
            first_event = jsonutils.loads(self.db.lindex(i, 1))
            result.append({key: value for key, value in first_event.items()
                           if key in fields})
        return result

    def _list_traces_legacy(self, fields):
        # With current schema every event is stored under its own unique key
        # To query all traces we first need to get all keys, then
        # get all events, sort them and pick up only the first one
        ids = self.db.scan_iter(match=self.namespace + "*")
        traces = [jsonutils.loads(self.db.get(i)) for i in ids]
        traces.sort(key=lambda x: x["timestamp"])
        seen_ids = set()
        result = []
        for trace in traces:
            # Keep only the earliest event per base_id (traces are sorted
            # by timestamp above).
            if trace["base_id"] not in seen_ids:
                seen_ids.add(trace["base_id"])
                result.append({key: value for key, value in trace.items()
                               if key in fields})
        return result

    def list_error_traces(self):
        """Returns all traces that have error/exception."""
        ids = self.db.scan_iter(match=self.namespace_error + "*")
        traces = [jsonutils.loads(self.db.get(i)) for i in ids]
        traces.sort(key=lambda x: x["timestamp"])
        seen_ids = set()
        result = []
        for trace in traces:
            if trace["base_id"] not in seen_ids:
                seen_ids.add(trace["base_id"])
                result.append(trace)
        return result

    def get_report(self, base_id):
        """Retrieves and parses notification from Redis.

        :param base_id: Base id of trace elements.
        """
        def iterate_events():
            # Yield raw JSON events from both storage schemes.
            for key in self.db.scan_iter(
                    match=self.namespace + base_id + "*"):  # legacy
                yield self.db.get(key)
            for event in self.db.lrange(self.namespace_opt + base_id, 0, -1):
                yield event

        for data in iterate_events():
            n = jsonutils.loads(data)
            trace_id = n["trace_id"]
            parent_id = n["parent_id"]
            name = n["name"]
            project = n["project"]
            service = n["service"]
            host = n["info"]["host"]
            timestamp = n["timestamp"]

            # _append_results/_parse_results are provided by base.Driver.
            self._append_results(trace_id, parent_id, name, project, service,
                                 host, timestamp, n)

        return self._parse_results()
class RedisSentinel(Redis, base.Driver):
    """Redis driver variant that connects through Redis Sentinel.

    Inherits all storage behaviour from Redis and only replaces the
    connection setup with a Sentinel master lookup.
    """

    @removals.removed_kwarg("db", message="'db' parameter is deprecated "
                                          "and will be removed in future. "
                                          "Please specify 'db' in "
                                          "'connection_string' instead.")
    def __init__(self, connection_str, db=0, project=None,
                 service=None, host=None, conf=cfg.CONF, **kwargs):
        """Redis driver for OSProfiler."""

        # NOTE(review): this calls Redis.__init__, which already creates a
        # StrictRedis connection that is then overwritten below -- the
        # direct connection appears to be discarded; confirm intent.
        super(RedisSentinel, self).__init__(connection_str, project=project,
                                            service=service, host=host,
                                            conf=conf, **kwargs)
        try:
            from redis.sentinel import Sentinel
        except ImportError:
            raise exc.CommandError(
                "To use this command, you should install "
                "'redis' manually. Use command:\n "
                "'pip install redis'.")

        self.conf = conf
        socket_timeout = self.conf.profiler.socket_timeout
        parsed_url = parser.urlparse(self.connection_str)
        # Ask Sentinel for the current master of the configured service and
        # use it as the storage connection.
        sentinel = Sentinel([(parsed_url.hostname, int(parsed_url.port))],
                            password=parsed_url.password,
                            socket_timeout=socket_timeout)
        self.db = sentinel.master_for(self.conf.profiler.sentinel_service_name,
                                      socket_timeout=socket_timeout)

    @classmethod
    def get_name(cls):
        return "redissentinel"
| |
from keras.models import Model
from keras.layers import Input, Masking, Dense, LSTM
from keras.layers import Dropout, TimeDistributed, Bidirectional, merge
from keras.layers.embeddings import Embedding
from keras.utils import np_utils
import numpy as np
import pandas as pd
import sys
import math
import os
from datetime import datetime
# add path
sys.path.append('../')
sys.path.append('../tools')
from tools import conf
from tools import load_data
from tools import prepare
from tools import plot
np.random.seed(0)
# train hyperparameters -- all pulled from the project-local conf module.
step_length = conf.ner_step_length            # max tokens per sentence (padded)
pos_length = conf.ner_pos_length              # size of POS one-hot vector
chunk_length = conf.ner_chunk_length          # size of chunk one-hot vector
gazetteer_length = conf.BIOES_gazetteer_length
emb_vocab = conf.senna_vocab                  # senna word-embedding vocabulary size
emb_length = conf.senna_length                # senna embedding dimension
hash_vocab = conf.ner_hash_vocab
hash_length = conf.ner_hash_length
output_length = conf.ner_BIOES_length         # number of BIOES output classes
batch_size = conf.batch_size
nb_epoch = conf.nb_epoch

# Derive the model name from this file's name and create its output folder.
model_name = os.path.basename(__file__)[:-3]
folder_path = 'model/%s'%model_name
if not os.path.isdir(folder_path):
    os.makedirs(folder_path)

# the data, shuffled and split between train and test sets
train_data = load_data.load_ner(dataset='eng.train', form='BIOES')
dev_data = load_data.load_ner(dataset='eng.testa', form='BIOES')
train_samples = len(train_data)
dev_samples = len(dev_data)
print('train shape:', train_samples)
print('dev shape:', dev_samples)
print()

# Word embeddings: prepend a zero row (padding index 0) and append a random
# row (unknown-word index vocab+1).
word_embedding = pd.read_csv('../preprocessing/senna/embeddings.txt', delimiter=' ', header=None)
word_embedding = word_embedding.values
word_embedding = np.concatenate([np.zeros((1,emb_length)),word_embedding, np.random.uniform(-1,1,(1,emb_length))])

# Auto-encoder hash embeddings, padded/extended the same way.
hash_embedding = pd.read_csv('../preprocessing/ner-auto-encoder-2/auto-encoder-embeddings.txt', delimiter=' ', header=None)
hash_embedding = hash_embedding.values
hash_embedding = np.concatenate([np.zeros((1,hash_length)),hash_embedding, np.random.rand(1,hash_length)])

# Model: two embedding inputs + three one-hot feature inputs, concatenated
# and fed through a 2-layer bidirectional LSTM with per-timestep softmax.
# (Keras 1.x functional API: `merge`, `input=`/`output=` keywords.)
embed_index_input = Input(shape=(step_length,))
embedding = Embedding(emb_vocab+2, emb_length, weights=[word_embedding], mask_zero=True, input_length=step_length)(embed_index_input)
hash_index_input = Input(shape=(step_length,))
encoder_embedding = Embedding(hash_vocab+2, hash_length, weights=[hash_embedding], mask_zero=True, input_length=step_length)(hash_index_input)
pos_input = Input(shape=(step_length, pos_length))
chunk_input = Input(shape=(step_length, chunk_length))
gazetteer_input = Input(shape=(step_length, gazetteer_length))
senna_hash_pos_chunk_gazetteer_merge = merge([embedding, encoder_embedding, pos_input, chunk_input, gazetteer_input], mode='concat')
input_mask = Masking(mask_value=0)(senna_hash_pos_chunk_gazetteer_merge)
dp_1 = Dropout(0.5)(input_mask)
hidden_1 = Bidirectional(LSTM(128, return_sequences=True))(dp_1)
hidden_2 = Bidirectional(LSTM(64, return_sequences=True))(hidden_1)
dp_2 = Dropout(0.5)(hidden_2)
output = TimeDistributed(Dense(output_length, activation='softmax'))(dp_2)
model = Model(input=[embed_index_input,hash_index_input,pos_input,chunk_input, gazetteer_input], output=output)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
print(model.summary())

number_of_train_batches = int(math.ceil(float(train_samples)/batch_size))
number_of_dev_batches = int(math.ceil(float(dev_samples)/batch_size))

print('start train %s ...\n'%model_name)

# Bookkeeping for model selection and the loss/accuracy plots.
best_accuracy = 0
best_epoch = 0
all_train_loss = []
all_dev_loss = []
all_dev_accuracy = []

log = open('%s/model_log.txt'%folder_path, 'w')

start_time = datetime.now()
print('train start at %s\n'%str(start_time))
log.write('train start at %s\n\n'%str(start_time))
for epoch in range(nb_epoch):
    start = datetime.now()
    print('-'*60)
    print('epoch %d start at %s'%(epoch, str(start)))
    log.write('-'*60+'\n')
    log.write('epoch %d start at %s\n'%(epoch, str(start)))
    train_loss = 0
    dev_loss = 0
    np.random.shuffle(train_data)
    # --- training pass -------------------------------------------------
    for i in range(number_of_train_batches):
        train_batch = train_data[i*batch_size: (i+1)*batch_size]
        embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=train_batch, form='BIOES', gram='bi')
        # Expand POS/chunk ids to one-hot and pad each sentence to step_length.
        pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
        chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
        gazetteer, length_2 = prepare.prepare_gazetteer_BIOES(batch=train_batch)
        gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])
        y = np.array([np_utils.to_categorical(each, output_length) for each in label])
        train_metrics = model.train_on_batch([embed_index, hash_index, pos, chunk, gazetteer], y)
        train_loss += train_metrics[0]
    all_train_loss.append(train_loss)

    # --- evaluation pass on the dev set --------------------------------
    correct_predict = 0
    all_predict = 0
    for j in range(number_of_dev_batches):
        dev_batch = dev_data[j*batch_size: (j+1)*batch_size]
        embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=dev_batch, form='BIOES', gram='bi')
        pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
        chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
        gazetteer, length_2 = prepare.prepare_gazetteer_BIOES(batch=dev_batch)
        gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])
        y = np.array([np_utils.to_categorical(each, output_length) for each in label])
        # for loss
        dev_metrics = model.test_on_batch([embed_index, hash_index, pos, chunk, gazetteer], y)
        dev_loss += dev_metrics[0]
        # for accuracy: count token-level matches only up to each real length.
        prob = model.predict_on_batch([embed_index, hash_index, pos, chunk, gazetteer])
        for i, l in enumerate(length):
            predict_label = np_utils.categorical_probas_to_classes(prob[i])
            correct_predict += np.sum(predict_label[:l]==label[i][:l])
        all_predict += np.sum(length)
    # NOTE(review): "epcoh" is a long-standing typo for "epoch"; kept as-is.
    epcoh_accuracy = float(correct_predict)/all_predict
    all_dev_accuracy.append(epcoh_accuracy)
    all_dev_loss.append(dev_loss)
    if epcoh_accuracy>=best_accuracy:
        best_accuracy = epcoh_accuracy
        best_epoch = epoch
    end = datetime.now()
    # NOTE(review): indentation reconstructed -- the model appears to be
    # saved every epoch (not only on improvement); confirm against the
    # original file.
    model.save('%s/model_epoch_%d.h5'%(folder_path, epoch), overwrite=True)
    print('epoch %d end at %s'%(epoch, str(end)))
    print('epoch %d train loss: %f'%(epoch, train_loss))
    print('epoch %d dev loss: %f'%(epoch, dev_loss))
    print('epoch %d dev accuracy: %f'%(epoch, epcoh_accuracy))
    print('best epoch now: %d\n'%best_epoch)
    log.write('epoch %d end at %s\n'%(epoch, str(end)))
    log.write('epoch %d train loss: %f\n'%(epoch, train_loss))
    log.write('epoch %d dev loss: %f\n'%(epoch, dev_loss))
    log.write('epoch %d dev accuracy: %f\n'%(epoch, epcoh_accuracy))
    log.write('best epoch now: %d\n\n'%best_epoch)

end_time = datetime.now()
print('train end at %s\n'%str(end_time))
log.write('train end at %s\n\n'%str(end_time))
timedelta = end_time - start_time
print('train cost time: %s\n'%str(timedelta))
print('best epoch last: %d\n'%best_epoch)
log.write('train cost time: %s\n\n'%str(timedelta))
log.write('best epoch last: %d\n\n'%best_epoch)
plot.plot_loss(all_train_loss, all_dev_loss, folder_path=folder_path, title='%s'%model_name)
plot.plot_accuracy(all_dev_accuracy, folder_path=folder_path, title='%s'%model_name)
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import imp
import subprocess
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from distutils import spawn
try:
import colorama
colorama.init() # Initialize colorama on Windows
except ImportError:
# Don't require colorama just for running paver tasks. This allows us to
# run `paver install' without requiring the user to first have colorama
# installed.
pass
# Add the current directory to the module search path.
sys.path.insert(0, os.path.abspath('.'))
## Constants
CODE_DIRECTORY = 'mysqlbinlog2blinker'
DOCS_DIRECTORY = 'docs'
TESTS_DIRECTORY = 'tests'
PYTEST_FLAGS = ['--doctest-modules']
# Import metadata. Normally this would just be:
#
# from mysqlbinlog2blinker import metadata
#
# However, when we do this, we also import `mysqlbinlog2blinker/__init__.py'.
# If this imports names from some other modules and these modules have
# third-party dependencies that need installing (which happens after this file
# is run), the
# script will crash. What we do instead is to load the metadata module by path
# instead, effectively side-stepping the dependency problem. Please make sure
# metadata has no dependencies, otherwise they will need to be added to
# the setup_requires keyword.
metadata = imp.load_source(
'metadata', os.path.join(CODE_DIRECTORY, 'metadata.py'))
## Miscellaneous helper functions
def get_project_files():
    """Retrieve a list of project files, ignoring hidden files.

    :return: sorted list of project files
    :rtype: :class:`list`
    """
    if is_git_project() and has_git():
        return get_git_project_files()
    project_files = []
    for top, subdirs, files in os.walk('.'):
        # Prune hidden directories in place so os.walk skips them.
        # BUGFIX: the original removed from ``subdirs`` while iterating over
        # it, which silently skips the entry following each removed one.
        subdirs[:] = [d for d in subdirs if not d.startswith('.')]
        for f in files:
            if f.startswith('.'):
                continue
            project_files.append(os.path.join(top, f))
    return project_files
def is_git_project():
    """Return True when the working directory is the root of a git checkout."""
    return os.path.isdir('.git')
def has_git():
    """Return True when a ``git`` executable can be found on the PATH."""
    return spawn.find_executable("git") is not None
def get_git_project_files():
    """Retrieve a list of all non-ignored files, including untracked files,
    excluding deleted files.

    :return: sorted list of git project files
    :rtype: :class:`list`
    """
    tracked_or_untracked = git_ls_files(
        '--cached',             # All files cached in the index
        '--others',             # Untracked files
        # Exclude untracked files that would be excluded by .gitignore, etc.
        '--exclude-standard')
    pending_deletes = git_ls_files('--deleted')
    # Set iteration order is arbitrary; sort so downstream tools (flake8,
    # etc.) always see a well-defined order.
    return sorted(tracked_or_untracked - pending_deletes)
def git_ls_files(*cmd_args):
    """Run ``git ls-files`` in the top-level project directory. Arguments go
    directly to execution call.

    NOTE: under Python 3 ``check_output`` returns bytes, so the resulting
    set contains byte strings.

    :return: set of file names
    :rtype: :class:`set`
    """
    output = subprocess.check_output(['git', 'ls-files'] + list(cmd_args))
    return set(output.splitlines())
def print_success_message(message):
    """Print a message indicating success in green color to STDOUT.

    Falls back to plain output when colorama is unavailable.

    :param message: the message to print
    :type message: :class:`str`
    """
    try:
        import colorama
    except ImportError:
        print(message)
    else:
        print(colorama.Fore.GREEN + message + colorama.Fore.RESET)
def print_failure_message(message):
    """Print a message indicating failure in red color to STDERR.

    Falls back to plain output when colorama is unavailable.

    :param message: the message to print
    :type message: :class:`str`
    """
    try:
        import colorama
    except ImportError:
        print(message, file=sys.stderr)
    else:
        print(colorama.Fore.RED + message + colorama.Fore.RESET,
              file=sys.stderr)
def read(filename):
    """Return the contents of a file.

    *filename* is resolved relative to the directory of this module.

    :param filename: file path
    :type filename: :class:`str`
    :return: the file's content
    :rtype: :class:`str`
    """
    full_path = os.path.join(os.path.dirname(__file__), filename)
    with open(full_path) as stream:
        return stream.read()
def _lint():
    """Run lint and return an exit code."""
    # Flake8 doesn't have an easy way to run checks using a Python function, so
    # just fork off another process to do it.
    #
    # BUGFIX: get_project_files() yields byte strings from the git branch
    # (subprocess output) but str from the os.walk fallback under Python 3.
    # The original only tested endswith(b'.py'), so the os.walk branch
    # matched no files at all. Test with the matching type instead.
    project_python_files = [
        filename for filename in get_project_files()
        if filename.endswith(b'.py' if isinstance(filename, bytes) else '.py')
    ]
    retcode = subprocess.call(
        ['flake8', '--max-complexity=10'] + project_python_files)
    if retcode == 0:
        print_success_message('No style errors')
    return retcode
def _test():
    """Run the unit tests.

    :return: exit code
    """
    # Make sure to import pytest in this function. For the reason, see here:
    # <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
    import pytest
    # This runs the unit tests.
    # It also runs doctest, but only on the modules in TESTS_DIRECTORY.
    return pytest.main(PYTEST_FLAGS + [TESTS_DIRECTORY])
def _test_all():
    """Run lint and tests.

    :return: exit code (non-zero when either step failed)
    """
    lint_rc = _lint()
    return lint_rc + _test()
# The following code is to allow tests to be run with `python setup.py test'.
# The main reason to make this possible is to allow tests to be run as part of
# Setuptools' automatic run of 2to3 on the source code. The recommended way to
# run tests is still `paver test_all'.
# See <http://pythonhosted.org/setuptools/python3.html>
# Code based on <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
class TestAllCommand(TestCommand):
    """setuptools ``test`` command that runs lint plus the full test suite."""
    def finalize_options(self):
        TestCommand.finalize_options(self)
        # These are fake, and just set to appease distutils and setuptools.
        self.test_suite = True
        self.test_args = []

    def run_tests(self):
        # Exit with the combined lint+test return code so callers see failures.
        raise SystemExit(_test_all())
# define install_requires for specific Python versions
python_version_specific_requires = []

# as of Python >= 2.7 and >= 3.2, the argparse module is maintained within
# the Python standard library, otherwise we install it as a separate package
if sys.version_info < (2, 7) or (3, 0) <= sys.version_info < (3, 3):
    python_version_specific_requires.append('argparse')

# Keyword arguments for setuptools.setup(), assembled here so main() stays
# trivial. See here for more options:
# <http://pythonhosted.org/setuptools/setuptools.html>
setup_dict = dict(
    name=metadata.package,
    version=metadata.version,
    author=metadata.authors[0],
    author_email=metadata.emails[0],
    maintainer=metadata.authors[0],
    maintainer_email=metadata.emails[0],
    url=metadata.url,
    description=metadata.description,
    long_description=read('README.rst'),
    # Find a list of classifiers here:
    # <http://pypi.python.org/pypi?%3Aaction=list_classifiers>
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Documentation',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Software Distribution',
    ],
    packages=find_packages(exclude=(TESTS_DIRECTORY,)),
    install_requires=[
        'PyMySQL==0.6.7',
        'mysql-replication>=0.8',
        'blinker',
    ] + python_version_specific_requires,
    # Allow tests to be run with `python setup.py test'.
    tests_require=[
        'pytest',
        'mock',
        'flake8',
    ],
    cmdclass={'test': TestAllCommand},
    zip_safe=False,  # don't use eggs
    entry_points={
        'console_scripts': [
            # '${package}_cli = ${package}.main:entry_point'
        ],
        # if you have a gui, use this
        # 'gui_scripts': [
        #     '${package}_gui = ${package}.gui:entry_point'
        # ]
    }
)
def main():
    """Entry point: invoke setuptools with the assembled keyword arguments."""
    setup(**setup_dict)

if __name__ == '__main__':
    main()
| |
#! /usr/bin/python
# -*- mode: python -*-
from contextlib import contextmanager
import glob
import itertools
import logbook
import os
import re
import stat
import shutil
import subprocess
import tempfile
from raven import Client
from rq_queues import default_queue, retry_queue
from sentry_dsn import SENTRY_DSN
sentry = Client(SENTRY_DSN)
_logger = logbook.Logger(__name__)
def unzip_docs(filename, dest, package_name, version):
    """Unpack a docs tarball into ``dest/<package_name>`` and record metadata.

    The tarball at *filename* is extracted into a ``sphinx/html`` tree next
    to it, then the whole directory is published under *dest*.
    """
    base_dir = os.path.dirname(filename)
    html_dir = os.path.join(base_dir, "sphinx", "html")
    if not os.path.isdir(html_dir):
        os.makedirs(html_dir)
    subprocess.check_call(["tar", "xzf", filename, "-C", html_dir])
    os.unlink(filename)
    _write_metadata(base_dir, package_name=package_name, version=version)
    _move_to_dest(base_dir, os.path.join(dest, package_name))
def build_docs(repo, dest, pypi=None, retries_left=5):
    """Clone *repo*, build its sphinx and dash docs and publish under *dest*.

    On failure the job is re-enqueued on the retry queue until
    *retries_left* is exhausted, at which point the exception is reported
    to sentry.

    :param repo: git URL of the repository to document
    :param dest: root directory the docs are published under
    :param pypi: optional package index URL passed to pip
    :param retries_left: remaining retry budget
    :return: 0 on success, None when the build failed
    """
    temp_dest = tempfile.mkdtemp()
    try:
        with _ensuring_virtualenv() as env:
            with _temporary_checkout(repo, env, pypi) as temp_checkout:
                temp_checkout.write_metadata(temp_dest)
                temp_checkout.generate_sphinx(os.path.join(temp_dest, "sphinx"))
                temp_checkout.generate_dash(os.path.join(temp_dest, "dash"))
                # refresh the metadata after the build, as before
                temp_checkout.write_metadata(temp_dest)
                _move_to_dest(temp_dest, os.path.join(dest, temp_checkout.get_package_name()))
                return 0
    # BUGFIX: was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt.
    except Exception:
        _logger.error("Exception while building docs", exc_info=True)
        retries_left -= 1
        if retries_left <= 0:
            sentry.captureException()
        else:
            retry_queue.enqueue_call(
                retry_build_docs,
                args=(repo, dest, pypi, retries_left),
            )
    finally:
        # On success _move_to_dest has renamed temp_dest away; on failure the
        # directory was previously leaked. Remove whatever remains.
        shutil.rmtree(temp_dest, ignore_errors=True)
def retry_build_docs(*args):
    """
    Meant to be called from cron job. Should only push the rebuild job again to the main (default) queue
    """
    # *args is (repo, dest, pypi, retries_left), forwarded unchanged.
    default_queue.enqueue_call(build_docs, args=args)
def _temporary_checkout(repo, env, pypi):
    """Clone *repo* into a fresh temp directory, returning a Checkout on it.

    NOTE(review): *repo* is interpolated into a shell command; assumed to
    come from trusted internal configuration -- confirm before exposing.
    """
    checkout_dir = os.path.join(tempfile.mkdtemp(), "src")
    _execute_assert_success("git clone {0} {1}".format(repo, checkout_dir))
    return Checkout(checkout_dir, env, pypi)
@contextmanager
def _ensuring_virtualenv():
    """Yield the path of a throwaway virtualenv holding the doc toolchain.

    The virtualenv is created at a fixed location, populated with doc2dash
    and a pinned Sphinx, and removed again when the context exits.
    """
    venv = "/tmp/virtualenvs/builder"
    _execute_assert_success("virtualenv {}".format(venv))
    try:
        for pkg in ("doc2dash", "Sphinx==1.1.3"):
            _execute_assert_success("{0}/bin/pip install --use-wheel --find-links /opt/devdocs/wheels {1}".format(venv, pkg))
        yield venv
    finally:
        shutil.rmtree(venv)
class Checkout(object):
    """A source checkout installed into a virtualenv.

    Knows how to extract package metadata, build the sphinx HTML docs and
    produce a Dash docset from them.
    """
    def __init__(self, path, venv, pypi):
        super(Checkout, self).__init__()
        self._path = path
        self._venv = venv
        self._package_name = self._version = self._description = None
        self._fetch_metadata()
        self._install(pypi)

    def get_package_name(self):
        return self._package_name

    def _fetch_metadata(self):
        """Populate name/description from PKG-INFO and version from git tags."""
        # Building an sdist forces setuptools to write the PKG-INFO file.
        _execute_in_venv(self._venv, "python setup.py sdist", cwd=self._path)
        [pkg_info_filename] = glob.glob("{}/*.egg-info/PKG-INFO".format(self._path))
        with open(pkg_info_filename) as pkg_info_file:
            metadata = {}
            for metadata_entry in pkg_info_file:
                key, value = metadata_entry.split(":", 1)
                metadata[key.strip()] = value.strip()
        self._package_name = metadata["Name"]
        self._description = metadata["Description"]
        self._version = _execute_assert_success(
            "git describe --tags",
            cwd=self._path, stdout=subprocess.PIPE).stdout.read().strip()
        _logger.info("Processing %s (version %s)", self._package_name, self._version)

    def _install(self, pypi):
        """pip-install the checkout (editable) into the virtualenv."""
        command = "pip install --use-wheel --find-links /opt/devdocs/wheels"
        if pypi:
            command += " -i {0}".format(pypi)
        command += " -e {0}".format(self._path)
        _execute_in_venv(self._venv, command, cwd=self._path)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        # NOTE(review): the checkout's temp directory is never removed here;
        # presumably intentional -- confirm before adding cleanup.
        pass

    def generate_sphinx(self, dest_dir):
        """Build the checkout's sphinx docs into *dest_dir*."""
        _execute_in_venv(
            self._venv,
            "python setup.py build_sphinx --version {version} --release {version} -E -a --build-dir {dest}".format(dest=dest_dir, version=self._version),
            cwd=self._path
        )

    def generate_dash(self, dest_dir):
        """Build a Dash docset tarball of the docs into *dest_dir*."""
        temp_path = tempfile.mkdtemp()
        try:
            # BUGFIX: the temporary sphinx tree now lives under ``temp_path``
            # (the directory removed in the finally clause). Previously it was
            # created inside a second, never-deleted mkdtemp() directory while
            # the unused ``temp_path`` was the one being cleaned up.
            temp_sphinx_dir = os.path.join(temp_path, "sphinx")
            with self._patched_repository_context():
                self.generate_sphinx(temp_sphinx_dir)
            _execute_in_venv(self._venv, "doc2dash {temp_sphinx_dir}/html -i {icon} -n {self._package_name} --destination {dest}/".format(
                icon=_get_icon_path(),
                self=self,
                temp_sphinx_dir=temp_sphinx_dir,
                dest=dest_dir,
            ))
            _execute_assert_success("tar -czvf {0}.tgz {0}.docset".format(self._package_name), cwd=dest_dir)
            shutil.rmtree(os.path.join(dest_dir, "{0}.docset".format(self._package_name)))
        finally:
            shutil.rmtree(temp_path)

    @contextmanager
    def _patched_repository_context(self):
        """Temporarily patch the repo's sphinx config, reverting on exit."""
        try:
            self._patch_repo()
            yield
        finally:
            _execute_assert_success("git reset --hard", cwd=self._path)

    def _patch_repo(self):
        """Force ``nosidebar`` on in the checkout's sphinx doc/conf.py."""
        config_filename = os.path.join(self._path, "doc", "conf.py")
        with open(config_filename) as f:
            config = f.read()
        config_dict = {"__file__" : config_filename}
        # conf.py is project-local code from the checkout; exec'ing it is the
        # standard sphinx way to obtain its settings.
        exec(config, config_dict)
        html_options = config_dict.get("html_theme_options", {})
        html_options["nosidebar"] = True
        # BUGFIX: re.sub returns the substituted string; the original
        # discarded the result, so the file was rewritten unchanged and the
        # nosidebar patch silently never applied.
        config = re.sub(r"#?html_theme_options\s+=\s+\{[^\}]*\}",
                        "html_theme_options={!r}".format(html_options), config)
        with open(config_filename, "w") as f:
            f.write(config)

    def write_metadata(self, dest_dir):
        _write_metadata(dest_dir, package_name=self._package_name, version=self._version, description=self._description)
def _get_icon_path():
return os.path.abspath(os.path.join(os.path.dirname(__file__), "docs_icon.png"))
def _fix_permissions(directory):
    """Make *directory* and everything beneath it group/world readable."""
    _fix_permissions_single_file(directory)
    for root, dirnames, filenames in os.walk(directory):
        for entry in itertools.chain(dirnames, filenames):
            _fix_permissions_single_file(os.path.join(root, entry))
def _fix_permissions_single_file(path):
mode = os.stat(path).st_mode | stat.S_IRGRP | stat.S_IROTH
if os.path.isdir(path):
mode |= stat.S_IXGRP | stat.S_IXOTH
os.chmod(path, mode)
def _move_to_dest(src, dest):
    """Replace *dest* with *src*, deleting the previous tree afterwards."""
    _logger.debug("move: {} --> {}", src, dest)
    trash = dest + ".deleted"
    if os.path.exists(dest):
        # Park the old version aside so the rename below can succeed.
        os.rename(dest, trash)
    os.rename(src, dest)
    _fix_permissions(dest)
    if os.path.exists(trash):
        shutil.rmtree(trash)
def _execute_assert_success(cmd, *args, **kwargs):
    """Run *cmd* through the shell; raise ExecutionError on non-zero exit.

    Extra arguments are forwarded to subprocess.Popen. Returns the
    (already waited) Popen object.
    """
    _logger.debug("exec: {}", cmd)
    process = subprocess.Popen(cmd, shell=True,
                               *args, **kwargs)
    if process.wait() != 0:
        raise ExecutionError("Command failed: cmd: {!r}".format(cmd))
    return process
def _execute_in_venv(venv, cmd, *args, **kwargs):
    """Run *cmd* through bash with the virtualenv at *venv* activated."""
    wrapped = "bash -c 'source {}/bin/activate && {}'".format(venv, cmd)
    _execute_assert_success(wrapped, *args, **kwargs)
class ExecutionError(Exception):
    """Raised when a shell command run via _execute_assert_success fails."""
    pass
def _write_metadata(dest, package_name, version, description=None):
metadata_dir = os.path.join(dest, "metadata")
if not os.path.isdir(metadata_dir):
os.makedirs(metadata_dir)
for field, value in (("package_name", package_name), ("version", version), ("description", description)):
if value is None:
continue
with open(os.path.join(metadata_dir, field), "w") as f:
f.write(value)
| |
#!/usr/bin/env python
"""
ORIGINAL AUTHOR:
midi.py -- MIDI classes and parser in Python
Placed into the public domain in December 2001 by Will Ware
Python MIDI classes: meaningful data structures that represent MIDI
events
and other objects. You can read MIDI files to create such objects, or
generate a collection of objects and use them to write a MIDI file.
Helpful MIDI info:
http://crystal.apana.org.au/ghansper/midi_introduction/midi_file_format.html
http://www.argonet.co.uk/users/lenny/midi/mfile.html
"""
from collections import OrderedDict
import exceptions
import json
import string
import sys
import types
debugflag = 0
def showstr(str1, n=16):
    # Debug helper: hex-dump the first *n* characters of *str1*.
    # Python 2 print statements (trailing comma keeps output on one line).
    for x in str1[:n]:
        print ('%02x' % ord(x)),
    print
def getNumber(str1, length):
    """Read a *length*-byte big-endian integer from the head of *str1*.

    MIDI uses big-endian for everything. Returns (value, rest-of-string).
    """
    value = 0
    index = 0
    while index < length:
        value = (value << 8) + ord(str1[index])
        index += 1
    return value, str1[length:]
def getVariableLengthNumber(str1):
    """Decode a MIDI variable-length quantity from the head of *str1*.

    Each byte contributes 7 bits, most significant first; the high bit
    marks continuation. Returns (value, rest-of-string).
    """
    value = 0
    pos = 0
    while True:
        byte = ord(str1[pos])
        pos += 1
        value = (value << 7) + (byte & 0x7F)
        if not (byte & 0x80):
            return value, str1[pos:]
def putNumber(num, length):
    """Encode *num* as a *length*-byte big-endian string (MIDI convention)."""
    shifts = [8 * (length - 1 - i) for i in range(length)]
    return ''.join(chr((num >> shift) & 0xFF) for shift in shifts)
def putVariableLengthNumber(x):
    """Encode a non-negative int as a MIDI variable-length quantity.

    Seven bits per byte, most significant group first; the continuation
    bit (0x80) is set on every byte except the last.
    """
    lst = [ ]
    while 1:
        y, x = x & 0x7F, x >> 7
        lst.append(chr(y + 0x80))
        if x == 0:
            break
    lst.reverse()
    # clear the continuation bit on the final byte
    lst[-1] = chr(ord(lst[-1]) & 0x7f)
    # BUGFIX/consistency: ``string.join`` is Python-2-only; ''.join is
    # equivalent there and matches putNumber above.
    return ''.join(lst)
class EnumException(Exception):
    """Reported for malformed Enumeration definitions.

    Subclasses the builtin directly: ``exceptions.Exception`` is the same
    object on Python 2 and the ``exceptions`` module no longer exists on
    Python 3, so this form is equivalent and portable.
    """
    pass
class Enumeration:
    """Two-way name<->value mapping built from a list of names or
    (name, value) tuples; values auto-increment from the last one given.

    Python 2 only (types.TupleType/StringType, print statements, has_key).
    Note that malformed input is only *printed*, never raised.
    """
    def __init__(self, enumList):
        lookup = { }
        reverseLookup = { }
        i = 0
        uniqueNames = [ ]
        uniqueValues = [ ]
        for x in enumList:
            if type(x) == types.TupleType:
                # explicit (name, value) pair resets the running counter
                x, i = x
            if type(x) != types.StringType:
                print EnumException, "enum name is not a string: " + x
            if type(i) != types.IntType:
                print EnumException, "enum value is not an integer: " + i
            if x in uniqueNames:
                print EnumException, "enum name is not unique: " + x
            if i in uniqueValues:
                print EnumException, "enum value is not unique for " + x
            uniqueNames.append(x)
            uniqueValues.append(i)
            lookup[x] = i
            reverseLookup[i] = x
            i = i + 1
        self.lookup = lookup          # name -> value
        self.reverseLookup = reverseLookup  # value -> name
    def __add__(self, other):
        # merged Enumeration containing the pairs of both operands
        lst = [ ]
        for k in self.lookup.keys():
            lst.append((k, self.lookup[k]))
        for k in other.lookup.keys():
            lst.append((k, other.lookup[k]))
        return Enumeration(lst)
    def hasattr(self, attr):
        # True when *attr* is a known enum name
        return self.lookup.has_key(attr)
    def has_value(self, attr):
        # True when *attr* is a known enum value
        return self.reverseLookup.has_key(attr)
    def __getattr__(self, attr):
        # attribute access by enum name; unknown names still raise KeyError
        # below (the print only reports, it does not raise)
        if not self.lookup.has_key(attr):
            print AttributeError
        return self.lookup[attr]
    def whatis(self, value):
        # name for a given value
        return self.reverseLookup[value]
# Status-byte high nibble -> channel voice message type (MIDI spec).
channelVoiceMessages = Enumeration([("NOTE_OFF", 0x80),
                                    ("NOTE_ON", 0x90),
                                    ("POLYPHONIC_KEY_PRESSURE", 0xA0),
                                    ("CONTROLLER_CHANGE", 0xB0),
                                    ("PROGRAM_CHANGE", 0xC0),
                                    ("CHANNEL_KEY_PRESSURE", 0xD0),
                                    ("PITCH_BEND", 0xE0)])

# Controller numbers (within a 0xB0 message) that are channel mode messages.
channelModeMessages = Enumeration([("ALL_SOUND_OFF", 0x78),
                                   ("RESET_ALL_CONTROLLERS", 0x79),
                                   ("LOCAL_CONTROL", 0x7A),
                                   ("ALL_NOTES_OFF", 0x7B),
                                   ("OMNI_MODE_OFF", 0x7C),
                                   ("OMNI_MODE_ON", 0x7D),
                                   ("MONO_MODE_ON", 0x7E),
                                   ("POLY_MODE_ON", 0x7F)])

# Meta event type byte (following an 0xFF status) -> meta event name.
metaEvents = Enumeration([("SEQUENCE_NUMBER", 0x00),
                          ("TEXT_EVENT", 0x01),
                          ("COPYRIGHT_NOTICE", 0x02),
                          ("SEQUENCE_TRACK_NAME", 0x03),
                          ("INSTRUMENT_NAME", 0x04),
                          ("LYRIC", 0x05),
                          ("MARKER", 0x06),
                          ("CUE_POINT", 0x07),
                          ("MIDI_CHANNEL_PREFIX", 0x20),
                          ("MIDI_PORT", 0x21),
                          ("END_OF_TRACK", 0x2F),
                          ("SET_TEMPO", 0x51),
                          ("SMTPE_OFFSET", 0x54),
                          ("TIME_SIGNATURE", 0x58),
                          ("KEY_SIGNATURE", 0x59),
                          ("SEQUENCER_SPECIFIC_META_EVENT", 0x7F)])

# runningStatus appears to want to be an attribute of a MidiTrack. But
# it doesn't seem to do any harm to implement it as a global.
runningStatus = None
class MidiEvent:
    """One parsed MIDI event belonging to a MidiTrack.

    Python 2 only (__cmp__/cmp, has_key, print statements, str-as-bytes).
    ``read``/``write`` (de)serialize the event from/to the raw track data.
    """
    def __init__(self, track):
        self.track = track
        self.time = None
        self.duration = 0
        self.channel = self.pitch = self.velocity = self.data = None
    def setData(self, type1, time, duration, channel, pitch, velocity, data):
        # Populate all fields at once; a None type leaves self.type unset.
        if type1 != None:
            self.type = type1
        self.duration = duration
        self.time = time
        self.channel = channel
        self.pitch = pitch
        self.velocity = velocity
        self.data = data
    def __cmp__(self, other):
        # assert self.time != None and other.time != None
        # Events order by time (Python 2 rich-comparison fallback).
        return cmp(self.time, other.time)
    def __repr__(self):
        r = ("<MidiEvent %s, t=%s, track=%s, channel=%s" %
             (self.type,
              repr(self.time),
              self.track.index,
              repr(self.channel)))
        for attrib in ["pitch", "data", "velocity"]:
            if getattr(self, attrib) != None:
                r = r + ", " + attrib + "=" + repr(getattr(self,attrib))
        return r + ">"
    def read(self, time, duration, str1):
        """Parse one event from the head of *str1*; return the remainder.

        Also updates the module-level running status and notifies the
        track's channel of note on/off pairs.
        """
        global runningStatus
        self.time = time
        self.duration = duration
        # do we need to use running status?
        if not (ord(str1[0]) & 0x80):
            str1 = runningStatus + str1
        runningStatus = x = str1[0]
        x = ord(x)
        y = x & 0xF0  # message type nibble
        z = ord(str1[1])
        if channelVoiceMessages.has_value(y):
            self.channel = (x & 0x0F) + 1
            self.type = channelVoiceMessages.whatis(y)
            # PROGRAM_CHANGE / CHANNEL_KEY_PRESSURE carry one data byte,
            # all other voice messages carry two (pitch + velocity).
            if (self.type == "PROGRAM_CHANGE" or
                self.type == "CHANNEL_KEY_PRESSURE"):
                self.data = z
                return str1[2:]
            else:
                self.pitch = z
                self.velocity = ord(str1[2])
                channel = self.track.channels[self.channel - 1]
                # NOTE_ON with velocity 0 is by convention a note off.
                if (self.type == "NOTE_OFF" or
                    (self.velocity == 0 and self.type == "NOTE_ON")):
                    channel.noteOff(self.pitch, self.time)
                elif self.type == "NOTE_ON":
                    channel.noteOn(self.pitch, self.time, self.velocity)
                return str1[3:]
        elif y == 0xB0 and channelModeMessages.has_value(z):
            self.channel = (x & 0x0F) + 1
            self.type = channelModeMessages.whatis(z)
            if self.type == "LOCAL_CONTROL":
                self.data = (ord(str1[2]) == 0x7F)
            elif self.type == "MONO_MODE_ON":
                self.data = ord(str1[2])
            return str1[3:]
        elif x == 0xF0 or x == 0xF7:
            # sysex: variable-length payload follows the status byte
            self.type = {0xF0: "F0_SYSEX_EVENT", 0xF7: "F7_SYSEX_EVENT"}[x]
            length, str1 = getVariableLengthNumber(str1[1:])
            self.data = str1[:length]
            return str1[length:]
        elif x == 0xFF:
            # meta event: FF <type> <length> <payload>
            if not metaEvents.has_value(z):
                print "Unknown meta event: FF %02X" % z
                sys.stdout.flush()
                print "Unknown midi event type"
            self.type = metaEvents.whatis(z)
            length, str1 = getVariableLengthNumber(str1[2:])
            self.data = str1[:length]
            return str1[length:]
        # falls through with no return (None) for unrecognized status bytes
        print "Unknown midi event type"
    def write(self):
        """Serialize the event (delta time included) back to raw bytes."""
        duration = putVariableLengthNumber(self.duration)
        sysex_event_dict = {"F0_SYSEX_EVENT": 0xF0,
                            "F7_SYSEX_EVENT": 0xF7}
        if channelVoiceMessages.hasattr(self.type):
            x = chr((self.channel - 1) + getattr(channelVoiceMessages, self.type))
            if (self.type != "PROGRAM_CHANGE" and
                self.type != "CHANNEL_KEY_PRESSURE"):
                data = chr(self.pitch) + chr(self.velocity)
            else:
                data = chr(int(self.data))
            return duration + x + data
        elif channelModeMessages.hasattr(self.type):
            x = getattr(channelModeMessages, self.type)
            x = (chr(0xB0 + (self.channel - 1)) + chr(x) + chr(self.data))
            return duration + x
        elif sysex_event_dict.has_key(self.type):
            str1 = chr(sysex_event_dict[self.type])
            str1 = str1 + putVariableLengthNumber(len(self.data))
            return duration + str1 + self.data
        elif metaEvents.hasattr(self.type):
            str1 = chr(0xFF) + chr(getattr(metaEvents, self.type))
            str1 = str1 + putVariableLengthNumber(len(self.data))
            return duration + str1 + self.data
        else:
            print "!!!! unknown midi event type: " + self.type
"""
register_note() is a hook that can be overloaded from a script that
imports this module. Here is how you might do that, if you wanted to
store the notes as tuples in a list. Including the distinction
between track and channel offers more flexibility in assigning voices.
import midi
notelist = [ ]
def register_note(t, c, p, v, t1, t2):
notelist.append((t, c, p, v, t1, t2))
midi.register_note = register_note
"""
def register_note(track_index, channel_index, pitch, velocity,
                  keyDownTime, keyUpTime):
    """Hook invoked for every completed NOTE_ON/NOTE_OFF pair.

    The default does nothing; importing scripts may rebind
    ``midi.register_note`` to collect notes.
    """
    pass
class MidiChannel:
    """A channel (together with a track) provides the continuity
    connecting
    a NOTE_ON event with its corresponding NOTE_OFF event. Together,
    those
    define the beginning and ending times for a Note."""

    def __init__(self, track, index):
        self.index = index
        self.track = track
        # pitch -> (keyDownTime, velocity) for notes currently sounding
        self.pitches = { }

    def __repr__(self):
        return "<MIDI channel %d>" % self.index

    def noteOn(self, pitch, time, velocity):
        self.pitches[pitch] = (time, velocity)

    def noteOff(self, pitch, time):
        # ``in`` replaces dict.has_key(), which was removed in Python 3
        # and is equivalent on Python 2.
        if pitch in self.pitches:
            keyDownTime, velocity = self.pitches[pitch]
            register_note(self.track.index, self.index, pitch, velocity, keyDownTime, time)
            del self.pitches[pitch]
        # The case where the pitch isn't in the dictionary is illegal,
        # I think, but we probably better just ignore it.
class DeltaTime(MidiEvent):
    """Pseudo-event holding the variable-length delta time that precedes
    every real event in a MIDI track."""
    type = "DeltaTime"
    def read(self, oldstr):
        # Returns (delta_time, remaining_string); also stores it in self.time.
        self.time, newstr = getVariableLengthNumber(oldstr)
        return self.time, newstr
    def write(self):
        str1 = putVariableLengthNumber(self.time)
        return str1
class MidiTrack:
    """One MTrk chunk: an alternating sequence of DeltaTime and MidiEvent
    objects plus the 16 MidiChannels used for note bookkeeping.

    Python 2 only (backtick repr syntax, str-as-bytes).
    """
    def __init__(self, index):
        self.index = index
        self.events = [ ]
        self.channels = [ ]
        self.length = 0  # payload byte length of the last chunk read
        for i in range(16):
            self.channels.append(MidiChannel(self, i+1))
    def setData(self, events):
        self.events = events
    def read(self, str1):
        """Parse one MTrk chunk from *str1*; return the unread remainder."""
        time = 0
        assert str1[:4] == "MTrk"
        length, str1 = getNumber(str1[4:], 4)
        self.length = length
        mystr = str1[:length]
        remainder = str1[length:]
        while mystr:
            # each event is preceded by its delta time
            delta_t = DeltaTime(self)
            dt, mystr = delta_t.read(mystr)
            time = time + dt
            self.events.append(delta_t)
            e = MidiEvent(self)
            mystr = e.read(time, dt, mystr)
            self.events.append(e)
        return remainder
    def write(self):
        # time1 = self.events[0].time
        # build str1 using MidiEvents
        str1 = ""
        for e in self.events:
            str1 = str1 + e.write()
        return "MTrk" + putNumber(len(str1), 4) + str1
    def __repr__(self):
        r = "<MidiTrack %d -- %d events\n" % (self.index, len(self.events))
        for e in self.events:
            r = r + "    " + `e` + "\n"
        return r + "  >"
class MidiFile:
    """A whole MIDI file: header data plus a list of MidiTracks.

    Python 2 only (backtick repr, str-as-bytes I/O).
    """
    def __init__(self):
        self.file = None
        self.format = 1
        self.tracks = [ ]
        # exactly one of the two timing fields is set, depending on the
        # division word of the header
        self.ticksPerQuarterNote = None
        self.ticksPerSecond = None
    def setData(self, tracks, ticksPerQuarterNote, ticksPerSecond):
        self.tracks = tracks
        self.ticksPerQuarterNote = ticksPerQuarterNote
        self.ticksPerSecond = ticksPerSecond
    def open(self, filename, attrib="rb"):
        # A None filename uses stdin/stdout; ``open`` below is the builtin
        # (the method name does not shadow it inside the method body).
        if filename == None:
            if attrib in ["r", "rb"]:
                self.file = sys.stdin
            else:
                self.file = sys.stdout
        else:
            self.file = open(filename, attrib)
    def __repr__(self):
        r = "<MidiFile %d tracks\n" % len(self.tracks)
        for t in self.tracks:
            r = r + "  " + `t` + "\n"
        return r + ">"
    def close(self):
        self.file.close()
    def read(self):
        self.readstr(self.file.read())
    def readstr(self, str1):
        """Parse an entire MIDI file image from the string *str1*."""
        assert str1[:4] == "MThd"
        length, str1 = getNumber(str1[4:], 4)
        assert length == 6
        format1, str1 = getNumber(str1, 2)
        self.format = format1
        assert format1 == 0 or format1 == 1 # dunno how to handle 2
        numTracks, str1 = getNumber(str1, 2)
        division, str1 = getNumber(str1, 2)
        if division & 0x8000:
            # SMPTE timing: high byte is negative frames/second
            framesPerSecond = -((division >> 8) | -128)
            ticksPerFrame = division & 0xFF
            assert ticksPerFrame == 24 or ticksPerFrame == 25 or \
                   ticksPerFrame == 29 or ticksPerFrame == 30
            if ticksPerFrame == 29: ticksPerFrame = 30  # drop frame
            self.ticksPerSecond = ticksPerFrame * framesPerSecond
        else:
            self.ticksPerQuarterNote = division & 0x7FFF
        for i in range(numTracks):
            trk = MidiTrack(i)
            str1 = trk.read(str1)
            self.tracks.append(trk)
    def write(self):
        self.file.write(self.writestr())
    def writestr(self):
        """Serialize the header and all tracks back to a file image."""
        division = self.ticksPerQuarterNote
        # Don't handle ticksPerSecond yet, too confusing
        assert (division & 0x8000) == 0
        str1 = "MThd" + putNumber(6, 4) + putNumber(self.format, 2)
        str1 = str1 + putNumber(len(self.tracks), 2)
        str1 = str1 + putNumber(division, 2)
        for trk in self.tracks:
            str1 = str1 + trk.write()
        return str1
def getMidiFile(path):
    """Open, parse and close the MIDI file at *path*; return the MidiFile."""
    midi_file = MidiFile()
    midi_file.open(path)
    midi_file.read()
    midi_file.close()
    return midi_file
def midiToJson(path):
    """Parse the MIDI file at *path* and return it as a JSON string.

    DeltaTime pseudo-events are omitted; binary event payloads are
    hex-encoded via the Python-2-only 'hex' string codec.
    """
    midiDict = OrderedDict()
    m = getMidiFile(path)
    midiDict['format'] = str(m.format)
    midiDict['ticksPerQuarterNote'] = m.ticksPerQuarterNote
    midiDict['ticksPerSecond'] = m.ticksPerSecond
    midiDict['nrOfTracks'] = len(m.tracks)
    midiDict['tracks'] = []
    tracks = m.tracks
    for track in tracks:
        trackDict = OrderedDict()
        trackDict['index'] = track.index
        trackDict['length'] = track.length
        trackDict['events'] = []
        events = track.events
        for event in events:
            if event.type == 'DeltaTime':
                continue
            evDict = OrderedDict()
            evDict['trackId'] = event.track.index
            evDict['type'] = event.type
            evDict['time'] = event.time
            evDict['channel'] = event.channel
            evDict['pitch'] = event.pitch
            evDict['velocity'] = event.velocity
            evDict['duration'] = event.duration
            # int payloads (e.g. PROGRAM_CHANGE) are stringified first
            if type(event.data) == int:
                evDict['data'] = str(event.data).encode('hex')
            elif not event.data is None:
                evDict['data'] = event.data.encode('hex')
            else:
                evDict['data'] = None
            trackDict['events'].append(evDict)
        midiDict['tracks'].append(trackDict)
    jsonResult = json.dumps(midiDict) #, ensure_ascii=False)
    return jsonResult
def jsonToMidi(jsondata, path):
    """Rebuild a MIDI file at *path* from JSON produced by midiToJson.

    Inverse of midiToJson; hex payloads are decoded via the Python-2-only
    'hex' string codec.
    """
    midiDict = json.loads(jsondata)
    tracks = midiDict['tracks']
    midiTracks = []
    for track in tracks:
        midiTrack = MidiTrack(track['index'])
        events = track['events']
        midiEvents = []
        for event in events:
            # midiToJson never emits DeltaTime entries; skip them defensively
            if event['type'] == 'DeltaTime':
                continue
            md = MidiEvent(midiTrack)
            evData = event['data']
            if evData is None:
                evData = None
            else:
                evData = evData.decode('hex')
            evType = event['type']
            md.setData(evType, event['time'], event['duration'], event['channel'], event['pitch'], event['velocity'], evData)
            midiEvents.append(md)
        midiTrack.setData(midiEvents)
        midiTracks.append(midiTrack)
    m = MidiFile()
    m.open(path, attrib="wb")
    m.setData(midiTracks, midiDict['ticksPerQuarterNote'], midiDict['ticksPerSecond'])
    m.write()
| |
import io
import hashlib
import synapse.axon as s_axon
import synapse.daemon as s_daemon
import synapse.lib.heap as s_heap
import synapse.telepath as s_telepath
import synapse.lib.service as s_service
from synapse.tests.common import *
craphash = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
asdfhash = '6a204bd89f3c8348afd5c77c717a097a'
class AxonTest(SynTest):
    def test_axon_basics(self):
        """Round-trip bytes through an Axon; state survives fini()/reopen."""
        with self.getTestDir() as axondir:
            axon = s_axon.Axon(axondir)
            self.assertFalse( axon.has('md5',craphash) )
            self.assertFalse( axon.has('md5',asdfhash) )
            iden0 = axon.alloc(8)
            self.assertIsNotNone( axon.chunk(iden0,b'asdfasdf') )
            self.assertTrue( axon.has('md5',asdfhash) )
            self.assertFalse( axon.has('md5',craphash) )
            byts = b''.join( axon.bytes('md5',asdfhash) )
            self.assertEqual(byts,b'asdfasdf')
            axon.fini()
            # reopen on the same directory: stored blob must still be there
            axon = s_axon.Axon(axondir)
            self.assertTrue( axon.has('md5',asdfhash) )
            self.assertFalse( axon.has('md5',craphash) )
            byts = b''.join( axon.bytes('md5',asdfhash) )
            self.assertEqual(byts,b'asdfasdf')
            # wants() is None for stored hashes, non-None for missing ones
            self.assertIsNone(axon.wants('md5', asdfhash, 8))
            self.assertIsNotNone(axon.wants('md5', craphash, 8))
            axon.fini()
    def test_axon_sync(self):
        """Chunked writes (smaller than syncsize) still produce the full blob."""
        with self.getTestDir() as axondir:
            byts = os.urandom(128)
            bytsmd5 = hashlib.md5(byts).hexdigest()
            # syncsize=64 forces multiple sync events for the 128-byte blob
            axon = s_axon.Axon(axondir,syncsize=64)
            iden = axon.alloc(128)
            for chnk in chunks(byts,10):
                blob = axon.chunk(iden,chnk)
            self.assertIsNotNone(blob)
            self.assertTrue( axon.has('md5',bytsmd5) )
            axon.fini()
    def test_axon_host(self):
        """AxonHost provisions axons, persists them, and enforces free space."""
        self.thisHostMustNot(platform='windows')
        with self.getTestDir() as datadir:
            # a stray non-axon file in the data dir must be ignored
            with open(os.path.join(datadir,'foo'),'w') as fd:
                fd.write('useless file to skip')
            host = s_axon.AxonHost(datadir)
            usage = host.usage()
            props = {
                'syncmax':s_axon.megabyte * 10,
                'bytemax':s_axon.megabyte * 10,
            }
            axfo = host.add(**props)
            self.assertIsNotNone( usage.get('total') )
            axon = host.axons.get(axfo[0])
            iden = axon.alloc(100)
            blob = axon.chunk(iden,b'V'*100)
            self.assertIsNotNone(blob)
            self.assertTrue( axon.has( 'md5', blob[1].get('hash:md5') ) )
            self.assertTrue( axon.has( 'sha1', blob[1].get('hash:sha1') ) )
            self.assertTrue( axon.has( 'sha256', blob[1].get('hash:sha256') ) )
            host.fini()
            # reopening the host must rediscover the axon and its blob
            host = s_axon.AxonHost(datadir)
            axon = host.axons.get(axfo[0])
            self.assertTrue( axon.has( 'md5', blob[1].get('hash:md5') ) )
            self.assertTrue( axon.has( 'sha1', blob[1].get('hash:sha1') ) )
            self.assertTrue( axon.has( 'sha256', blob[1].get('hash:sha256') ) )
            # adding another axon without enough free space must fail
            props = {
                'syncmax':s_axon.megabyte * 10,
            }
            self.assertRaises(NotEnoughFree, host.add, **props)
            host.fini()
    def test_axon_host_clone(self):
        """Hosts sharing an axon bus accept writes once clones are ready."""
        self.thisHostMustNot(platform='windows')
        busurl = 'local://%s/axons' % guid()
        dmon = s_daemon.Daemon()
        dmon.listen(busurl)
        dmon.share('axons', s_service.SvcBus(), fini=True)
        with self.getTestDir() as datadir:
            dir0 = gendir(datadir,'host0')
            dir1 = gendir(datadir,'host1')
            dir2 = gendir(datadir,'host2')
            opts = {
                'axonbus':busurl,
            }
            host0 = s_axon.AxonHost(dir0,hostname='host0',**opts)
            host1 = s_axon.AxonHost(dir1,hostname='host1',**opts)
            host2 = s_axon.AxonHost(dir2,hostname='host2',**opts)
            props = {
                'syncmax':s_axon.megabyte,
                'bytemax':s_axon.megabyte,
            }
            axfo0 = host0.add(**props)
            # talk to the axon over telepath rather than in-process
            axon0 = s_telepath.openlink( axfo0[1].get('link') )
            self.assertTrue( axon0._waitClonesReady(timeout=2) )
            iden = axon0.alloc(100)
            blob = axon0.chunk(iden,b'V'*100)
            self.assertIsNotNone(blob)
            self.assertTrue( axon0.has( 'md5', blob[1].get('hash:md5') ) )
            self.assertTrue( axon0.has( 'sha1', blob[1].get('hash:sha1') ) )
            self.assertTrue( axon0.has( 'sha256', blob[1].get('hash:sha256') ) )
            axon0.fini()
            host0.fini()
            host1.fini()
            host2.fini()
        dmon.fini()
    def test_axon_clustered(self):
        """An axon created with clones=2 replicates across the other hosts."""
        self.thisHostMustNot(platform='windows')
        busurl = 'local://%s/axons' % guid()
        dmon = s_daemon.Daemon()
        dmon.listen(busurl)
        dmon.share('axons', s_service.SvcBus(), fini=True)
        with self.getTestDir() as datadir:
            dir0 = gendir(datadir,'host0')
            dir1 = gendir(datadir,'host1')
            dir2 = gendir(datadir,'host2')
            opts = {
                'axonbus':busurl,
            }
            host0 = s_axon.AxonHost(dir0,hostname='host0',**opts)
            host1 = s_axon.AxonHost(dir1,hostname='host1',**opts)
            host2 = s_axon.AxonHost(dir2,hostname='host2',**opts)
            props = {
                'clones':2,
                'syncmax':s_axon.megabyte,
                'bytemax':s_axon.megabyte,
            }
            axfo0 = host0.add(**props)
            axon0 = s_telepath.openlink( axfo0[1].get('link') )
            # wait for clones to come online
            self.assertTrue( axon0._waitClonesReady(timeout=2) )
            #self.assertIsNotNone( usage.get('total') )
            #axon = host.axons.get(iden)
            iden = axon0.alloc(100)
            blob = axon0.chunk(iden,b'V'*100)
            self.assertIsNotNone(blob)
            self.assertTrue( axon0.has( 'md5', blob[1].get('hash:md5') ) )
            self.assertTrue( axon0.has( 'sha1', blob[1].get('hash:sha1') ) )
            self.assertTrue( axon0.has( 'sha256', blob[1].get('hash:sha256') ) )
            axon0.fini()
            host0.fini()
            host1.fini()
            host2.fini()
        dmon.fini()
def test_axon_cluster(self):
    """Exercise AxonCluster reads/writes across three AxonHosts on one service bus."""
    # local:// bus sockets are not supported on Windows.
    self.thisHostMustNot(platform='windows')
    busurl = 'local://%s/axons' % guid()
    dmon = s_daemon.Daemon()
    dmon.listen(busurl)
    # fini=True: the daemon owns the service bus and tears it down with itself.
    dmon.share('axons', s_service.SvcBus(), fini=True)
    svcprox = s_service.openurl( busurl )
    axcluster = s_axon.AxonCluster(svcprox)
    with self.getTestDir() as datadir:
        dir0 = gendir(datadir,'host0')
        dir1 = gendir(datadir,'host1')
        dir2 = gendir(datadir,'host2')
        opts = {
            'axonbus':busurl,
        }
        host0 = s_axon.AxonHost(dir0,hostname='host0',**opts)
        host1 = s_axon.AxonHost(dir1,hostname='host1',**opts)
        host2 = s_axon.AxonHost(dir2,hostname='host2',**opts)
        props = {
            'clones':1,
            'syncmax':s_axon.megabyte,
            'bytemax':s_axon.megabyte,
        }
        axfo0 = host0.add(**props)
        # The cluster should not know either hash before anything is stored.
        self.assertFalse( axcluster.has('md5',craphash) )
        self.assertFalse( axcluster.has('md5',asdfhash) )
        buf = b'asdfasdf'
        iden = axcluster.alloc(len(buf))
        self.assertIsNotNone( axcluster.chunk(iden, buf) )
        self.assertFalse( axcluster.has('md5',craphash) )
        self.assertTrue( axcluster.has('md5',asdfhash) )
        blobs = axcluster.find('md5', craphash)
        self.assertEqual(len(blobs), 0)
        blobs = axcluster.find('md5', asdfhash)
        self.assertEqual(len(blobs), 1)
        blob = blobs[0]
        byts = b''.join( axcluster.iterblob(blob) )
        self.assertEqual(byts, buf)
        # Dropping the .axon hint forces the cluster to locate the blob itself.
        blob[1].pop('.axon')
        byts = b''.join( axcluster.iterblob(blob) )
        self.assertEqual(byts, buf)
        # wants() returns None when the bytes are already present.
        self.assertIsNone(axcluster.wants('md5', asdfhash, len(buf)))
        self.assertIsNotNone(axcluster.wants('md5', craphash, len(buf)))
        host0.fini()
        host1.fini()
        host2.fini()
    dmon.fini()
def test_axon_autorun(self):
    """AxonHost with 'autorun' should pre-create that many axons at startup."""
    self.thisHostMustNot(platform='windows')
    with self.getTestDir() as dirname:
        opts = {
            'autorun':2,
            'syncmax':s_axon.megabyte,
            'bytemax':s_axon.megabyte,
        }
        host = s_axon.AxonHost(dirname,**opts)
        self.assertEqual( len(host.axons), 2 )
        host.fini()
def test_axon_eatbytes(self):
    """eatbytes()/eatfd() should yield deterministic axon:blob idens for fixed input."""
    self.thisHostMustNot(platform='windows')
    with self.getTestDir() as dirname:
        with s_axon.Axon(dirname) as axon:
            blob0 = axon.eatbytes(b'visi')
            with io.BytesIO(b'vertex') as fd:
                blob1 = axon.eatfd(fd)
            # Connect a telepath proxy over the axon's published TCP link.
            port = axon.getAxonInfo()[1].get('link')[1].get('port')
            with s_axon.openurl('tcp://127.0.0.1/axon', port=port) as prox:
                blob2 = axon.eatbytes(b'hurr')
                with io.BytesIO(b'durr') as fd:
                    blob3 = axon.eatfd(fd)
            # blob idens are content-derived and therefore stable across runs.
            self.eq( blob0[1].get('axon:blob'), '442f602ecf8230b2a59a44b4f845be27' )
            self.eq( blob1[1].get('axon:blob'), 'd4552906c1f6966b96d27e6fc79441b5' )
            self.eq( blob2[1].get('axon:blob'), '0d60960570ef6da0a15f68c24b420334' )
            self.eq( blob3[1].get('axon:blob'), '97c11d1057f75c9c0b79090131709f62' )
#def test_axon_proxy(self):
| |
from typing import Optional, Union
import contextlib
import time
import uuid
from configupdater import ConfigUpdater, DuplicateSectionError
from configupdater.configupdater import Section
def validate(section: 'Section', file=''):
    """Validate a single config section.

    Requires a 'control' option whose value is one of 'port', 'socket' or
    'proxy'.  'port' and 'proxy' controls additionally require 'host' and
    'port' options; 'socket' requires 'host'.

    :param section: the configupdater Section to check
    :param file: optional filename used to qualify error messages
    :raises ValueError: with a file/section-qualified message on any problem
    """
    name = section.name
    where = f"{file}[{name}]"
    try:
        control = section['control'].value
    except KeyError:
        # Suppress the KeyError context: it is an implementation detail of
        # the lookup, not useful to the caller.
        raise ValueError(f"{where}: 'control' definition missing.") from None
    choices = ['port', 'socket', 'proxy']
    if control not in choices:
        raise ValueError(f"{where}: Invalid value for 'control'. Use any of {choices}.")
    options = section.to_dict()
    host = options.get('host')
    port = options.get('port')
    where = f'{where} / control = {control}'
    if control in ['port', 'proxy']:
        if host is None:
            raise ValueError(f"{where}: 'host' definition missing.")
        if port is None:
            raise ValueError(f"{where}: 'port' definition missing.")
    if control == 'socket':
        if host is None:
            raise ValueError(f"{where}: 'host' definition missing.")
class CCNode:
    """One config section exposed as a node with attribute-style access."""

    def node_property(name, default=None, update_last_modified_flag=True):
        """Class-body helper: build a property backed by option *name*.

        The setter only touches the underlying section (and the file's
        per-section modification time) when the value actually changes.
        """
        @property
        def prop(self):
            try:
                return self.section[name].value
            except KeyError:
                # Option not present in the section: use the declared default.
                return default

        @prop.setter
        def prop(self, value):
            try:
                # BUG FIX: compare against the option's *value*.  The Option
                # object itself never compares equal to a plain string, so the
                # old code treated every assignment as a change and spuriously
                # bumped the section's last-modified timestamp.
                old = self.section[name].value
            except KeyError:
                old = None
            if value != old:
                if value is not None:
                    self.section[name] = value
                else:
                    # Setting None removes the option entirely.
                    del self.section[name]
                if update_last_modified_flag is True:
                    self.file.set_section_last_modified(self.section.name)

        return prop

    label = node_property('label', None, False)
    control = node_property('control', None)
    host = node_property('host', None)
    port = node_property('port', None)
    password = node_property('password', None)
    cookie = node_property('cookie', None)
    del node_property

    def __init__(self, file: 'CCFile', section: 'Section'):
        self.file = file
        self.section = section

    @property
    def name(self) -> str:
        """The section name, which doubles as the node's identifier."""
        return self.section.name

    def tick(self):
        """Propagate this node's last-modified time to the owning file."""
        self.file.tick(self.last_modified)

    def move_after(self, reference: 'CCNode' = None):
        """Reorder this node after *reference* (or to the front when None)."""
        section = None if reference is None else reference.section
        self.file.move_after(self.section, section)

    @property
    def last_modified(self):
        """Per-section last-modified timestamp tracked by the file."""
        return self.file.get_section_last_modified(self.section.name)

    @property
    def is_default_node(self) -> bool:
        # Regular nodes are never the implicit default.
        return False

    def remove(self):
        """Delete this node's section from the file; True on success."""
        self.file.set_section_last_modified(self.section.name)
        return self.file.remove_node(self.section)

    @property
    def readonly(self):
        """Whether the backing file is read-only."""
        return self.file.readonly
class CCFile:
    """A config file wrapper around ConfigUpdater with modification tracking."""

    def __init__(self, filename):
        self.filename = filename
        self.updater = ConfigUpdater()
        self.updater.read(filename)
        # Fail fast on malformed sections.
        for section in self.updater.sections_blocks():
            validate(section, filename)
        self._readonly = None
        self._last_modified = time.time()
        # Per-section last-modified timestamps, keyed by section name.
        self.slm = {}

    def __iter__(self) -> 'CCNode':
        """Yield a CCNode for every section, in file order."""
        for section in self.updater.sections_blocks():
            yield CCNode(self, section)

    @property
    def readonly(self) -> bool:
        """True when the file cannot be opened for writing.

        BUG FIX: probe with append mode.  The previous implementation opened
        the file with mode 'w', which TRUNCATED it on disk as a side effect of
        merely checking writability.
        """
        try:
            with open(self.filename, 'a'):
                return False
        except OSError:
            return True

    def write(self):
        # Best effort: on failure (e.g. read-only file) the old contents stay.
        with contextlib.suppress(Exception):
            self.updater.update_file()

    def move_after(self, section: 'Section', reference: 'Section' = None):
        """Move *section* after *reference*; with no reference, to the front."""
        self.updater.remove_section(section.name)
        if reference is None:
            sections = self.updater.sections()
            if len(sections) > 0:
                reference = self.updater[sections[0]]
                reference.add_before.section(section)
            else:
                self.updater.add_section(section)
        else:
            reference.add_after.section(section)
        self.tick()

    def add_node(self) -> 'Optional[CCNode]':
        """Create a new uniquely named section; None when not possible."""
        if self.readonly is True:
            return None
        while True:
            section_id = uuid.uuid4().hex
            try:
                self.updater.add_section(section_id)
            except DuplicateSectionError:
                # Astronomically unlikely collision; just draw another id.
                continue
            except Exception:
                return None
            else:
                break
        return CCNode(self, self.updater[section_id])

    def remove_node(self, section: 'Section') -> bool:
        """Remove a section; refused (False) when the file is read-only."""
        if self.readonly is True:
            return False
        self.updater.remove_section(section.name)
        return True

    @property
    def last_modified(self):
        """File-level last-modified time, truncated to whole seconds."""
        return int(self._last_modified)

    def tick(self, last_modified=None):
        """Persist the file and stamp its last-modified time."""
        self.write()
        if last_modified is None:
            last_modified = time.time()
        self._last_modified = last_modified

    def get_section_last_modified(self, id) -> int:
        # Unknown sections report 0 (never modified).
        return self.slm.get(id, 0)

    def set_section_last_modified(self, id, last_modified=None):
        if last_modified is None:
            last_modified = time.time()
        self.slm[id] = last_modified
| |
"""
Unit tests for the extractor Python interface.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from extractor import extractor, util
class TestExtractor:
def _test_dna(self, s1, s2, expected_variants):
s1_swig = util.swig_str(s1)
s2_swig = util.swig_str(s2)
extracted = extractor.extract(s1_swig[0], s1_swig[1],
s2_swig[0], s2_swig[1], extractor.TYPE_DNA)
assert len(extracted.variants) == len(expected_variants)
for variant, expected_variant in zip(extracted.variants, expected_variants):
for attribute, expected_value in expected_variant.items():
assert getattr(variant, attribute) == expected_value
def test1(self):
self._test_dna(
'ATGATGATCAGATACAGTGTGATACAGGTAGTTAGACAA',
'ATGATTTGATCAGATACATGTGATACCGGTAGTTAGGACAA',
[{'sample_start': 0,
'sample_end': 5,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 5,
'type': 1,
'reference_start': 0},
{'sample_start': 5,
'sample_end': 7,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 5,
'type': 4,
'reference_start': 5},
{'sample_start': 7,
'sample_end': 18,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 16,
'type': 1,
'reference_start': 5},
{'sample_start': 18,
'sample_end': 18,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 17,
'type': 4,
'reference_start': 16},
{'sample_start': 18,
'sample_end': 26,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 25,
'type': 1,
'reference_start': 17},
{'sample_start': 26,
'sample_end': 27,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 26,
'type': 4,
'reference_start': 25},
{'sample_start': 27,
'sample_end': 35,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 34,
'type': 1,
'reference_start': 26},
{'sample_start': 35,
'sample_end': 36,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 34,
'type': 4,
'reference_start': 34},
{'sample_start': 36,
'sample_end': 41,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 39,
'type': 1,
'reference_start': 34}]
)
def test2(self):
self._test_dna(
'TAAGCACCAGGAGTCCATGAAGAAGATGGCTCCTGCCATGGAATCCCCTACTCTACTGTG',
'TAAGCACCAGGAGTCCATGAAGAAGCTGGATCCTCCCATGGAATCCCCTACTCTACTGTG',
[{'sample_start': 0,
'sample_end': 25,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 25,
'type': 1,
'reference_start': 0},
{'sample_start': 25,
'sample_end': 26,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 26,
'type': 4,
'reference_start': 25},
{'sample_start': 26,
'sample_end': 29,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 29,
'type': 1,
'reference_start': 26},
{'sample_start': 29,
'sample_end': 30,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 30,
'type': 4,
'reference_start': 29},
{'sample_start': 30,
'sample_end': 34,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 34,
'type': 1,
'reference_start': 30},
{'sample_start': 34,
'sample_end': 35,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 35,
'type': 4,
'reference_start': 34},
{'sample_start': 35,
'sample_end': 60,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 60,
'type': 1,
'reference_start': 35}]
)
def test3(self):
self._test_dna(
'TAAGCACCAGGAGTCCATGAAGAAGATGGCTCCTGCCATGGAATCCCCTACTCTA',
'TAAGCACCAGGAGTCCATGAAGAAGCCATGTCCTGCCATGGAATCCCCTACTCTA',
[{'sample_start': 0,
'sample_end': 25,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 25,
'type': 1,
'reference_start': 0},
{'sample_start': 25,
'sample_end': 29,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 29,
'type': 2,
'reference_start': 25},
{'sample_start': 29,
'sample_end': 30,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 30,
'type': 4,
'reference_start': 29},
{'sample_start': 30,
'sample_end': 55,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 55,
'type': 1,
'reference_start': 30}]
)
def test4(self):
self._test_dna(
'TAAGCACCAGGAGTCCATGAAGAAGATGGCTCCTGCCATGGAATCCCCTACTCTA',
'TAAGCACCAGGAGTCCATGAAGAAGCCATGTCCTGCCATGAATCCCCTACTCTA',
[{'sample_start': 0,
'sample_end': 25,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 25,
'type': 1,
'reference_start': 0},
{'sample_start': 25,
'sample_end': 29,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 29,
'type': 2,
'reference_start': 25},
{'sample_start': 29,
'sample_end': 30,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 30,
'type': 4,
'reference_start': 29},
{'sample_start': 30,
'sample_end': 39,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 39,
'type': 1,
'reference_start': 30},
{'sample_start': 39,
'sample_end': 39,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 40,
'type': 4,
'reference_start': 39},
{'sample_start': 39,
'sample_end': 54,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 55,
'type': 1,
'reference_start': 40}]
)
def test5(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 44,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 0}]
)
def test6(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCGGTTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 6,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 6,
'type': 1,
'reference_start': 0},
{'sample_start': 6,
'sample_end': 7,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 7,
'type': 4,
'reference_start': 6},
{'sample_start': 7,
'sample_end': 44,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 7}]
)
def test7(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCGTTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 6,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 6,
'type': 1,
'reference_start': 0},
{'sample_start': 6,
'sample_end': 6,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 7,
'type': 4,
'reference_start': 6},
{'sample_start': 6,
'sample_end': 43,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 7}]
)
def test8(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCGTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 6,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 6,
'type': 1,
'reference_start': 0},
{'sample_start': 6,
'sample_end': 6,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 8,
'type': 4,
'reference_start': 6},
{'sample_start': 6,
'sample_end': 42,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 8}]
)
def test9(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCGCATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 6,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 6,
'type': 1,
'reference_start': 0},
{'sample_start': 6,
'sample_end': 7,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 6,
'type': 4,
'reference_start': 6},
{'sample_start': 7,
'sample_end': 45,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 6}]
)
def test10(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCGCCATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 6,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 6,
'type': 1,
'reference_start': 0},
{'sample_start': 6,
'sample_end': 8,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 6,
'type': 4,
'reference_start': 6},
{'sample_start': 8,
'sample_end': 46,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 6}]
)
def test11(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCGAATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 7,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 7,
'type': 1,
'reference_start': 0},
{'sample_start': 7,
'sample_end': 8,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 7,
'type': 4,
'reference_start': 7},
{'sample_start': 8,
'sample_end': 45,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 7}])
def test12(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCGAGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 7,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 7,
'type': 1,
'reference_start': 0},
{'sample_start': 7,
'sample_end': 9,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 7,
'type': 4,
'reference_start': 7},
{'sample_start': 9,
'sample_end': 46,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 7}]
)
def test13(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCGACGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 7,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 7,
'type': 1,
'reference_start': 0},
{'sample_start': 7,
'sample_end': 10,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 7,
'type': 4,
'reference_start': 7},
{'sample_start': 10,
'sample_end': 47,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 7}]
)
def test14(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCGCGAATCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 6,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 6,
'type': 1,
'reference_start': 0},
{'sample_start': 6,
'sample_end': 11,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 11,
'type': 2,
'reference_start': 6},
{'sample_start': 11,
'sample_end': 44,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 11}]
)
def test15(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCGCCTTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 6,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 6,
'type': 1,
'reference_start': 0},
{'sample_start': 6,
'sample_end': 8,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 7,
'type': 4,
'reference_start': 6},
{'sample_start': 8,
'sample_end': 45,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 7}]
)
def test16(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCGATTCGCTAGCTTCGTTTTGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 20,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 20,
'type': 1,
'reference_start': 0},
{'sample_start': 20,
'sample_end': 24,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 23,
'type': 4,
'reference_start': 20},
{'sample_start': 24,
'sample_end': 45,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 23}]
)
def test17(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCTCTTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 5,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 5,
'type': 1,
'reference_start': 0},
{'sample_start': 5,
'sample_end': 7,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 7,
'type': 2,
'reference_start': 5},
{'sample_start': 7,
'sample_end': 44,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 7}]
)
def test18(self):
self._test_dna(
'ACGTCGATTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
'ACGTCGTCTCGCTAGCTTCGGGGGATAGATAGAGATATAGAGAT',
[{'sample_start': 0,
'sample_end': 6,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 6,
'type': 1,
'reference_start': 0},
{'sample_start': 6,
'sample_end': 8,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 8,
'type': 4,
'reference_start': 6},
{'sample_start': 8,
'sample_end': 44,
'transposition_end': 0,
'transposition_start': 0,
'reference_end': 44,
'type': 1,
'reference_start': 8}]
)
| |
# -*- coding: utf-8 -*-
import os
from unittest import TestCase
from nose.tools import assert_equal
from nose.tools import assert_greater
from nose.tools import assert_in
from nose.tools import assert_is_instance
from nose.tools import assert_raises
from nose.tools import assert_true
from networkapiclient.ClientFactory import ClientFactory
from networkapiclient.exception import NetworkAPIClientError
# Connection settings for the live NetworkAPI instance under test;
# each can be overridden through the environment.
NETWORKAPI_URL = os.getenv('NETWORKAPI_URL', 'http://10.0.0.2:8000/')
NETWORKAPI_USER = os.getenv('NETWORKAPI_USER', 'networkapi')
NETWORKAPI_PWD = os.getenv('NETWORKAPI_PWD', 'networkapi')
class TestNetworkIPv4(TestCase):
    """ Class to test the network creation """

    def setUp(self):
        # Fresh client per test; credentials come from the module-level env vars.
        self.client = ClientFactory(NETWORKAPI_URL, NETWORKAPI_USER,
                                    NETWORKAPI_PWD)
        self.api_net_ipv4 = self.client.create_api_network_ipv4()

    def test_list_networks(self):
        """ List all IPv4 networks """
        networks = self.api_net_ipv4.list()
        assert_is_instance(networks, list)
        # The fixture environment is expected to ship more than one network.
        assert_greater(len(networks), 1)

    def test_create_new_ipv4_network_dinamically_by_prefix(self):
        """ Create a new IPv4 network """
        data = {
            'vlan': 3,
            'network_type': 2,
            'environmentvip': None,
            'prefix': 28,
        }
        network_id = self.api_net_ipv4.create([data])[0]['id']
        network = self.api_net_ipv4.get([network_id])['networks'][0]
        # With only a prefix given, the API allocates the next free block.
        assert_equal(network['prefix'], 28)
        assert_equal(network['broadcast'], '10.0.1.15')
        # Clean up so the environment stays reusable for other tests.
        self.api_net_ipv4.delete([network_id])

    def test_create_new_ipv4_network_by_octets(self):
        """ Creates new IPv4 network by the octets """
        data = {
            'vlan': 3,
            'network_type': 2,
            'environmentvip': None,
            'prefix': 30,
            'oct1': 10,
            'oct2': 0,
            'oct3': 1,
            'oct4': 0,
        }
        network_id = self.api_net_ipv4.create([data])[0]['id']
        network = self.api_net_ipv4.get([network_id])['networks'][0]
        assert_equal(network['prefix'], 30)
        # 10.0.1.0/30 -> broadcast 10.0.1.3, netmask x.x.x.252
        assert_equal(network['broadcast'], '10.0.1.3')
        assert_equal(network['mask_oct4'], 252)
        self.api_net_ipv4.delete([network_id])

    def test_create_a_network_with_only_network_type(self):
        """ Create new IPv4 network using only the network_type """
        data = {
            'vlan': 3,
            'network_type': 2,
        }
        network_id = self.api_net_ipv4.create([data])[0]['id']
        network = self.api_net_ipv4.get([network_id])['networks'][0]
        assert_equal(network['vlan'], data['vlan'])
        assert_equal(network['network_type'], data['network_type'])
        assert_equal(network['id'], network_id)
        self.api_net_ipv4.delete([network_id])

    def test_delete_network(self):
        """ Deletes an ipv4 network """
        data = {
            'vlan': 3,
            'network_type': 2,
        }
        network_id = self.api_net_ipv4.create([data])[0]['id']
        response = self.api_net_ipv4.delete([network_id])
        # A successful delete returns an empty list.
        assert_is_instance(response, list)
        assert_equal(len(response), 0)

    def test_delete_a_non_existent_ipv4_network(self):
        """ Tries to delete a non existent ipv4 network """
        with assert_raises(NetworkAPIClientError):
            response = self.api_net_ipv4.delete([5555])

    def test_delete_an_active_network(self):
        """ Tries to delete an active ipv4 network """
        # Network 7 is provisioned as active in the fixture environment.
        active_network_id = 7
        with assert_raises(NetworkAPIClientError):
            self.api_net_ipv4.delete([active_network_id])

    def test_update_network(self):
        """ Updating ipv4 network data """
        data = {
            'vlan': 3,
            'network_type': 2,
        }
        network_id = self.api_net_ipv4.create([data])[0]['id']
        data['network_type'] = 6
        data.update({'id': network_id})
        self.api_net_ipv4.update([data])
        network = self.api_net_ipv4.get([network_id])['networks'][0]
        assert_equal(network['network_type'], data['network_type'])
        self.api_net_ipv4.delete([network_id])

    def test_update_a_field_not_editable_on_a_network(self):
        """ Tries to update a field not editable on a network """
        data = {
            'id': 7,
            'vlan': 3,
            'network_type': 2,
            'active': False,
        }
        self.api_net_ipv4.update([data])
        network = self.api_net_ipv4.get([data['id']])['networks'][0]
        # 'active' must be ignored by update(): the network stays active.
        assert_true(network['active'])

    def test_create_network_on_an_environment_that_have_a_router(self):
        """ Creates a network on an environment that have a router """
        data = {
            'vlan': 10,
            'network_type': 2,
        }
        expected_equipament_id = 12  # This equipament is a router
        network_id = self.api_net_ipv4.create([data])[0]['id']
        api_ip = self.client.create_api_ipv4()
        network = api_ip.search(
            search={'networkipv4': network_id},
            include=['equipments']
        )
        # The router should have been given an IP inside the new network.
        equipments_ids = []
        for ip in network['ips']:
            for equipment in ip['equipments']:
                equipments_ids.append(equipment['id'])
        assert_in(expected_equipament_id, equipments_ids)
        self.api_net_ipv4.delete([network_id])

    def test_create_network_on_an_environment_that_have_two_routers(self):
        """ Creates a network on an environment that have two routers """
        data = {
            'vlan': 3,
            'network_type': 2,
        }
        expected_equipaments_id = (26, 27)  # These equipaments are routers
        network_id = self.api_net_ipv4.create([data])[0]['id']
        api_ip = self.client.create_api_ipv4()
        network = api_ip.search(
            search={'networkipv4': network_id},
            include=['equipments']
        )
        # Both routers should have been given IPs inside the new network.
        equipments_ids = []
        for ip in network['ips']:
            for equipment in ip['equipments']:
                equipments_ids.append(equipment['id'])
        assert_in(expected_equipaments_id[0], equipments_ids)
        assert_in(expected_equipaments_id[1], equipments_ids)
        self.api_net_ipv4.delete([network_id])
| |
"""
All methods must return media_ids that can be
passed into e.g. like() or comment() functions.
"""
from tqdm import tqdm
# STORY
def get_user_stories(self, user_id):
    """Fetch a user's current story, split into photo and video URL lists.

    Returns ([], []) when the user has no story or the payload is malformed.
    """
    self.api.get_user_stories(user_id)
    try:
        if int(self.api.last_json["reel"]["media_count"]) > 0:
            list_image = []
            list_video = []
            for item in self.api.last_json["reel"]["items"]:
                if int(item["media_type"]) == 1:  # photo
                    img = item["image_versions2"]["candidates"][0]["url"]
                    list_image.append(img)
                elif int(item["media_type"]) == 2:  # video
                    video = item["video_versions"][0]["url"]
                    list_video.append(video)
            return list_image, list_video
        else:
            return [], []
    except Exception as e:
        # Unexpected payload shape: log and degrade to empty results.
        self.logger.error(str(e))
        return [], []
def get_self_story_viewers(self, story_id):
    """Return the raw API payload listing viewers of one of our stories."""
    self.api.get_self_story_viewers(story_id)
    return self.api.last_json

def get_user_reel(self, user_id):
    """Return the raw reel payload for a user."""
    self.api.get_user_reel(user_id)
    return self.api.last_json
def get_media_owner(self, media_id):
    """Return the owner's user id (as str) for a media, or False on failure."""
    self.api.media_info(media_id)
    try:
        return str(self.api.last_json.get("items")[0]["user"]["pk"])
    except Exception as ex:
        self.logger.error("Error: get_media_owner(%s)\n%s", media_id, ex)
        return False
def get_user_tags_medias(self, user_id):
    """Media ids (str) of posts the user is tagged in."""
    self.api.get_user_tags(user_id)
    return [str(media["pk"]) for media in self.api.last_json["items"]]

def get_popular_medias(self):
    """Media ids (str) from the popular ("explore") feed."""
    self.api.get_popular_feed()
    return [str(media["id"]) for media in self.api.last_json["items"]]
def get_your_medias(self, as_dict=False):
    """Your own feed medias: raw dicts when as_dict, filtered ids otherwise."""
    self.api.get_self_user_feed()
    if as_dict:
        return self.api.last_json.get("items")
    return self.filter_medias(self.api.last_json.get("items"), False)

def get_archived_medias(self, as_dict=False):
    """Your archived medias: raw dicts when as_dict, filtered ids otherwise."""
    self.api.get_archive_feed()
    if as_dict:
        return self.api.last_json.get("items")
    return self.filter_medias(self.api.last_json.get("items"), False)
def get_timeline_medias(self, filtration=True):
    """Medias from your timeline feed; [] when the feed request fails."""
    if not self.api.get_timeline_feed():
        self.logger.warning("Error while getting timeline feed.")
        return []
    # Timeline entries wrap the actual media under "media_or_ad".
    feed_items = [
        item["media_or_ad"]
        for item in self.api.last_json["feed_items"]
        if item.get("media_or_ad")
    ]
    return self.filter_medias(feed_items, filtration)
def get_user_medias(self, user_id, filtration=True, is_comment=False):
    """Media ids from a user's feed page; [] for private accounts."""
    user_id = self.convert_to_user_id(user_id)
    self.api.get_user_feed(user_id)
    if self.api.last_json["status"] == "fail":
        self.logger.warning("This is a private account.")
        return []
    return self.filter_medias(
        self.api.last_json.get("items"), filtration, is_comment=is_comment
    )

def get_total_user_medias(self, user_id):
    """All of a user's media ids (no filtration); [] for private accounts."""
    user_id = self.convert_to_user_id(user_id)
    medias = self.api.get_total_user_feed(user_id)
    if self.api.last_json["status"] == "fail":
        self.logger.warning("This is a private account.")
        return []
    return self.filter_medias(medias, filtration=False)

def get_last_user_medias(self, user_id, amount):
    """The user's latest *amount* media ids (no filtration)."""
    user_id = self.convert_to_user_id(user_id)
    medias = self.api.get_last_user_feed(user_id, amount)
    if self.api.last_json["status"] == "fail":
        self.logger.warning("This is a private account.")
        return []
    return self.filter_medias(medias, filtration=False)
def get_user_likers(self, user_id, media_count=10):
    """Distinct liker ids across the user's latest *media_count* medias."""
    your_likers = set()
    media_items = self.get_user_medias(user_id, filtration=False)
    if not media_items:
        self.logger.warning("Can't get %s medias." % user_id)
        return []
    for media_id in tqdm(
        media_items[:media_count], desc="Getting %s media likers" % user_id
    ):
        # Union keeps each liker only once across all medias.
        media_likers = self.get_media_likers(media_id)
        your_likers |= set(media_likers)
    return list(your_likers)
def get_hashtag_medias(self, hashtag, filtration=True):
    """Media ids from one page of a hashtag feed; [] on request failure."""
    if not self.api.get_hashtag_feed(hashtag):
        self.logger.warning("Error while getting hashtag feed.")
        return []
    return self.filter_medias(self.api.last_json.get("items"), filtration)

def get_total_hashtag_medias(self, hashtag, amount=100, filtration=False):
    """Up to *amount* media ids from a hashtag feed (paginated)."""
    medias = self.api.get_total_hashtag_feed(hashtag, amount)
    return self.filter_medias(medias, filtration=filtration)

def get_geotag_medias(self, geotag, filtration=True):
    # TODO: returns list of medias from geotag
    pass
def get_locations_from_coordinates(self, latitude, longitude):
    """Search locations around (latitude, longitude) and keep only those
    whose coordinates match the query after truncation to whole degrees."""
    self.api.search_location(lat=latitude, lng=longitude)
    candidates = self.api.last_json.get("items")
    lat_key, lng_key = int(latitude), int(longitude)
    return [
        place
        for place in candidates
        if int(place["location"]["lat"]) == lat_key
        and int(place["location"]["lng"]) == lng_key
    ]
def get_media_info(self, media_id):
    """Return the media's item dicts; an already-fetched dict passes through."""
    if isinstance(media_id, dict):
        return media_id
    self.api.media_info(media_id)
    if "items" not in self.api.last_json:
        self.logger.info("Media with %s not found." % media_id)
        return []
    return self.api.last_json.get("items")
def get_timeline_users(self):
    """User ids (str) appearing in your timeline feed."""
    if not self.api.get_timeline_feed():
        self.logger.warning("Error while getting timeline feed.")
        return []
    # Older payloads list posts under "items"; newer ones wrap each entry in
    # "feed_items" with the post under "media_or_ad".
    if "items" in self.api.last_json:
        return [
            str(i["user"]["pk"]) for i in self.api.last_json["items"] if i.get("user")
        ]
    elif "feed_items" in self.api.last_json:
        return [
            str(i["media_or_ad"]["user"]["pk"])
            for i in self.api.last_json["feed_items"]
            if i.get("media_or_ad", {}).get("user")
        ]
    self.logger.info("Users for timeline not found.")
    return []
def get_hashtag_users(self, hashtag):
    """User ids (str) of recent posters under a hashtag."""
    if not self.api.get_hashtag_feed(hashtag):
        self.logger.warning("Error while getting hashtag feed.")
        return []
    return [str(i["user"]["pk"]) for i in self.api.last_json["items"]]

def get_geotag_users(self, geotag):
    # TODO: returns list user_ids who just posted on this geotag
    pass
def get_user_id_from_username(self, username):
    """Resolve a username to its user id (str), caching per session."""
    if username not in self._usernames:
        self.api.search_username(username)
        # Small pause to soften rate limiting on lookups.
        self.very_small_delay()
        if "user" in self.api.last_json:
            self._usernames[username] = str(self.api.last_json["user"]["pk"])
        else:
            return None
    return self._usernames[username]

def get_username_from_user_id(self, user_id):
    """Resolve a user id back to its username, or None when not found."""
    user_info = self.get_user_info(user_id)
    if user_info and "username" in user_info:
        return str(user_info["username"])
    return None  # Not found
def get_user_info(self, user_id, use_cache=True):
    """Fetch (and cache) a user's full info dict; False when unavailable."""
    user_id = self.convert_to_user_id(user_id)
    if not use_cache or user_id not in self._user_infos:
        self.api.get_username_info(user_id)
        last_json = self.api.last_json
        if last_json is None or "user" not in last_json:
            return False
        user_info = last_json["user"]
        self._user_infos[user_id] = user_info
    return self._user_infos[user_id]

def get_user_followers(self, user_id, nfollows):
    """Up to *nfollows* follower ids (str), reversed to oldest-first."""
    user_id = self.convert_to_user_id(user_id)
    followers = self.api.get_total_followers(user_id, nfollows)
    return [str(item["pk"]) for item in followers][::-1] if followers else []

def get_user_following(self, user_id, nfollows=None):
    """Followed-user ids (str), reversed to oldest-first."""
    user_id = self.convert_to_user_id(user_id)
    following = self.api.get_total_followings(user_id, nfollows)
    return [str(item["pk"]) for item in following][::-1] if following else []
def get_comment_likers(self, comment_id):
    """Return the user ids (as strings) of everyone who liked a comment."""
    self.api.get_comment_likers(comment_id)
    payload = self.api.last_json
    if "users" not in payload:
        self.logger.info("Comment with %s not found." % comment_id)
        return []
    return [str(liker["pk"]) for liker in payload["users"]]

def get_media_likers(self, media_id):
    """Return the user ids (as strings) of everyone who liked a media."""
    self.api.get_media_likers(media_id)
    payload = self.api.last_json
    if "users" not in payload:
        self.logger.info("Media with %s not found." % media_id)
        return []
    return [str(liker["pk"]) for liker in payload["users"]]
def get_media_comments(self, media_id, only_text=False):
    """First page of a media's comments (just the texts when only_text)."""
    self.api.get_media_comments(media_id)
    if "comments" not in self.api.last_json:
        return []
    if only_text:
        return [str(item["text"]) for item in self.api.last_json["comments"]]
    return self.api.last_json["comments"]
def get_media_comments_all(self, media_id, only_text=False, count=False):
has_more_comments = True
max_id = ""
comments = []
while has_more_comments:
self.api.get_media_comments(media_id, max_id=max_id)
for comment in self.api.last_json["comments"]:
comments.append(comment)
has_more_comments = self.api.last_json["has_more_comments"]
if count and len(comments) >= count:
comments = comments[:count]
has_more_comments = False
self.logger.info("Getting comments stopped by count (%s)." % count)
if has_more_comments:
max_id = self.api.last_json["next_max_id"]
if only_text:
return [
str(item["text"])
for item in sorted(
comments, key=lambda k: k["created_at_utc"], reverse=False
)
]
return sorted(comments, key=lambda k: k["created_at_utc"], reverse=False)
def get_media_commenters(self, media_id):
self.get_media_comments(media_id)
if "comments" not in self.api.last_json:
return []
return [str(item["user"]["pk"]) for item in self.api.last_json["comments"]]
def search_users(self, query):
self.api.search_users(query)
if "users" not in self.api.last_json:
self.logger.info("Users with %s not found." % query)
return []
return [str(user["pk"]) for user in self.api.last_json["users"]]
def get_comment(self):
try:
return self.comments_file.random().strip()
except IndexError:
return "Wow!"
def get_media_id_from_link(self, link):
if "instagram.com/p/" not in link:
self.logger.error("Unexpected link")
return False
link = link.split("/")
code = link[link.index("p") + 1]
alphabet = {
"-": 62,
"1": 53,
"0": 52,
"3": 55,
"2": 54,
"5": 57,
"4": 56,
"7": 59,
"6": 58,
"9": 61,
"8": 60,
"A": 0,
"C": 2,
"B": 1,
"E": 4,
"D": 3,
"G": 6,
"F": 5,
"I": 8,
"H": 7,
"K": 10,
"J": 9,
"M": 12,
"L": 11,
"O": 14,
"N": 13,
"Q": 16,
"P": 15,
"S": 18,
"R": 17,
"U": 20,
"T": 19,
"W": 22,
"V": 21,
"Y": 24,
"X": 23,
"Z": 25,
"_": 63,
"a": 26,
"c": 28,
"b": 27,
"e": 30,
"d": 29,
"g": 32,
"f": 31,
"i": 34,
"h": 33,
"k": 36,
"j": 35,
"m": 38,
"l": 37,
"o": 40,
"n": 39,
"q": 42,
"p": 41,
"s": 44,
"r": 43,
"u": 46,
"t": 45,
"w": 48,
"v": 47,
"y": 50,
"x": 49,
"z": 51,
}
result = 0
for char in code:
result = result * 64 + alphabet[char]
return result
def get_link_from_media_id(self, media_id):
if media_id.find("_"):
new = media_id.split("_")
media_id = new[0]
alphabet = {
"-": 62,
"1": 53,
"0": 52,
"3": 55,
"2": 54,
"5": 57,
"4": 56,
"7": 59,
"6": 58,
"9": 61,
"8": 60,
"A": 0,
"C": 2,
"B": 1,
"E": 4,
"D": 3,
"G": 6,
"F": 5,
"I": 8,
"H": 7,
"K": 10,
"J": 9,
"M": 12,
"L": 11,
"O": 14,
"N": 13,
"Q": 16,
"P": 15,
"S": 18,
"R": 17,
"U": 20,
"T": 19,
"W": 22,
"V": 21,
"Y": 24,
"X": 23,
"Z": 25,
"_": 63,
"a": 26,
"c": 28,
"b": 27,
"e": 30,
"d": 29,
"g": 32,
"f": 31,
"i": 34,
"h": 33,
"k": 36,
"j": 35,
"m": 38,
"l": 37,
"o": 40,
"n": 39,
"q": 42,
"p": 41,
"s": 44,
"r": 43,
"u": 46,
"t": 45,
"w": 48,
"v": 47,
"y": 50,
"x": 49,
"z": 51,
}
result = ""
while media_id:
media_id, char = int(media_id) // 64, int(media_id) % 64
result += list(alphabet.keys())[list(alphabet.values()).index(char)]
return "https://instagram.com/p/" + result[::-1] + "/"
def get_messages(self):
if self.api.get_inbox_v2():
return self.api.last_json
else:
self.logger.info("Messages were not found, " "something went wrong.")
return None
def convert_to_user_id(self, x):
x = str(x)
if not x.isdigit():
x = x.lstrip("@")
x = self.get_user_id_from_username(x)
# if type is not str than it is int so user_id passed
return x
def get_pending_follow_requests(self):
self.api.get_pending_friendships()
if self.api.last_json.get("users"):
return self.api.last_json.get("users")
else:
self.logger.info("There isn't any pending request.")
return []
def get_pending_thread_requests(self):
self.api.get_pending_inbox()
threads = self.api.last_json["inbox"]["threads"]
if not threads:
self.logger.info("There isn't any pending thread request.")
return threads
def get_muted_friends(self, muted_content):
"""
friends whom stories or posts are muted
"""
self.api.get_muted_friends(muted_content)
if self.api.last_json.get("users"):
return [str(user.get("pk")) for user in self.api.last_json.get("users")]
else:
self.logger.info(
"No users with muted {} " "in your friends".format(muted_content)
)
return []
| |
#!/usr/bin/env python
#filename: dinkum-install-from-git.py
#path: project/bin/
#repo: http://github.com/dinkumsoftware/dinkum.git
"""
This installs all the dinkumsoftware programs & code in ~/.dinkum/git-copy
by copying files from the git clone of dinkumsoftware.
It puts symbolic links in ~/doc/dinkum/* to all the documentation
files under ~/.dinkum/git-copy
It makes mild alteration to ~/.bashrc
Puts ~/dinkum/git-copy/bin on the PATH
Puts ~/.dinkum/git-copy/python on PYTHONPATH
It does NOT require sudo.
It assumes you have done a full git checkout of the source. (See EXAMPLES)
It's OK to run this as many times as you like. If it was previously installed,
it will be removed and reinstalled. If a new version of something shows up in git,
you can rerun this to get it on your machine
Any existing installation from git is UNINSTALLED and then reinstalled.
This is due to limitations of some underlying tools.
EXAMPLES
cd ~/<somewhere>
git clone http://github.com/dinkumsoftware/dinkum.git
<somewhere>/bin/dinkum-install-from-git
[optional] rm -rf ~/<somewhere>/dinkum # Don't need git copy after install
# But feel free to keep it
To undo these actions:
rm -rf ~/.dinkum
[optional] edit ~/.bashrc (see end of file) to remove dinkum stuff.
# This is not required, won't break anything if you leave it in
<todo> fix this
USAGE
dinkum-install-from-git
optional arguments:
-h, --help show this help message and exit
-v, --verbose Announce what it is doing
-d, --dry-run Announce what WOULD do, but don't do it
You will have to log out and log in to pick up the .bashrc changes.
If you want to just try it in a terminal window:
bash
EXIT STATUS
0 All is good
1 Something went wrong, maybe error printout
2 Some kind of exception tossed
AUTHOR
dinkumsoftware.com/tc
LICENSE
Copyright(c) 2019 Dinkum Software
Licensed under Apache Version 2.0, January 2004
http://www.apache.org/licenses/
Full license text at end of file.
VERSION
{program_version}
"""
# Single source of truth for the version shown in --help output.
program_version = 0.0
# Substitute the version into the module docstring's {program_version} slot.
__doc__=__doc__.format(program_version=program_version)
# history:
# 2019-04-30 tc Initial
# 2019-04-30 tc Switched to shutil.copytree()
# 2019-05-05 tc refactoring
# 2020-02-03 tc Convert to python3, print ==> print()
import sys, os, traceback, argparse
import textwrap # dedent
# Other imports from dinkum.x.y.z must wait a bit
# until PYTHONPATH is set up
# Support code
def find_dinkum_git_root_dir(file_or_dir=sys.argv[0]) :
    '''Return the dinkum git root directory enclosing "file_or_dir".

    Walks UP the directory tree from "file_or_dir" (default: the running
    executable) until it finds a directory containing a .git subdirectory.
    That directory MUST be named "dinkum" for python package imports to
    work.  Stops looking at the / directory.

    Raises (locally defined Exception subclasses):
        BadFileorDirArg         file_or_dir is not a valid path
        NoDotGitDirectory       hit / without finding a .git directory
        GitRootDirHasWrongName  found a git root, but it is not named "dinkum"
    '''
    root = '/'
    git_dirname = '.git'

    # Normalise the starting point: ~ expansion, absolute path, and a
    # directory (a file argument is replaced by its enclosing directory).
    wrk_dir = os.path.abspath(os.path.expanduser(file_or_dir))
    if not os.path.isdir(wrk_dir):
        wrk_dir = os.path.dirname(wrk_dir)

    # Climb the tree until we find a dir with a .git subdir or run out.
    while True:
        if not os.path.isdir(wrk_dir):
            # The caller handed us something that is not a real path.
            class BadFileorDirArg(Exception):
                pass
            raise BadFileorDirArg(
                "Not a directory: {}\n"
                "arg file_or_dir:{} was NOT a valid path.\n"
                "PROBABLE SOFTWARE ERROR.".format(wrk_dir, file_or_dir))

        if wrk_dir == root or not wrk_dir:
            # Ran off the top of the filesystem without seeing a .git dir.
            class NoDotGitDirectory(Exception):
                pass
            raise NoDotGitDirectory(
                "FAILED to locate dinkum git root dir.\n"
                "Starting looking upward in file system from: {}\n"
                "Did NOT find a directory with {} in it.".format(
                    file_or_dir, git_dirname))

        if os.path.isdir(os.path.join(wrk_dir, git_dirname)):
            break  # wrk_dir is a git root

        wrk_dir = os.path.dirname(wrk_dir)  # keep climbing

    # Found a git root; enforce the required directory name.
    dinkum_git_root_reqd_name = "dinkum"
    if os.path.basename(wrk_dir) != dinkum_git_root_reqd_name:
        class GitRootDirHasWrongName(Exception):
            pass
        raise GitRootDirHasWrongName(
            "Found a git root dir (has a .git subdir): {}\n"
            "BUT it is NOT named: {}\n"
            "It must have that name for python package imports to work.\n"
            "Sure you are running from a good git clone of dinkumsoftware?".format(
                wrk_dir, dinkum_git_root_reqd_name))

    # Life is good: the git root exists and is properly named.
    return wrk_dir
# NOTE: main() below returns an "err_msg" string on error; the __main__ block prints it.
def main():
    """dinkum-install-from-git

    Parse the command line, locate the dinkum git clone we are running
    from, make its bin/ and python packages reachable, then hand off to
    dinkum.project.install.install_from_git().

    optional arguments:
      -h, --help     show this help message and exit
      -v, --verbose  Announce what it is doing
      -d, --dry-run  Announce what WOULD do, but don't do it

    Returns None on success; on error, an "err_msg" string for the caller
    to print.
    """
    # Specify and parse the command line arguments
    parser = argparse.ArgumentParser(
        # print document string "as is" on --help
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent(__doc__))
    parser.add_argument("-v", "--verbose",
                        help="Announce what it is doing",
                        action="store_true")
    parser.add_argument("-d", "--dry-run",
                        help="Announce what WOULD do, but don't do it",
                        action="store_true")
    # Bug fix: parse_args() was previously called twice (first result
    # discarded); parsing the command line once is enough.
    args = parser.parse_args()
    verbose = args.verbose
    dry_run = args.dry_run

    # These are fragile times as we are a dinkum install program.  We can't
    # make assumptions about where to find stuff -- in particular imports
    # of dinkum packages or the location of executables.

    # We expect we are running from a git clone copy of dinkumsoftware.
    # Find the root, the one with the .git in it.
    git_root_dir = find_dinkum_git_root_dir()

    # All the dinkum executables live in various dinkum/.../bin dirs and
    # dinkum/bin has symbolic links to all of them.  Put dinkum/bin on the
    # front of PATH so they can be invoked during the install.
    os.environ['PATH'] = os.path.join(git_root_dir, "bin") + os.pathsep + os.environ['PATH']

    # Diddle the module search path so dinkum python imports work.  The
    # packages live under git_root_dir (which is named "dinkum"), so
    # sys.path needs the PARENT of that directory at the head.
    sys.path.insert(0, os.path.dirname(git_root_dir))

    # We can now use the dinkum python packages.
    from dinkum.project.install import install_from_git
    install_from_git(git_root_dir, verbose, dry_run)

    # Life is good
    print("Successfully installed DinkumSoftware's software from a git clone.")

    # Warn them if we didn't do anything
    if dry_run:
        print("** This was a DRY-RUN. Nothing was written or removed. **")
    return None
# Script entry point.  Exit status: 0 success, 1 main() returned an error
# message (printed), 2 an uncaught exception occurred (traceback printed).
if __name__ == '__main__':
    try:
        # Invoke the actual program
        # It's an error if it returns anything (the return is an err_msg string)
        main_return = main()
        if main_return :
            print (main_return) # ERROR: print whatever it returns
        # Pass back to the OS the proper exit code. 0 is good
        sys.exit( 1 if main_return else 0)
    except KeyboardInterrupt as e: # Ctrl-C: propagate for the normal abort behavior
        raise e
    except SystemExit as e: # sys.exit() above: propagate the chosen exit code
        raise e
    except Exception as e:
        # Anything else is a bug: show it and exit 2 (os._exit skips cleanup).
        print ('ERROR: uncaught EXCEPTION. Msg after traceback.')
        traceback.print_exc() # stack dump (which prints err msg)
        os._exit(2)
# full-license:
'''
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
| |
from TrendMicroVisionOne import (
Client,
add_or_remove_from_block_list,
quarantine_or_delete_email_message,
isolate_or_restore_connection,
terminate_process,
add_or_delete_from_exception_list,
add_to_suspicious_list,
delete_from_suspicious_list,
get_file_analysis_status,
get_file_analysis_report,
collect_file,
download_information_collected_file,
submit_file_to_sandbox,
get_task_status,
get_endpoint_info
)
# Provide valid API KEY (placeholder -- every HTTP call below is mocked,
# so no real credential is needed by these tests).
api_key = "test api key"
# Mock function for add to block list and remove from block list
def add_remove_blocklist_mock_response(*args, **kwargs):
    """Canned block-list API response, patched in place of Client.http_request."""
    return {
        "actionId": "88139521",
        "taskStatus": "pending",
        "data": {
            "createdTime": 1589525651,
            "executedTime": 1589525725,
            "finishedTime": 1589525725,
            "taskStatus": "success",
            "error": {},
        },
    }
# Test cases for add to block list
def test_add_blocklist(mocker):
    """Adding a SHA1 to the block list yields a pending action id."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        add_remove_blocklist_mock_response,
    )
    client = Client("https://api.xdr.trendmicro.com", api_key)
    payload = {
        "valueType": "file_sha1",
        "targetValue": "2de5c1125d5f991842727ed8ea8b5fda0ffa249b",
        "productId": "sao",
        "description": "block info",
    }
    response = add_or_remove_from_block_list(
        client, "trendmicro-visionone-add-to-block-list", payload
    )
    assert response.outputs_prefix == "VisionOne.BlockList"
    assert response.outputs_key_field == "actionId"
    assert response.outputs["taskStatus"] == "pending"
    assert isinstance(response.outputs["actionId"], str)
# Test cases for remove from block list
def test_remove_block_list(mocker):
    """Removing a SHA1 from the block list yields a pending action id."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        add_remove_blocklist_mock_response,
    )
    client = Client("https://api.xdr.trendmicro.com", api_key)
    payload = {
        "valueType": "file_sha1",
        "targetValue": "2de5c1125d5f991842727ed8ea8b5fda0ffa249b",
        "productId": "sao",
        "description": "block info",
    }
    response = add_or_remove_from_block_list(
        client, "trendmicro-visionone-remove-from-block-list", payload
    )
    assert response.outputs_prefix == "VisionOne.BlockList"
    assert response.outputs_key_field == "actionId"
    assert response.outputs["taskStatus"] == "pending"
    assert isinstance(response.outputs["actionId"], str)
# Mock function for quarantine and delete email message
def quarantine_delete_email_mock_response(*args, **kwargs):
    """Canned email quarantine/delete response, patched over Client.http_request."""
    return {
        "actionId": "88139521",
        "taskStatus": "pending",
        "data": {
            "createdTime": 1589525651,
            "executedTime": 1589525725,
            "finishedTime": 1589525725,
            "taskStatus": "success",
            "error": {},
        },
    }
# Test cases for quarantine email message
def test_quarantine_email_message(mocker):
    """Quarantining an email message yields a pending action id."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        quarantine_delete_email_mock_response,
    )
    client = Client("https://api.xdr.trendmicro.com", api_key)
    payload = {
        "messageId": "<CANUJTKTjto9GAHTr9V=TFqMZhRXqVn=MfSqmTdAMyv9PDX3k+vQ0w@mail.gmail.com>",
        "mailBox": "kjshdfjksahd@trendenablement.com",
        "messageDeliveryTime": "2021-12-09T14:00:12.000Z",
        "productId": "sca",
        "description": "quarantine info",
    }
    response = quarantine_or_delete_email_message(
        client, "trendmicro-visionone-quarantine-email-message", payload
    )
    assert response.outputs_prefix == "VisionOne.Email"
    assert response.outputs_key_field == "actionId"
    assert response.outputs["taskStatus"] == "pending"
    assert isinstance(response.outputs["actionId"], str)
# Test cases for delete email message
def test_delete_email_message(mocker):
    """Deleting an email message yields a pending action id."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        quarantine_delete_email_mock_response,
    )
    client = Client("https://api.xdr.trendmicro.com", api_key)
    payload = {
        "messageId": "<CANUJTKTqmuCT12v7mpbxZih_crrPMfSqmTdAMyv9PDX3k+vQ0w@mail.gmail.com>",
        "mailBox": "kjshdfjksahd@trendenablement.com",
        "messageDeliveryTime": "2021-12-09T14:00:55.000Z",
        "productId": "sca",
        "description": "quarantine info",
    }
    response = quarantine_or_delete_email_message(
        client, "trendmicro-visionone-delete-email-message", payload
    )
    assert response.outputs_prefix == "VisionOne.Email"
    assert response.outputs_key_field == "actionId"
    assert response.outputs["taskStatus"] == "pending"
    assert isinstance(response.outputs["actionId"], str)
# Mock function for isolate and restore endpoint
def isolate_restore_mock_response(*args, **kwargs):
    """Canned endpoint isolate/restore response, patched over Client.http_request."""
    return {
        "status": "string",
        "actionId": "88139521",
        "taskStatus": "pending",
        "result": {
            "computerId": "string",
        },
        "data": {
            "createdTime": 1589525651,
            "executedTime": 1589525725,
            "finishedTime": 1589525725,
            "taskStatus": "success",
            "error": {},
        },
    }
# Test cases for isolate endpoint
def test_isolate_endpoint(mocker):
    """Isolating an endpoint yields a pending task status."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        isolate_restore_mock_response,
    )
    client = Client("https://api.xdr.trendmicro.com", api_key)
    payload = {
        "endpoint": "hostname",
        "productId": "sao",
        "description": "isolate endpoint info",
    }
    response = isolate_or_restore_connection(
        client, "trendmicro-visionone-isolate-endpoint", payload
    )
    assert response.outputs_prefix == "VisionOne.Endpoint_Connection"
    assert response.outputs_key_field == "actionId"
    assert response.outputs["taskStatus"] == "pending"
# Test cases for restore endpoint
def test_restore_endpoint(mocker):
    """Restoring an endpoint connection yields a pending task status."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        isolate_restore_mock_response,
    )
    client = Client("https://api.xdr.trendmicro.com", api_key)
    payload = {
        "endpoint": "hostname",
        "productId": "sao",
        "description": "restore endpoint info",
    }
    response = isolate_or_restore_connection(
        client, "trendmicro-visionone-restore-endpoint-connection", payload
    )
    assert response.outputs_prefix == "VisionOne.Endpoint_Connection"
    assert response.outputs_key_field == "actionId"
    assert response.outputs["taskStatus"] == "pending"
# Test cases for terminate process endpoint
def test_terminate_process_endpoint(mocker):
    """Terminating a process yields a pending action id."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        isolate_restore_mock_response,
    )
    client = Client("https://api.xdr.trendmicro.com", api_key)
    payload = {
        "endpoint": "00:50:56:81:87:A8",
        "fileSha1": "12a08b7a3c5a10b64700c0aca1a47941b50a4f8b",
        "productId": "sao",
        "description": "terminate info",
        "filename": "testfile",
    }
    response = terminate_process(client, payload)
    assert response.outputs_prefix == "VisionOne.Terminate_Process"
    assert response.outputs_key_field == "actionId"
    assert response.outputs["taskStatus"] == "pending"
    assert isinstance(response.outputs["actionId"], str)
# Mock function for add and delete exception list
def add_delete_exception_mock_response(*args, **kwargs):
    """Stub returning a fixed exception-list item count."""
    return 20
# Test cases for add exception list endpoint.
def test_add_object_to_exception_list(mocker):
    """Adding a domain to the exception list reports the item total."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        add_delete_exception_mock_response,
    )
    mocker.patch(
        "TrendMicroVisionOne.Client.exception_list_count",
        add_delete_exception_mock_response,
    )
    client = Client("https://api.xdr.trendmicro.com", api_key)
    payload = {
        "type": "domain",
        "value": "1.alisiosanguera.com",
        "description": "new key",
    }
    response = add_or_delete_from_exception_list(
        client,
        "trendmicro-visionone-add-objects-to-exception-list",
        payload,
    )
    assert response.outputs_prefix == "VisionOne.Exception_List"
    assert response.outputs_key_field == "message"
    assert response.outputs["status_code"] is None
    assert isinstance(response.outputs["total_items"], int)
# Test cases for delete exception list.
def test_delete_object_to_exception_list(mocker):
    """Deleting a domain from the exception list reports the item total."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        add_delete_exception_mock_response,
    )
    mocker.patch(
        "TrendMicroVisionOne.Client.exception_list_count",
        add_delete_exception_mock_response,
    )
    client = Client("https://api.xdr.trendmicro.com", api_key)
    payload = {
        "type": "domain",
        "value": "1.alisiosanguera.com.cn",
        "description": "testing exception",
    }
    response = add_or_delete_from_exception_list(
        client, "trendmicro-visionone-delete-objects-from-exception-list", payload
    )
    assert response.outputs_prefix == "VisionOne.Exception_List"
    assert response.outputs_key_field == "message"
    assert response.outputs["status_code"] is None
    assert isinstance(response.outputs["total_items"], int)
# Mock response for add and delete suspicious list
def add_delete_suspicious_mock_response(*args, **kwargs):
    """Stub returning a fixed suspicious-list item count."""
    return 20
# Test cases for add suspicious object list
def test_add_object_to_suspicious_list(mocker):
    """Adding a domain to the suspicious list reports the item total."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        add_delete_suspicious_mock_response,
    )
    mocker.patch(
        "TrendMicroVisionOne.Client.suspicious_list_count",
        add_delete_suspicious_mock_response,
    )
    client = Client("https://api.xdr.trendmicro.com", api_key)
    payload = {
        "type": "domain",
        "value": "1.alisiosanguera.com.cn",
        "description": "Example Suspicious Object.",
        "scanAction": "log",
        "riskLevel": "high",
        "expiredDay": 15,
    }
    response = add_to_suspicious_list(client, payload)
    assert response.outputs_prefix == "VisionOne.Suspicious_List"
    assert response.outputs_key_field == "message"
    assert response.outputs["status_code"] is None
    assert isinstance(response.outputs["total_items"], int)
# Test cases for delete suspicious object list
def test_delete_object_from_suspicious_list(mocker):
    """Test delete object from suspicious list."""
    for target in (
        "TrendMicroVisionOne.Client.http_request",
        "TrendMicroVisionOne.Client.suspicious_list_count",
    ):
        mocker.patch(target, add_delete_suspicious_mock_response)
    v1_client = Client("https://api.xdr.trendmicro.com", api_key)
    response = delete_from_suspicious_list(
        v1_client, {"type": "domain", "value": "1.alisiosanguera.com.cn"})
    assert response.outputs["status_code"] is None
    assert isinstance(response.outputs["total_items"], int)
    assert response.outputs_prefix == "VisionOne.Suspicious_List"
    assert response.outputs_key_field == "message"
# Mock response for Get file analysis status
def mock_file_status_response(*args, **kwargs):
    """Canned payload for a finished sandbox file-analysis task."""
    digest = {
        "md5": "4ac174730d4143a119037d9fda81c7a9",
        "sha1": "fb5608fa03de204a12fe1e9e5275e4a682107471",
        "sha256": (
            "65b0f656e79ab84ca17807158e3ea"
            "c206bd58be6689ddeb95956a48748d138f9"
        ),
    }
    summary = {
        "analysisCompletionTime": "2021-05-07T03:08:40Z",
        "riskLevel": "high",
        "description": "",
        "detectionNameList": [],
        "threatTypeList": [],
        "trueFileType": "exe",
    }
    return {
        "code": "Success",
        "message": "Success",
        "data": {
            "taskId": "012e4eac-9bd9-4e89-95db-77e02f75a6f3",
            "taskStatus": "finished",
            "digest": digest,
            "analysisSummary": summary,
            "reportId": "012e4eac-9bd9-4e89-95db-77e02f75a6f3",
        },
    }
# Test Cases for Get file analysis status
def test_get_file_status(mocker):
    """Test to get status of file"""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        mock_file_status_response)
    v1_client = Client("https://api.xdr.trendmicro.com", api_key)
    response = get_file_analysis_status(
        v1_client, {"taskId": "921674d0-9735-4f79-b7de-c852e00a003d"})
    outputs = response.outputs
    assert outputs["message"] == "Success"
    assert outputs["code"] == "Success"
    assert outputs["task_id"] == "012e4eac-9bd9-4e89-95db-77e02f75a6f3"
    assert outputs["taskStatus"] == "finished"
    assert outputs["report_id"] == "012e4eac-9bd9-4e89-95db-77e02f75a6f3"
    assert response.outputs_prefix == "VisionOne.File_Analysis_Status"
    assert response.outputs_key_field == "message"
def test_get_report_id(mocker):
    """Test to get status of file with report id"""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        mock_file_status_response)
    v1_client = Client("https://api.xdr.trendmicro.com", api_key)
    response = get_file_analysis_status(
        v1_client, {"taskId": "921674d0-9735-4f79-b7de-c852e00a003d"})
    assert response.outputs["message"] == "Success"
    assert response.outputs["code"] == "Success"
    assert response.outputs["report_id"] == (
        "012e4eac-9bd9-4e89-95db-77e02f75a6f3")
    assert response.outputs_prefix == "VisionOne.File_Analysis_Status"
    assert response.outputs_key_field == "message"
# Mock response for Get file analysis report
def mock_file_report_response(*args, **kwargs):
    """Canned file-analysis report with a single suspicious-object entry."""
    entry = {
        "type": "ip",
        "value": "6.6.6.6",
        "riskLevel": "high",
        "analysisCompletionTime": "2021-05-07T03:08:40Z",
        "expiredTime": "2021-06-07T03:08:40Z",
        "rootFileSha1": "fb5608fa03de204a12fe1e9e5275e4a682107471",
    }
    return {"code": "Success", "message": "Success", "data": [entry]}
# Test cases for get file analysis report
def test_get_file_analysis_report(mocker):
    """Test get file analysis report data."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        mock_file_report_response)
    v1_client = Client("https://api.xdr.trendmicro.com", api_key)
    payload = {
        "reportId": "800f908d-9578-4333-91e5-822794ed5483",
        "type": "suspiciousObject",
    }
    response = get_file_analysis_report(v1_client, payload)
    assert response.outputs["message"] == "Success"
    assert response.outputs["code"] == "Success"
    first_entry = response.outputs["data"][0]
    for field in (
        "type",
        "value",
        "risk_level",
        "analysis_completion_time",
        "expired_time",
        "root_file_sha1",
    ):
        assert isinstance(first_entry[field], str)
def test_get_file_analysis_report_1(mocker):
    """Test get file analysis report data."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        mock_file_report_response)
    v1_client = Client("https://api.xdr.trendmicro.com", api_key)
    response = get_file_analysis_report(
        v1_client,
        {
            "reportId": "800f908d-9578-4333-91e5-822794ed5483",
            "type": "suspiciousObject",
        },
    )
    assert response.outputs["message"] == "Success"
    assert response.outputs["code"] == "Success"
    assert len(response.outputs["data"]) > 0
# Mock response for the collect-forensic-file endpoint.
def mock_collect_file(*args, **kwargs):
    """Canned collect-file response: action accepted, task still pending."""
    return {
        "status": "string",
        "actionId": "88139521",
        "taskStatus": "pending",
        "result": {"computerId": "string"},
        "data": {
            "createdTime": 1589525651,
            "executedTime": 1589525725,
            "finishedTime": 1589525725,
            "taskStatus": "success",
            "error": {},
        },
    }
# Test cases for collect forensic file.
def test_collect_forensic_file(mocker):
    """Test collect file with positive scenario."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        mock_collect_file)
    v1_client = Client("https://api.xdr.trendmicro.com", api_key)
    payload = {
        "endpoint": "hostname",
        "description": "collect file",
        "productId": "sao",
        "filePath": "/file_path/sample.txt",
        "os": "linux",
    }
    response = collect_file(v1_client, payload)
    assert response.outputs["taskStatus"] == "pending"
    assert isinstance(response.outputs["actionId"], str)
    assert response.outputs_prefix == "VisionOne.Collect_Forensic_File"
    assert response.outputs_key_field == "actionId"
# Mock for downloaded file information
def mock_download_collected_file_info_response(*args, **kwargs):
    """Canned download-info payload for a collected forensic file."""
    info = {
        "url": "string",
        "expires": "2011-10-05T14:48:00.000Z",
        "password": "string",
        "filename": "string",
    }
    return {"data": info}
# Test Cases for Collected downloaded file information.
def test_get_forensic_file_information(mocker):
    """Test endpoint to get collected file information based on action id"""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        mock_download_collected_file_info_response
    )
    v1_client = Client("https://api.xdr.trendmicro.com", api_key)
    response = download_information_collected_file(
        v1_client, {"actionId": "00000700"})
    for field in ("url", "expires", "password", "filename"):
        assert isinstance(response.outputs[field], str)
# Mock response for submit file to sandbox.
# NOTE(review): the function name keeps the original "reponse" spelling in
# case other code refers to it by name.
def mock_submit_file_to_sandbox_reponse(*args, **kwargs):
    """Canned successful submit-file-to-sandbox payload."""
    digest = {
        "md5": "4ac174730d4143a119037d9fda81c7a9",
        "sha1": "fb5608fa03de204a12fe1e9e5275e4a682107471",
        "sha256": (
            "65b0f656e79ab84ca17807158e3ea"
            "c206bd58be6689ddeb95956a48748d138f9"
        ),
    }
    return {
        "code": "Success",
        "message": "Success",
        "data": {
            "taskId": "012e4eac-9bd9-4e89-95db-77e02f75a6f3",
            "digest": digest,
        },
    }
# Mock for requests.get used by the submit-file-to-sandbox test.
def mocked_requests_get(*args, **kwargs):
    """Return a minimal requests-like response keyed off the URL."""
    class MockResponse:
        def __init__(self, json_data, status_code, content):
            self.json_data = json_data
            self.status_code = status_code
            self.content = content

        def json(self):
            return self.json_data

    known_urls = {
        'http://someurl.com/test.json': {"key1": "value1"},
        'http://someotherurl.com/anothertest.json': {"key2": "value2"},
    }
    body = known_urls.get(args[0])
    if body is None:
        # Unknown URL -> empty 404 response.
        return MockResponse(None, 404, None)
    return MockResponse(body, 200, "response")
# Mock for requests.post used by the submit-file-to-sandbox test.
def mocked_requests_post(*args, **kwargs):
    """Return a minimal requests-like response whose json() is always the
    canned successful sandbox-submission payload, regardless of URL.

    The status code / content still vary with the URL, mirroring
    mocked_requests_get above.
    """
    class MockResponse:
        def __init__(self, json_data, status_code, content):
            self.json_data = json_data
            self.status_code = status_code
            self.content = content

        def json(self):
            return {
                "code": "Success",
                "message": "Success",
                "data": {
                    "taskId": "012e4eac-9bd9-4e89-95db-77e02f75a6f3",
                    "digest": {
                        "md5": "4ac174730d4143a119037d9fda81c7a9",
                        "sha1": "fb5608fa03de204a12fe1e9e5275e4a682107471",
                        # BUG FIX: a stray comma between the two string
                        # fragments made "sha256" a 2-tuple instead of one
                        # implicitly-concatenated digest string (compare
                        # mock_submit_file_to_sandbox_reponse).
                        "sha256": (
                            "65b0f656e79ab84ca17807158e3ea"
                            "c206bd58be6689ddeb95956a48748d138f9"
                        )
                    },
                },
            }

        def raise_for_status(self):
            # Never raises in tests; mimics a 2xx response object.
            return True

    if args[0] == 'http://someurl.com/test.json':
        return MockResponse({"key1": "value1"}, 200, "response")
    elif args[0] == 'http://someotherurl.com/anothertest.json':
        return MockResponse({"key2": "value2"}, 200, "response")
    return MockResponse(None, 404, None)
def test_submit_file_to_sandbox(mocker):
    """Test submitting a file to the sandbox with mocked requests."""
    mocker.patch("TrendMicroVisionOne.requests.get", mocked_requests_get)
    mocker.patch("TrendMicroVisionOne.requests.post", mocked_requests_post)
    v1_client = Client("https://api.xdr.trendmicro.com", api_key)
    payload = {
        "fileUrl": "http://adsd.com",
        "fileName": "XDR_ResponseApp_CollectFile_ID00000700_20211206T134158Z.7z",
        "archivePassword": "6hn467c8",
        "documentPassword": ""
    }
    response = submit_file_to_sandbox(v1_client, payload)
    assert response.outputs["message"] == "Success"
    assert response.outputs["code"] == "Success"
# Mock function for check task status
def check_task_status_mock_response(*args, **kwargs):
    """Canned task-status payload describing a completed task."""
    task = {
        "createdTime": 1589525651,
        "executedTime": 1589525725,
        "finishedTime": 1589525725,
        "taskStatus": "success",
        "error": {},
    }
    return {"data": task}
def test_check_task_status(mocker):
    """Test polling task status for a completed action."""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        check_task_status_mock_response)
    # Scheduled-command capability check is a no-op in unit tests.
    mocker.patch(
        "CommonServerPython.ScheduledCommand.raise_error_if_not_supported",
        lambda: None
    )
    v1_client = Client("https://api.xdr.trendmicro.com", api_key)
    response = get_task_status({"actionId": "00001108"}, v1_client)
    assert response.outputs["taskStatus"] == "success"
# Mock response for get endpoint information.
def mock_get_endpoint_info_response(*args, **kwargs):
    """Canned endpoint-information payload for a single Windows host."""
    endpoint = {
        "logonAccount": {
            "value": ["DOMAIN\\username"],
            "updateAt": 0,
        },
        "hostname": {
            "value": "hostname",
            "updateAt": 0,
        },
        "macAddr": {
            "value": "00:11:22:33:44:55",
            "updateAt": 0,
        },
        "ip": {
            "value": "192.168.1.1",
            "updateAt": 0,
        },
        "osName": "Windows",
        "osVersion": "10.0.19042",
        "osDescription": "Windows 10 Pro (64 bit) build 19042",
        "productCode": "xes",
    }
    return {
        "status": "SUCCESS",
        "errorCode": 0,
        "message": "message",
        "result": endpoint,
    }
# Test case for get endpoint information.
def test_get_endpoint_information(mocker):
    """Test get information from endpoint based on computerid"""
    mocker.patch(
        "TrendMicroVisionOne.Client.http_request",
        mock_get_endpoint_info_response
    )
    v1_client = Client("https://api.xdr.trendmicro.com", api_key)
    response = get_endpoint_info(v1_client, {"endpoint": "hostname"})
    assert response.outputs["status"] == "SUCCESS"
    for field in (
        "message",
        "hostname",
        "ip",
        "macAddr",
        "osDescription",
        "osName",
        "osVersion",
        "productCode",
    ):
        assert isinstance(response.outputs[field], str)
| |
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manage hosts in the current zone.
"""
import collections
import functools
import time
try:
from collections import UserDict as IterableUserDict # Python 3
except ImportError:
from UserDict import IterableUserDict # Python 2
import iso8601
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from nova import context as context_module
from nova import exception
from nova.i18n import _LI, _LW
from nova import objects
from nova.pci import stats as pci_stats
from nova.scheduler import filters
from nova.scheduler import weights
from nova import utils
from nova.virt import hardware
# Scheduler configuration options; registered on the global CONF object
# below so they are available as CONF.<option_name>.
host_manager_opts = [
    cfg.MultiStrOpt('scheduler_available_filters',
            default=['nova.scheduler.filters.all_filters'],
            help='Filter classes available to the scheduler which may '
                    'be specified more than once. An entry of '
                    '"nova.scheduler.filters.all_filters" '
                    'maps to all filters included with nova.'),
    cfg.ListOpt('scheduler_default_filters',
                default=[
                    'RetryFilter',
                    'AvailabilityZoneFilter',
                    'RamFilter',
                    'DiskFilter',
                    'ComputeFilter',
                    'ComputeCapabilitiesFilter',
                    'ImagePropertiesFilter',
                    'ServerGroupAntiAffinityFilter',
                    'ServerGroupAffinityFilter',
                ],
                help='Which filter class names to use for filtering hosts '
                     'when not specified in the request.'),
    cfg.ListOpt('scheduler_weight_classes',
                default=['nova.scheduler.weights.all_weighers'],
                help='Which weight class names to use for weighing hosts'),
    cfg.BoolOpt('scheduler_tracks_instance_changes',
                default=True,
                help='Determines if the Scheduler tracks changes to instances '
                     'to help with its filtering decisions.'),
]
CONF = cfg.CONF
CONF.register_opts(host_manager_opts)
LOG = logging.getLogger(__name__)
# Name of the lock serializing access to HostManager._instance_info
# (see the @utils.synchronized decorators on the update/delete/sync methods).
HOST_INSTANCE_SEMAPHORE = "host_instance"
class ReadOnlyDict(IterableUserDict):
    """A dict-like object that rejects all mutation after construction."""
    def __init__(self, source=None):
        # Populate the backing store once; every mutating method below
        # raises, so the contents are frozen from here on.
        self.data = dict(source) if source else {}

    def __setitem__(self, key, item):
        raise TypeError()

    def __delitem__(self, key):
        raise TypeError()

    def clear(self):
        raise TypeError()

    def pop(self, key, *args):
        raise TypeError()

    def popitem(self):
        raise TypeError()

    def update(self):
        raise TypeError()
@utils.expects_func_args('self', 'spec_obj')
def set_update_time_on_success(function):
    """Set updated time of HostState when consuming succeed.

    Decorator for HostState methods taking (self, spec_obj): on success it
    stamps ``self.updated`` with the current UTC time; on failure it logs
    and swallows the exception, returning None instead of re-raising.
    """
    @functools.wraps(function)
    def decorated_function(self, spec_obj):
        return_value = None
        try:
            return_value = function(self, spec_obj)
        except Exception as e:
            # Ignores exception raised from consume_from_request() so that
            # booting instance would fail in the resource claim of compute
            # node, other suitable node may be chosen during scheduling retry.
            LOG.warning(_LW("Selected host: %(host)s failed to consume from "
                            "instance. Error: %(error)s"),
                        {'host': self.host, 'error': e})
        else:
            now = timeutils.utcnow()
            # NOTE(sbauza): Objects are UTC tz-aware by default
            self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())
        return return_value
    return decorated_function
class HostState(object):
    """Mutable and immutable information tracked for a host.
    This is an attempt to remove the ad-hoc data structures
    previously used and lock down access.
    """
    def __init__(self, host, node, compute=None):
        """Initialize a zeroed-out state record for one (host, node) pair.

        :param host: compute service host name
        :param node: hypervisor node name (may differ from ``host``)
        :param compute: optional ComputeNode object; when given, state is
            populated immediately via update_from_compute_node()
        """
        self.host = host
        self.nodename = node
        # Mutable available resources.
        # These will change as resources are virtually "consumed".
        self.total_usable_ram_mb = 0
        self.total_usable_disk_gb = 0
        self.disk_mb_used = 0
        self.free_ram_mb = 0
        self.free_disk_mb = 0
        self.vcpus_total = 0
        self.vcpus_used = 0
        self.pci_stats = None
        self.numa_topology = None
        # Additional host information from the compute node stats:
        self.num_instances = 0
        self.num_io_ops = 0
        # Other information
        self.host_ip = None
        self.hypervisor_type = None
        self.hypervisor_version = None
        self.hypervisor_hostname = None
        self.cpu_info = None
        self.supported_instances = None
        # Resource oversubscription values for the compute host:
        self.limits = {}
        # Generic metrics from compute nodes
        self.metrics = None
        # List of aggregates the host belongs to
        self.aggregates = []
        # Instances on this host
        self.instances = {}
        # Allocation ratios for this host
        self.ram_allocation_ratio = None
        self.cpu_allocation_ratio = None
        # Timestamp of last successful update (set by
        # update_from_compute_node / set_update_time_on_success).
        self.updated = None
        if compute:
            self.update_from_compute_node(compute)
    def update_service(self, service):
        """Replace the cached service dict with a read-only copy."""
        self.service = ReadOnlyDict(service)
    def update_from_compute_node(self, compute):
        """Update information about a host from a ComputeNode object."""
        # Skip stale data: keep our view if it is newer than the DB record.
        if (self.updated and compute.updated_at
                and self.updated > compute.updated_at):
            return
        all_ram_mb = compute.memory_mb
        # Assume virtual size is all consumed by instances if use qcow2 disk.
        free_gb = compute.free_disk_gb
        least_gb = compute.disk_available_least
        if least_gb is not None:
            if least_gb > free_gb:
                # can occur when an instance in database is not on host
                LOG.warning(_LW("Host %(hostname)s has more disk space than "
                                "database expected "
                                "(%(physical)s GB > %(database)s GB)"),
                            {'physical': least_gb, 'database': free_gb,
                             'hostname': compute.hypervisor_hostname})
            free_gb = min(least_gb, free_gb)
        free_disk_mb = free_gb * 1024
        self.disk_mb_used = compute.local_gb_used * 1024
        # NOTE(jogo) free_ram_mb can be negative
        self.free_ram_mb = compute.free_ram_mb
        self.total_usable_ram_mb = all_ram_mb
        self.total_usable_disk_gb = compute.local_gb
        self.free_disk_mb = free_disk_mb
        self.vcpus_total = compute.vcpus
        self.vcpus_used = compute.vcpus_used
        self.updated = compute.updated_at
        self.numa_topology = compute.numa_topology
        self.pci_stats = pci_stats.PciDeviceStats(
            compute.pci_device_pools)
        # All virt drivers report host_ip
        self.host_ip = compute.host_ip
        self.hypervisor_type = compute.hypervisor_type
        self.hypervisor_version = compute.hypervisor_version
        self.hypervisor_hostname = compute.hypervisor_hostname
        self.cpu_info = compute.cpu_info
        if compute.supported_hv_specs:
            self.supported_instances = [spec.to_list() for spec
                                        in compute.supported_hv_specs]
        else:
            self.supported_instances = []
        # Don't store stats directly in host_state to make sure these don't
        # overwrite any values, or get overwritten themselves. Store in self so
        # filters can schedule with them.
        self.stats = compute.stats or {}
        # Track number of instances on host
        self.num_instances = int(self.stats.get('num_instances', 0))
        self.num_io_ops = int(self.stats.get('io_workload', 0))
        # update metrics
        self.metrics = objects.MonitorMetricList.from_json(compute.metrics)
        # update allocation ratios given by the ComputeNode object
        self.cpu_allocation_ratio = compute.cpu_allocation_ratio
        self.ram_allocation_ratio = compute.ram_allocation_ratio
    # NOTE: the decorator stamps self.updated on success and swallows any
    # exception raised below (logging a warning and returning None).
    @set_update_time_on_success
    def consume_from_request(self, spec_obj):
        """Incrementally update host state from an RequestSpec object."""
        disk_mb = (spec_obj.root_gb +
                   spec_obj.ephemeral_gb) * 1024
        ram_mb = spec_obj.memory_mb
        vcpus = spec_obj.vcpus
        # Virtually "consume" the requested resources from this host's view.
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus
        # Track number of instances on host
        self.num_instances += 1
        pci_requests = spec_obj.pci_requests
        if pci_requests and self.pci_stats:
            pci_requests = pci_requests.requests
        else:
            pci_requests = None
        # Calculate the numa usage
        host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
            self)
        instance_numa_topology = spec_obj.numa_topology
        spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
            host_numa_topology, instance_numa_topology,
            limits=self.limits.get('numa_topology'),
            pci_requests=pci_requests, pci_stats=self.pci_stats)
        if pci_requests:
            instance_cells = None
            if spec_obj.numa_topology:
                instance_cells = spec_obj.numa_topology.cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)
        # NOTE(sbauza): Yeah, that's crap. We should get rid of all of those
        # NUMA helpers because now we're 100% sure that spec_obj.numa_topology
        # is an InstanceNUMATopology object. Unfortunately, since
        # HostState.host_numa_topology is still limbo between an NUMATopology
        # object (when updated by consume_from_request), a ComputeNode object
        # (when updated by update_from_compute_node), we need to keep the call
        # to get_host_numa_usage_from_instance until it's fixed (and use a
        # temporary orphaned Instance object as a proxy)
        instance = objects.Instance(numa_topology=spec_obj.numa_topology)
        self.numa_topology = hardware.get_host_numa_usage_from_instance(
            self, instance)
        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_request() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1
    def __repr__(self):
        # Compact single-line summary used in scheduler debug logs.
        return ("(%s, %s) ram:%s disk:%s io_ops:%s instances:%s" %
                (self.host, self.nodename, self.free_ram_mb, self.free_disk_mb,
                 self.num_io_ops, self.num_instances))
class HostManager(object):
    """Base HostManager class."""
    # Can be overridden in a subclass
    def host_state_cls(self, host, node, **kwargs):
        """Factory for per-(host, node) HostState objects."""
        return HostState(host, node, **kwargs)
    def __init__(self):
        # Map of (host, node) -> HostState, refreshed by
        # get_all_host_states().
        self.host_state_map = {}
        self.filter_handler = filters.HostFilterHandler()
        filter_classes = self.filter_handler.get_matching_classes(
                CONF.scheduler_available_filters)
        self.filter_cls_map = {cls.__name__: cls for cls in filter_classes}
        self.filter_obj_map = {}
        self.default_filters = self._choose_host_filters(self._load_filters())
        self.weight_handler = weights.HostWeightHandler()
        weigher_classes = self.weight_handler.get_matching_classes(
                CONF.scheduler_weight_classes)
        self.weighers = [cls() for cls in weigher_classes]
        # Dict of aggregates keyed by their ID
        self.aggs_by_id = {}
        # Dict of set of aggregate IDs keyed by the name of the host belonging
        # to those aggregates
        self.host_aggregates_map = collections.defaultdict(set)
        self._init_aggregates()
        self.tracks_instance_changes = CONF.scheduler_tracks_instance_changes
        # Dict of instances and status, keyed by host
        self._instance_info = {}
        if self.tracks_instance_changes:
            self._init_instance_info()
    def _load_filters(self):
        """Return the configured default filter names."""
        return CONF.scheduler_default_filters
    def _init_aggregates(self):
        """Build the aggregate caches from the database."""
        elevated = context_module.get_admin_context()
        aggs = objects.AggregateList.get_all(elevated)
        for agg in aggs:
            self.aggs_by_id[agg.id] = agg
            for host in agg.hosts:
                self.host_aggregates_map[host].add(agg.id)
    def update_aggregates(self, aggregates):
        """Updates internal HostManager information about aggregates."""
        if isinstance(aggregates, (list, objects.AggregateList)):
            for agg in aggregates:
                self._update_aggregate(agg)
        else:
            self._update_aggregate(aggregates)
    def _update_aggregate(self, aggregate):
        """Insert/refresh a single aggregate in both caches."""
        self.aggs_by_id[aggregate.id] = aggregate
        for host in aggregate.hosts:
            self.host_aggregates_map[host].add(aggregate.id)
        # Refreshing the mapping dict to remove all hosts that are no longer
        # part of the aggregate
        for host in self.host_aggregates_map:
            if (aggregate.id in self.host_aggregates_map[host]
                    and host not in aggregate.hosts):
                self.host_aggregates_map[host].remove(aggregate.id)
    def delete_aggregate(self, aggregate):
        """Deletes internal HostManager information about a specific aggregate.
        """
        if aggregate.id in self.aggs_by_id:
            del self.aggs_by_id[aggregate.id]
        for host in aggregate.hosts:
            if aggregate.id in self.host_aggregates_map[host]:
                self.host_aggregates_map[host].remove(aggregate.id)
    def _init_instance_info(self):
        """Creates the initial view of instances for all hosts.
        As this initial population of instance information may take some time,
        we don't wish to block the scheduler's startup while this completes.
        The async method allows us to simply mock out the _init_instance_info()
        method in tests.
        """
        def _async_init_instance_info():
            context = context_module.get_admin_context()
            LOG.debug("START:_async_init_instance_info")
            self._instance_info = {}
            compute_nodes = objects.ComputeNodeList.get_all(context).objects
            LOG.debug("Total number of compute nodes: %s", len(compute_nodes))
            # Break the queries into batches of 10 to reduce the total number
            # of calls to the DB.
            batch_size = 10
            start_node = 0
            end_node = batch_size
            while start_node <= len(compute_nodes):
                curr_nodes = compute_nodes[start_node:end_node]
                start_node += batch_size
                end_node += batch_size
                filters = {"host": [curr_node.host
                                    for curr_node in curr_nodes]}
                result = objects.InstanceList.get_by_filters(context,
                                                             filters)
                instances = result.objects
                LOG.debug("Adding %s instances for hosts %s-%s",
                          len(instances), start_node, end_node)
                for instance in instances:
                    host = instance.host
                    if host not in self._instance_info:
                        self._instance_info[host] = {"instances": {},
                                                     "updated": False}
                    inst_dict = self._instance_info[host]
                    inst_dict["instances"][instance.uuid] = instance
                # Call sleep() to cooperatively yield
                time.sleep(0)
            LOG.debug("END:_async_init_instance_info")
        # Run this async so that we don't block the scheduler start-up
        utils.spawn_n(_async_init_instance_info)
    def _choose_host_filters(self, filter_cls_names):
        """Since the caller may specify which filters to use we need
        to have an authoritative list of what is permissible. This
        function checks the filter names against a predefined set
        of acceptable filters.

        :raises: exception.SchedulerHostFilterNotFound if any requested
            filter name is unknown.
        """
        if not isinstance(filter_cls_names, (list, tuple)):
            filter_cls_names = [filter_cls_names]
        good_filters = []
        bad_filters = []
        for filter_name in filter_cls_names:
            if filter_name not in self.filter_obj_map:
                if filter_name not in self.filter_cls_map:
                    bad_filters.append(filter_name)
                    continue
                # Lazily instantiate and cache each filter object.
                filter_cls = self.filter_cls_map[filter_name]
                self.filter_obj_map[filter_name] = filter_cls()
            good_filters.append(self.filter_obj_map[filter_name])
        if bad_filters:
            msg = ", ".join(bad_filters)
            raise exception.SchedulerHostFilterNotFound(filter_name=msg)
        return good_filters
    def get_filtered_hosts(self, hosts, spec_obj,
                           filter_class_names=None, index=0):
        """Filter hosts and return only ones passing all filters."""
        def _strip_ignore_hosts(host_map, hosts_to_ignore):
            # Drop every (host, node) entry whose host is in the ignore list.
            ignored_hosts = []
            for host in hosts_to_ignore:
                for (hostname, nodename) in list(host_map.keys()):
                    if host == hostname:
                        del host_map[(hostname, nodename)]
                        ignored_hosts.append(host)
            ignored_hosts_str = ', '.join(ignored_hosts)
            LOG.info(_LI('Host filter ignoring hosts: %s'), ignored_hosts_str)
        def _match_forced_hosts(host_map, hosts_to_force):
            # Keep only entries whose host is in the force list.
            forced_hosts = []
            for (hostname, nodename) in list(host_map.keys()):
                if hostname not in hosts_to_force:
                    del host_map[(hostname, nodename)]
                else:
                    forced_hosts.append(hostname)
            if host_map:
                forced_hosts_str = ', '.join(forced_hosts)
                msg = _LI('Host filter forcing available hosts to %s')
            else:
                forced_hosts_str = ', '.join(hosts_to_force)
                msg = _LI("No hosts matched due to not matching "
                          "'force_hosts' value of '%s'")
            LOG.info(msg % forced_hosts_str)
        def _match_forced_nodes(host_map, nodes_to_force):
            # Keep only entries whose node is in the force list.
            forced_nodes = []
            for (hostname, nodename) in list(host_map.keys()):
                if nodename not in nodes_to_force:
                    del host_map[(hostname, nodename)]
                else:
                    forced_nodes.append(nodename)
            if host_map:
                forced_nodes_str = ', '.join(forced_nodes)
                msg = _LI('Host filter forcing available nodes to %s')
            else:
                forced_nodes_str = ', '.join(nodes_to_force)
                msg = _LI("No nodes matched due to not matching "
                          "'force_nodes' value of '%s'")
            LOG.info(msg % forced_nodes_str)
        if filter_class_names is None:
            filters = self.default_filters
        else:
            filters = self._choose_host_filters(filter_class_names)
        ignore_hosts = spec_obj.ignore_hosts or []
        force_hosts = spec_obj.force_hosts or []
        force_nodes = spec_obj.force_nodes or []
        if ignore_hosts or force_hosts or force_nodes:
            # NOTE(deva): we can't assume "host" is unique because
            # one host may have many nodes.
            name_to_cls_map = {(x.host, x.nodename): x for x in hosts}
            if ignore_hosts:
                _strip_ignore_hosts(name_to_cls_map, ignore_hosts)
                if not name_to_cls_map:
                    return []
            # NOTE(deva): allow force_hosts and force_nodes independently
            if force_hosts:
                _match_forced_hosts(name_to_cls_map, force_hosts)
            if force_nodes:
                _match_forced_nodes(name_to_cls_map, force_nodes)
            if force_hosts or force_nodes:
                # NOTE(deva): Skip filters when forcing host or node
                if name_to_cls_map:
                    return name_to_cls_map.values()
            hosts = six.itervalues(name_to_cls_map)
        return self.filter_handler.get_filtered_objects(filters,
                hosts, spec_obj, index)
    def get_weighed_hosts(self, hosts, spec_obj):
        """Weigh the hosts."""
        return self.weight_handler.get_weighed_objects(self.weighers,
                hosts, spec_obj)
    def get_all_host_states(self, context):
        """Returns a list of HostStates that represents all the hosts
        the HostManager knows about. Also, each of the consumable resources
        in HostState are pre-populated and adjusted based on data in the db.
        """
        service_refs = {service.host: service
                        for service in objects.ServiceList.get_by_binary(
                            context, 'nova-compute')}
        # Get resource usage across the available compute nodes:
        compute_nodes = objects.ComputeNodeList.get_all(context)
        seen_nodes = set()
        for compute in compute_nodes:
            service = service_refs.get(compute.host)
            if not service:
                LOG.warning(_LW(
                    "No compute service record found for host %(host)s"),
                    {'host': compute.host})
                continue
            host = compute.host
            node = compute.hypervisor_hostname
            state_key = (host, node)
            host_state = self.host_state_map.get(state_key)
            if host_state:
                host_state.update_from_compute_node(compute)
            else:
                host_state = self.host_state_cls(host, node, compute=compute)
                self.host_state_map[state_key] = host_state
            # We force to update the aggregates info each time a new request
            # comes in, because some changes on the aggregates could have been
            # happening after setting this field for the first time
            host_state.aggregates = [self.aggs_by_id[agg_id] for agg_id in
                                     self.host_aggregates_map[
                                         host_state.host]]
            host_state.update_service(dict(service))
            self._add_instance_info(context, compute, host_state)
            seen_nodes.add(state_key)
        # remove compute nodes from host_state_map if they are not active
        dead_nodes = set(self.host_state_map.keys()) - seen_nodes
        for state_key in dead_nodes:
            host, node = state_key
            LOG.info(_LI("Removing dead compute node %(host)s:%(node)s "
                         "from scheduler"), {'host': host, 'node': node})
            del self.host_state_map[state_key]
        return six.itervalues(self.host_state_map)
    def _add_instance_info(self, context, compute, host_state):
        """Adds the host instance info to the host_state object.
        Some older compute nodes may not be sending instance change updates to
        the Scheduler; other sites may disable this feature for performance
        reasons. In either of these cases, there will either be no information
        for the host, or the 'updated' value for that host dict will be False.
        In those cases, we need to grab the current InstanceList instead of
        relying on the version in _instance_info.
        """
        host_name = compute.host
        host_info = self._instance_info.get(host_name)
        if host_info and host_info.get("updated"):
            inst_dict = host_info["instances"]
        else:
            # Host is running old version, or updates aren't flowing.
            inst_list = objects.InstanceList.get_by_host(context, host_name)
            inst_dict = {instance.uuid: instance
                         for instance in inst_list.objects}
        host_state.instances = inst_dict
    def _recreate_instance_info(self, context, host_name):
        """Get the InstanceList for the specified host, and store it in the
        _instance_info dict.
        """
        instances = objects.InstanceList.get_by_host(context, host_name)
        inst_dict = {instance.uuid: instance for instance in instances}
        host_info = self._instance_info[host_name] = {}
        host_info["instances"] = inst_dict
        # Freshly rebuilt info is not yet confirmed by a compute update.
        host_info["updated"] = False
    @utils.synchronized(HOST_INSTANCE_SEMAPHORE)
    def update_instance_info(self, context, host_name, instance_info):
        """Receives an InstanceList object from a compute node.
        This method receives information from a compute node when it starts up,
        or when its instances have changed, and updates its view of hosts and
        instances with it.
        """
        host_info = self._instance_info.get(host_name)
        if host_info:
            inst_dict = host_info.get("instances")
            for instance in instance_info.objects:
                # Overwrite the entry (if any) with the new info.
                inst_dict[instance.uuid] = instance
            host_info["updated"] = True
        else:
            instances = instance_info.objects
            if len(instances) > 1:
                # This is a host sending its full instance list, so use it.
                host_info = self._instance_info[host_name] = {}
                host_info["instances"] = {instance.uuid: instance
                                          for instance in instances}
                host_info["updated"] = True
            else:
                self._recreate_instance_info(context, host_name)
                LOG.info(_LI("Received an update from an unknown host '%s'. "
                             "Re-created its InstanceList."), host_name)
    @utils.synchronized(HOST_INSTANCE_SEMAPHORE)
    def delete_instance_info(self, context, host_name, instance_uuid):
        """Receives the UUID from a compute node when one of its instances is
        terminated.
        The instance in the local view of the host's instances is removed.
        """
        host_info = self._instance_info.get(host_name)
        if host_info:
            inst_dict = host_info["instances"]
            # Remove the existing Instance object, if any
            inst_dict.pop(instance_uuid, None)
            host_info["updated"] = True
        else:
            self._recreate_instance_info(context, host_name)
            LOG.info(_LI("Received a delete update from an unknown host '%s'. "
                         "Re-created its InstanceList."), host_name)
    @utils.synchronized(HOST_INSTANCE_SEMAPHORE)
    def sync_instance_info(self, context, host_name, instance_uuids):
        """Receives the uuids of the instances on a host.
        This method is periodically called by the compute nodes, which send a
        list of all the UUID values for the instances on that node. This is
        used by the scheduler's HostManager to detect when its view of the
        compute node's instances is out of sync.
        """
        host_info = self._instance_info.get(host_name)
        if host_info:
            local_set = set(host_info["instances"].keys())
            compute_set = set(instance_uuids)
            if not local_set == compute_set:
                self._recreate_instance_info(context, host_name)
                LOG.info(_LI("The instance sync for host '%s' did not match. "
                             "Re-created its InstanceList."), host_name)
                return
            host_info["updated"] = True
            LOG.info(_LI("Successfully synced instances from host '%s'."),
                     host_name)
        else:
            self._recreate_instance_info(context, host_name)
            LOG.info(_LI("Received a sync request from an unknown host '%s'. "
                         "Re-created its InstanceList."), host_name)
| |
# -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.api import create_page, publish_page, add_plugin
from cms.conf.patch import post_patch_check
from cms.exceptions import PluginAlreadyRegistered, PluginNotRegistered
from cms.models import Page, Placeholder
from cms.models.pluginmodel import CMSPlugin, PluginModelBase
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.plugins.file.models import File
from cms.plugins.inherit.models import InheritPagePlaceholder
from cms.plugins.link.forms import LinkForm
from cms.plugins.link.models import Link
from cms.plugins.text.models import Text
from cms.plugins.text.utils import (plugin_tags_to_id_list,
plugin_tags_to_admin_html)
from cms.plugins.twitter.models import TwitterRecentEntries
from cms.test_utils.testcases import (CMSTestCase, URL_CMS_PAGE,
URL_CMS_PAGE_ADD, URL_CMS_PLUGIN_ADD, URL_CMS_PLUGIN_EDIT, URL_CMS_PAGE_CHANGE,
URL_CMS_PLUGIN_REMOVE)
from cms.test_utils.util.context_managers import SettingsOverride
from cms.utils.copy_plugins import copy_plugins_to
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management import call_command
from django.forms.widgets import Media
from django.test.testcases import TestCase
from project.pluginapp.models import Article, Section
from project.pluginapp.plugins.manytomany_rel.models import ArticlePluginModel
import os
class DumbFixturePlugin(CMSPluginBase):
    """Minimal do-nothing plugin used as a fixture by the plugin-pool
    (un)registration tests below."""
    model = CMSPlugin
    name = "Dumb Test Plugin. It does nothing."
    render_template = ""
    admin_preview = False
    def render(self, context, instance, placeholder):
        # No rendering logic; the template context is passed through as-is.
        return context
class PluginsTestBaseCase(CMSTestCase):
    """Shared fixture for the plugin tests: a superuser and a staff user,
    the first two configured languages, and an already-entered login
    context for the superuser."""
    def _make_user(self, name, superuser):
        # Build and persist an active staff user whose password equals
        # its username.
        user = User(username=name, is_staff=True, is_active=True,
                    is_superuser=superuser)
        user.set_password(name)
        user.save()
        return user
    def setUp(self):
        self.super_user = self._make_user("test", True)
        self.slave = self._make_user("slave", False)
        self.FIRST_LANG = settings.LANGUAGES[0][0]
        self.SECOND_LANG = settings.LANGUAGES[1][0]
        self._login_context = self.login_user_context(self.super_user)
        self._login_context.__enter__()
    def tearDown(self):
        self._login_context.__exit__(None, None, None)
    def approve_page(self, page):
        # Approve through the admin URL, then hand back a fresh copy.
        response = self.client.get(URL_CMS_PAGE + "%d/approve/" % page.pk)
        self.assertRedirects(response, URL_CMS_PAGE)
        return self.reload_page(page)
    def get_request(self, *args, **kwargs):
        request = super(PluginsTestBaseCase, self).get_request(*args, **kwargs)
        request.placeholder_media = Media()
        return request
class PluginsTestCase(PluginsTestBaseCase):
    """End-to-end plugin tests driven through the admin HTTP endpoints:
    adding, editing, copying and removing plugins, plugin-pool
    (un)registration, and inherit-plugin media handling."""
    def test_add_edit_plugin(self):
        """
        Test that you can add a text plugin
        """
        # add a new text plugin
        page_data = self.get_new_page_data()
        response = self.client.post(URL_CMS_PAGE_ADD, page_data)
        page = Page.objects.all()[0]
        plugin_data = {
            'plugin_type':"TextPlugin",
            'language':settings.LANGUAGES[0][0],
            'placeholder':page.placeholders.get(slot="body").pk,
        }
        response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
        self.assertEquals(response.status_code, 200)
        # the add view responds with the new plugin's primary key
        self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
        # now edit the plugin
        edit_url = URL_CMS_PLUGIN_EDIT + response.content + "/"
        response = self.client.get(edit_url)
        self.assertEquals(response.status_code, 200)
        data = {
            "body":"Hello World"
        }
        response = self.client.post(edit_url, data)
        self.assertEquals(response.status_code, 200)
        txt = Text.objects.all()[0]
        self.assertEquals("Hello World", txt.body)
    def test_copy_plugins(self):
        """
        Test that copying plugins works as expected.
        """
        # create some objects
        page_en = create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
        page_de = create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
        ph_en = page_en.placeholders.get(slot="body")
        ph_de = page_de.placeholders.get(slot="body")
        # add the text plugin
        text_plugin_en = add_plugin(ph_en, "TextPlugin", "en", body="Hello World")
        self.assertEquals(text_plugin_en.pk, CMSPlugin.objects.all()[0].pk)
        # add a *nested* link plugin
        link_plugin_en = add_plugin(ph_en, "LinkPlugin", "en", target=text_plugin_en,
                                 name="A Link", url="https://www.django-cms.org")
        # the call above to add a child makes a plugin reload required here.
        text_plugin_en = self.reload(text_plugin_en)
        # check the relations
        self.assertEquals(text_plugin_en.get_children().count(), 1)
        self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
        # just sanity check that so far everything went well
        self.assertEqual(CMSPlugin.objects.count(), 2)
        # copy the plugins to the german placeholder
        copy_plugins_to(ph_en.cmsplugin_set.all(), ph_de, 'de')
        self.assertEqual(ph_de.cmsplugin_set.filter(parent=None).count(), 1)
        text_plugin_de = ph_de.cmsplugin_set.get(parent=None).get_plugin_instance()[0]
        self.assertEqual(text_plugin_de.get_children().count(), 1)
        link_plugin_de = text_plugin_de.get_children().get().get_plugin_instance()[0]
        # check we have twice as many plugins as before
        self.assertEqual(CMSPlugin.objects.count(), 4)
        # check language plugins
        self.assertEqual(CMSPlugin.objects.filter(language='de').count(), 2)
        self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 2)
        text_plugin_en = self.reload(text_plugin_en)
        link_plugin_en = self.reload(link_plugin_en)
        # check the relations in english didn't change
        self.assertEquals(text_plugin_en.get_children().count(), 1)
        self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
        # the copies carry the same content as the originals
        self.assertEqual(link_plugin_de.name, link_plugin_en.name)
        self.assertEqual(link_plugin_de.url, link_plugin_en.url)
        self.assertEqual(text_plugin_de.body, text_plugin_en.body)
    def test_remove_plugin_before_published(self):
        """
        When removing a draft plugin we would expect the public copy of the plugin to also be removed
        """
        # add a page
        page_data = self.get_new_page_data()
        response = self.client.post(URL_CMS_PAGE_ADD, page_data)
        page = Page.objects.all()[0]
        # add a plugin
        plugin_data = {
            'plugin_type':"TextPlugin",
            'language':settings.LANGUAGES[0][0],
            'placeholder':page.placeholders.get(slot="body").pk,
        }
        response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
        self.assertEquals(response.status_code, 200)
        self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
        # there should be only 1 plugin
        self.assertEquals(CMSPlugin.objects.all().count(), 1)
        # delete the plugin
        plugin_data = {
            'plugin_id': int(response.content)
        }
        remove_url = URL_CMS_PLUGIN_REMOVE
        response = self.client.post(remove_url, plugin_data)
        self.assertEquals(response.status_code, 200)
        # there should be no plugins
        self.assertEquals(0, CMSPlugin.objects.all().count())
    def test_remove_plugin_after_published(self):
        """Deleting the draft plugin of a published page also removes the
        public copy that publishing created."""
        # add a page
        page_data = self.get_new_page_data()
        response = self.client.post(URL_CMS_PAGE_ADD, page_data)
        page = Page.objects.all()[0]
        # add a plugin
        plugin_data = {
            'plugin_type':"TextPlugin",
            'language':settings.LANGUAGES[0][0],
            'placeholder':page.placeholders.get(slot="body").pk,
        }
        response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
        plugin_id = int(response.content)
        self.assertEquals(response.status_code, 200)
        self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
        # there should be only 1 plugin
        self.assertEquals(CMSPlugin.objects.all().count(), 1)
        # publish page
        response = self.client.post(URL_CMS_PAGE + "%d/change-status/" % page.pk, {1 :1})
        self.assertEqual(response.status_code, 200)
        # there should now be two plugins - 1 draft, 1 public
        self.assertEquals(CMSPlugin.objects.all().count(), 2)
        # delete the plugin
        plugin_data = {
            'plugin_id': plugin_id
        }
        remove_url = URL_CMS_PLUGIN_REMOVE
        response = self.client.post(remove_url, plugin_data)
        self.assertEquals(response.status_code, 200)
        # there should be no plugins
        self.assertEquals(CMSPlugin.objects.all().count(), 0)
    def test_remove_plugin_not_associated_to_page(self):
        """
        Test case for PlaceholderField
        """
        page_data = self.get_new_page_data()
        response = self.client.post(URL_CMS_PAGE_ADD, page_data)
        page = Page.objects.all()[0]
        # add a plugin
        plugin_data = {
            'plugin_type':"TextPlugin",
            'language':settings.LANGUAGES[0][0],
            'placeholder':page.placeholders.get(slot="body").pk,
        }
        response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
        self.assertEquals(response.status_code, 200)
        self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
        # there should be only 1 plugin
        self.assertEquals(CMSPlugin.objects.all().count(), 1)
        # try to add a child plugin into a placeholder that belongs to no page
        ph = Placeholder(slot="subplugin")
        ph.save()
        plugin_data = {
            'plugin_type':"TextPlugin",
            'language':settings.LANGUAGES[0][0],
            'placeholder': ph.pk,
            'parent': int(response.content)
        }
        response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
        # no longer allowed for security reasons
        self.assertEqual(response.status_code, 404)
    def test_register_plugin_twice_should_raise(self):
        """Registering the same plugin class twice raises
        PluginAlreadyRegistered; unregistering restores the pool."""
        number_of_plugins_before = len(plugin_pool.get_all_plugins())
        # The first time we register the plugin is should work
        plugin_pool.register_plugin(DumbFixturePlugin)
        # Let's add it a second time. We should catch and exception
        raised = False
        try:
            plugin_pool.register_plugin(DumbFixturePlugin)
        except PluginAlreadyRegistered:
            raised = True
        self.assertTrue(raised)
        # Let's also unregister the plugin now, and assert it's not in the
        # pool anymore
        plugin_pool.unregister_plugin(DumbFixturePlugin)
        # Let's make sure we have the same number of plugins as before:
        number_of_plugins_after = len(plugin_pool.get_all_plugins())
        self.assertEqual(number_of_plugins_before, number_of_plugins_after)
    def test_unregister_non_existing_plugin_should_raise(self):
        """Unregistering a plugin that was never registered raises
        PluginNotRegistered and leaves the pool untouched."""
        number_of_plugins_before = len(plugin_pool.get_all_plugins())
        raised = False
        try:
            # There should not be such a plugin registered if the others tests
            # don't leak plugins
            plugin_pool.unregister_plugin(DumbFixturePlugin)
        except PluginNotRegistered:
            raised = True
        self.assertTrue(raised)
        # Let's count, to make sure we didn't remove a plugin accidentally.
        number_of_plugins_after = len(plugin_pool.get_all_plugins())
        self.assertEqual(number_of_plugins_before, number_of_plugins_after)
    def test_inheritplugin_media(self):
        """
        Test case for InheritPagePlaceholder
        """
        with SettingsOverride(CMS_MODERATOR=False):
            # page that owns the real plugin content
            inheritfrompage = create_page('page to inherit from',
                                          'nav_playground.html',
                                          'en')
            body = inheritfrompage.placeholders.get(slot="body")
            plugin = TwitterRecentEntries(
                plugin_type='TwitterRecentEntriesPlugin',
                placeholder=body,
                position=1,
                language=settings.LANGUAGE_CODE,
                twitter_user='djangocms',
            )
            plugin.insert_at(None, position='last-child', save=True)
            # second page inherits the placeholder from the first
            page = create_page('inherit from page',
                               'nav_playground.html',
                               'en',
                               published=True)
            inherited_body = page.placeholders.get(slot="body")
            inherit_plugin = InheritPagePlaceholder(
                plugin_type='InheritPagePlaceholderPlugin',
                placeholder=inherited_body,
                position=1,
                language=settings.LANGUAGE_CODE,
                from_page=inheritfrompage,
                from_language=settings.LANGUAGE_CODE)
            inherit_plugin.insert_at(None, position='last-child', save=True)
            self.client.logout()
            # the inherited twitter plugin's media must appear on the page
            response = self.client.get(page.get_absolute_url())
            self.assertTrue('%scms/js/libs/jquery.tweet.js' % settings.STATIC_URL in response.content, response.content)
    def test_copy_textplugin(self):
        """
        Test that copying of textplugins replaces references to copied plugins
        """
        page = create_page("page", "nav_playground.html", "en")
        placeholder = page.placeholders.get(slot='body')
        # parent text plugin
        plugin_base = CMSPlugin(
            plugin_type='TextPlugin',
            placeholder=placeholder,
            position=1,
            language=self.FIRST_LANG)
        plugin_base.insert_at(None, position='last-child', save=False)
        plugin = Text(body='')
        plugin_base.set_base_attr(plugin)
        plugin.save()
        # two child text plugins referenced from the parent's body
        plugin_ref_1_base = CMSPlugin(
            plugin_type='TextPlugin',
            placeholder=placeholder,
            position=1,
            language=self.FIRST_LANG)
        plugin_ref_1_base.insert_at(plugin_base, position='last-child', save=False)
        plugin_ref_1 = Text(body='')
        plugin_ref_1_base.set_base_attr(plugin_ref_1)
        plugin_ref_1.save()
        plugin_ref_2_base = CMSPlugin(
            plugin_type='TextPlugin',
            placeholder=placeholder,
            position=2,
            language=self.FIRST_LANG)
        plugin_ref_2_base.insert_at(plugin_base, position='last-child', save=False)
        plugin_ref_2 = Text(body='')
        plugin_ref_2_base.set_base_attr(plugin_ref_2)
        plugin_ref_2.save()
        plugin.body = plugin_tags_to_admin_html(' {{ plugin_object %s }} {{ plugin_object %s }} ' % (str(plugin_ref_1.pk), str(plugin_ref_2.pk)))
        plugin.save()
        self.assertEquals(plugin.pk, 1)
        page_data = self.get_new_page_data()
        #create 2nd language page
        page_data.update({
            'language': self.SECOND_LANG,
            'title': "%s %s" % (page.get_title(), self.SECOND_LANG),
        })
        response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + "?language=%s" % self.SECOND_LANG, page_data)
        self.assertRedirects(response, URL_CMS_PAGE)
        self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)
        self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)
        self.assertEquals(CMSPlugin.objects.count(), 3)
        self.assertEquals(Page.objects.all().count(), 1)
        copy_data = {
            'placeholder': placeholder.pk,
            'language': self.SECOND_LANG,
            'copy_from': self.FIRST_LANG,
        }
        response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
        self.assertEquals(response.status_code, 200)
        self.assertEqual(response.content.count('<li '), 3)
        # assert copy success
        self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)
        self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 3)
        self.assertEquals(CMSPlugin.objects.count(), 6)
        # the copied parent body must reference the copied children, not the
        # original ones
        new_plugin = Text.objects.get(pk=6)
        self.assertEquals(plugin_tags_to_id_list(new_plugin.body), [u'4', u'5'])
class FileSystemPluginTests(PluginsTestBaseCase):
    """Tests for the file plugin that touch the real filesystem
    (STATIC_ROOT / MEDIA_ROOT)."""
    def setUp(self):
        super(FileSystemPluginTests, self).setUp()
        # Link the static files into STATIC_ROOT so static assets resolve.
        call_command('collectstatic', interactive=False, verbosity=0, link=True)
    def tearDown(self):
        # Clear everything collectstatic / uploads created; the walk is
        # bottom-up (topdown=False) so children are removed before parents.
        for directory in [settings.STATIC_ROOT, settings.MEDIA_ROOT]:
            for root, dirs, files in os.walk(directory, topdown=False):
                # We need to walk() the directory tree since rmdir() does not allow
                # to remove non-empty directories...
                for name in files:
                    # Start by killing all files we walked
                    os.remove(os.path.join(root, name))
                for name in dirs:
                    # Now all directories we walked...
                    os.rmdir(os.path.join(root, name))
        super(FileSystemPluginTests, self).tearDown()
    def test_fileplugin_icon_uppercase(self):
        # A file saved with an uppercase extension still yields an icon URL
        # containing 'jpg'.
        page = create_page('testpage', 'nav_playground.html', 'en')
        body = page.placeholders.get(slot="body")
        plugin = File(
            plugin_type='FilePlugin',
            placeholder=body,
            position=1,
            language=settings.LANGUAGE_CODE,
        )
        plugin.file.save("UPPERCASE.JPG", SimpleUploadedFile("UPPERCASE.jpg", "content"), False)
        plugin.insert_at(None, position='last-child', save=True)
        self.assertNotEquals(plugin.get_icon_url().find('jpg'), -1)
class PluginManyToManyTestCase(PluginsTestBaseCase):
    """Tests for a plugin whose model carries a ManyToMany field
    (ArticlePluginModel.sections): add/edit through the admin, publishing,
    and cross-language copying of the M2M relations."""
    def setUp(self):
        self.super_user = User(username="test", is_staff = True, is_active = True, is_superuser = True)
        self.super_user.set_password("test")
        self.super_user.save()
        self.slave = User(username="slave", is_staff=True, is_active=True, is_superuser=False)
        self.slave.set_password("slave")
        self.slave.save()
        self._login_context = self.login_user_context(self.super_user)
        self._login_context.__enter__()
        # create 3 sections
        self.sections = []
        self.section_pks = []
        for i in range(3):
            section = Section.objects.create(name="section %s" %i)
            self.sections.append(section)
            self.section_pks.append(section.pk)
        self.section_count = len(self.sections)
        # create 10 articles by section
        for section in self.sections:
            for j in range(10):
                Article.objects.create(
                    title="article %s" % j,
                    section=section
                )
        self.FIRST_LANG = settings.LANGUAGES[0][0]
        self.SECOND_LANG = settings.LANGUAGES[1][0]
    def test_add_plugin_with_m2m(self):
        """Adding and editing an article plugin through the admin persists
        all selected sections."""
        # add a new text plugin
        page_data = self.get_new_page_data()
        self.client.post(URL_CMS_PAGE_ADD, page_data)
        page = Page.objects.all()[0]
        placeholder = page.placeholders.get(slot="body")
        plugin_data = {
            'plugin_type': "ArticlePlugin",
            'language': self.FIRST_LANG,
            'placeholder': placeholder.pk,
        }
        response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
        self.assertEquals(response.status_code, 200)
        self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
        # now edit the plugin
        edit_url = URL_CMS_PLUGIN_EDIT + response.content + "/"
        response = self.client.get(edit_url)
        self.assertEquals(response.status_code, 200)
        data = {
            'title': "Articles Plugin 1",
            "sections": self.section_pks
        }
        response = self.client.post(edit_url, data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(ArticlePluginModel.objects.count(), 1)
        plugin = ArticlePluginModel.objects.all()[0]
        self.assertEquals(self.section_count, plugin.sections.count())
    def test_add_plugin_with_m2m_and_publisher(self):
        """Publishing a page with an article plugin duplicates the plugin
        (draft + public) and both copies keep all M2M sections."""
        page_data = self.get_new_page_data()
        self.client.post(URL_CMS_PAGE_ADD, page_data)
        page = Page.objects.all()[0]
        placeholder = page.placeholders.get(slot="body")
        # add a plugin
        plugin_data = {
            'plugin_type': "ArticlePlugin",
            'language': self.FIRST_LANG,
            'placeholder': placeholder.pk,
        }
        response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
        self.assertEquals(response.status_code, 200)
        self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
        # there should be only 1 plugin
        self.assertEquals(1, CMSPlugin.objects.all().count())
        articles_plugin_pk = int(response.content)
        self.assertEquals(articles_plugin_pk, CMSPlugin.objects.all()[0].pk)
        # now edit the plugin
        edit_url = URL_CMS_PLUGIN_EDIT + response.content + "/"
        data = {
            'title': "Articles Plugin 1",
            'sections': self.section_pks
        }
        response = self.client.post(edit_url, data)
        self.assertEquals(response.status_code, 200)
        self.assertEquals(1, ArticlePluginModel.objects.count())
        articles_plugin = ArticlePluginModel.objects.all()[0]
        self.assertEquals(u'Articles Plugin 1', articles_plugin.title)
        self.assertEquals(self.section_count, articles_plugin.sections.count())
        # check publish box
        page = publish_page(page, self.super_user)
        # there should now be two plugins - 1 draft, 1 public
        self.assertEquals(2, ArticlePluginModel.objects.all().count())
        # every copy must carry the full set of sections
        db_counts = [plugin.sections.count() for plugin in ArticlePluginModel.objects.all()]
        expected = [self.section_count for i in range(len(db_counts))]
        self.assertEqual(expected, db_counts)
    def test_copy_plugin_with_m2m(self):
        """Copying an article plugin to another language also copies its
        M2M section relations."""
        page = create_page("page", "nav_playground.html", "en")
        placeholder = page.placeholders.get(slot='body')
        plugin = ArticlePluginModel(
            plugin_type='ArticlePlugin',
            placeholder=placeholder,
            position=1,
            language=self.FIRST_LANG)
        plugin.insert_at(None, position='last-child', save=True)
        edit_url = URL_CMS_PLUGIN_EDIT + str(plugin.pk) + "/"
        data = {
            'title': "Articles Plugin 1",
            "sections": self.section_pks
        }
        response = self.client.post(edit_url, data)
        self.assertEquals(response.status_code, 200)
        self.assertEqual(ArticlePluginModel.objects.count(), 1)
        self.assertEqual(ArticlePluginModel.objects.all()[0].sections.count(), self.section_count)
        page_data = self.get_new_page_data()
        #create 2nd language page
        page_data.update({
            'language': self.SECOND_LANG,
            'title': "%s %s" % (page.get_title(), self.SECOND_LANG),
        })
        response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + "?language=%s" % self.SECOND_LANG, page_data)
        self.assertRedirects(response, URL_CMS_PAGE)
        self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 1)
        self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)
        self.assertEquals(CMSPlugin.objects.count(), 1)
        self.assertEquals(Page.objects.all().count(), 1)
        copy_data = {
            'placeholder': placeholder.pk,
            'language': self.SECOND_LANG,
            'copy_from': self.FIRST_LANG,
        }
        response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
        self.assertEquals(response.status_code, 200)
        self.assertEqual(response.content.count('<li '), 1)
        # assert copy success
        self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 1)
        self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 1)
        self.assertEquals(CMSPlugin.objects.count(), 2)
        # both the original and the copy keep the full set of sections
        db_counts = [plugin.sections.count() for plugin in ArticlePluginModel.objects.all()]
        expected = [self.section_count for i in range(len(db_counts))]
        self.assertEqual(expected, db_counts)
class SekizaiTests(TestCase):
    """Sanity checks for the sekizai template configuration check."""
    def test_post_patch_check(self):
        # Passes (does not raise) with the default settings.
        post_patch_check()
    def test_fail(self):
        # With this template configuration the check raises
        # ImproperlyConfigured.
        with SettingsOverride(CMS_TEMPLATES=[('fail.html', 'fail')]):
            self.assertRaises(ImproperlyConfigured, post_patch_check)
class LinkPluginTestCase(PluginsTestBaseCase):
    """Form-level checks for the link plugin."""
    def test_does_not_verify_existance_of_url(self):
        # Validation must not try to resolve the target, so a URL that
        # points nowhere is still accepted.
        data = {'name': 'Linkname', 'url': 'http://www.nonexistant.test'}
        form = LinkForm(data)
        self.assertEquals(form.is_valid(), True)
class NoDatabasePluginTests(TestCase):
    """Plugin model metaclass tests that need no database access."""
    def test_render_meta_is_unique(self):
        # Text and Link instances must not share one _render_meta object.
        text = Text()
        link = Link()
        self.assertNotEqual(id(text._render_meta), id(link._render_meta))
    def test_render_meta_does_not_leak(self):
        text = Text()
        link = Link()
        text._render_meta.text_enabled = False
        link._render_meta.text_enabled = False
        self.assertFalse(text._render_meta.text_enabled)
        self.assertFalse(link._render_meta.text_enabled)
        # Flipping the flag on one plugin class must not affect the other.
        link._render_meta.text_enabled = True
        self.assertFalse(text._render_meta.text_enabled)
        self.assertTrue(link._render_meta.text_enabled)
    def test_db_table_hack(self):
        # A dynamically created plugin model gets the cmsplugin_ table prefix.
        # TODO: Django tests seem to leak models from test methods, somehow
        # we should clear django.db.models.loading.app_cache in tearDown.
        plugin_class = PluginModelBase('TestPlugin', (CMSPlugin,), {'__module__': 'cms.tests.plugins'})
        self.assertEqual(plugin_class._meta.db_table, 'cmsplugin_testplugin')
    def test_db_table_hack_with_mixin(self):
        # The prefix is applied even when CMSPlugin sits between plain mixins
        # in the bases tuple.
        class LeftMixin: pass
        class RightMixin: pass
        plugin_class = PluginModelBase('TestPlugin2', (LeftMixin, CMSPlugin, RightMixin), {'__module__': 'cms.tests.plugins'})
        self.assertEqual(plugin_class._meta.db_table, 'cmsplugin_testplugin2')
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the base baremetal driver class."""
import mox
from oslo.config import cfg
from nova.compute import power_state
from nova import exception
from nova import test
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import driver as bm_driver
from nova.virt.baremetal import fake
from nova.virt.baremetal import pxe
CONF = cfg.CONF
# Flags shared by every test case in this module.
COMMON_FLAGS = dict(
    firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
    host='test_host',
)
# Flags for the 'baremetal' option group: every sub-driver is swapped for
# its fake counterpart so no real hardware is touched.
BAREMETAL_FLAGS = dict(
    driver='nova.virt.baremetal.fake.FakeDriver',
    instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
    power_manager='nova.virt.baremetal.fake.FakePowerManager',
    vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
    volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
    group='baremetal',
)
class BareMetalDriverNoDBTestCase(test.NoDBTestCase):
    """Checks on BareMetalDriver that need no database fixture."""
    def setUp(self):
        super(BareMetalDriverNoDBTestCase, self).setUp()
        self.flags(**COMMON_FLAGS)
        self.flags(**BAREMETAL_FLAGS)
        self.driver = bm_driver.BareMetalDriver(None)
    def test_validate_driver_loading(self):
        # Each configured fake class must have been instantiated.
        expectations = [
            (self.driver.driver, fake.FakeDriver),
            (self.driver.vif_driver, fake.FakeVifDriver),
            (self.driver.volume_driver, fake.FakeVolumeDriver),
            (self.driver.firewall_driver, fake.FakeFirewallDriver),
        ]
        for obj, klass in expectations:
            self.assertIsInstance(obj, klass)
class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
    def setUp(self):
        super(BareMetalDriverWithDBTestCase, self).setUp()
        self.flags(**COMMON_FLAGS)
        self.flags(**BAREMETAL_FLAGS)
        # Stub out the image service so no real image backend is contacted.
        fake_image.stub_out_image_service(self.stubs)
        self.context = utils.get_test_admin_context()
        self.driver = bm_driver.BareMetalDriver(None)
        self.addCleanup(fake_image.FakeImageService_reset)
    def _create_node(self, node_info=None, nic_info=None):
        """Create a bare-metal node (with NICs) in the test DB.

        Returns a dict bundling everything the tests need: the raw
        node/nic input, the created node row, a test instance bound to the
        node, and ready-made keyword dicts for driver.spawn() and
        driver.destroy().
        """
        result = {}
        if node_info is None:
            node_info = bm_db_utils.new_bm_node(
                id=123,
                service_host='test_host',
                cpus=2,
                memory_mb=2048,
            )
        if nic_info is None:
            nic_info = [
                {'address': '01:23:45:67:89:01', 'datapath_id': '0x1',
                 'port_no': 1},
                {'address': '01:23:45:67:89:02', 'datapath_id': '0x2',
                 'port_no': 2},
            ]
        result['node_info'] = node_info
        result['nic_info'] = nic_info
        result['node'] = db.bm_node_create(self.context, node_info)
        for nic in nic_info:
            db.bm_interface_create(
                self.context,
                result['node']['id'],
                nic['address'],
                nic['datapath_id'],
                nic['port_no'],
            )
        result['instance'] = utils.get_test_instance()
        # Bind the instance to the created node via its uuid.
        result['instance']['node'] = result['node']['uuid']
        result['spawn_params'] = dict(
            admin_password='test_pass',
            block_device_info=None,
            context=self.context,
            image_meta=utils.get_test_image_info(
                None, result['instance']),
            injected_files=[('/fake/path', 'hello world')],
            instance=result['instance'],
            network_info=utils.get_test_network_info(),
        )
        result['destroy_params'] = dict(
            context=self.context,
            instance=result['instance'],
            network_info=result['spawn_params']['network_info'],
            block_device_info=result['spawn_params']['block_device_info'],
        )
        return result
    def test_get_host_stats(self):
        # get_host_stats() returns one stats dict per node, reflecting the
        # node's resources and the configured instance_type_extra_specs.
        node = self._create_node()
        stats = self.driver.get_host_stats()
        self.assertIsInstance(stats, list)
        self.assertEqual(len(stats), 1)
        stats = stats[0]
        self.assertEqual(stats['cpu_arch'], 'test')
        self.assertEqual(stats['test_spec'], 'test_value')
        self.assertEqual(stats['hypervisor_type'], 'baremetal')
        self.assertEqual(stats['hypervisor_hostname'], node['node']['uuid'])
        self.assertEqual(stats['host'], 'test_host')
        self.assertEqual(stats['vcpus'], 2)
        self.assertEqual(stats['host_memory_total'], 2048)
    def test_spawn_ok(self):
        # A successful spawn marks the node ACTIVE and records the
        # instance's uuid and hostname on the node row.
        node = self._create_node()
        self.driver.spawn(**node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
        self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
        self.assertEqual(row['instance_name'], node['instance']['hostname'])
def test_macs_from_nic_for_instance(self):
node = self._create_node()
expected = set([nic['address'] for nic in node['nic_info']])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance_after_spawn(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
expected = set([nic['address'] for nic in node['nic_info']])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance(self):
node = self._create_node()
expected = set(['01:23:45:67:89:01', '01:23:45:67:89:02'])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
    def test_macs_for_instance_no_interfaces(self):
        # Nodes cannot boot with no MACs, so we raise an error if that happens.
        node = self._create_node(nic_info=[])
        self.assertRaises(exception.NovaException,
                          self.driver.macs_for_instance, node['instance'])
    def test_spawn_node_already_associated(self):
        # Spawning on a node already bound to another instance fails and
        # leaves the node's task_state untouched.
        node = self._create_node()
        db.bm_node_update(self.context, node['node']['id'],
                          {'instance_uuid': '1234-5678'})
        self.assertRaises(exception.NovaException,
                          self.driver.spawn, **node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertIsNone(row['task_state'])
    def test_spawn_node_in_use(self):
        # A second spawn on the same node raises.
        node = self._create_node()
        self.driver.spawn(**node['spawn_params'])
        self.assertRaises(exception.NovaException,
                          self.driver.spawn, **node['spawn_params'])
    def test_spawn_node_not_found(self):
        # If the node uuid referenced by the instance cannot be found,
        # spawn raises and task_state stays unset.
        node = self._create_node()
        db.bm_node_update(self.context, node['node']['id'],
                          {'uuid': 'hide-this-node'})
        self.assertRaises(exception.NovaException,
                          self.driver.spawn, **node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertIsNone(row['task_state'])
    def test_spawn_fails(self):
        # If powering the node on fails, the spawn error propagates and the
        # node is cleaned up to the DELETED state.
        node = self._create_node()
        self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
        fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                          self.driver.spawn, **node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.DELETED)
    def test_spawn_fails_to_cleanup(self):
        # If activation fails AND the subsequent cleanup power-off also
        # fails, the node ends up in the ERROR state. The mox expectations
        # replay in recorded order: one successful deactivate, the failing
        # activate, then the failing cleanup deactivate.
        node = self._create_node()
        self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
        self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
        fake.FakePowerManager.deactivate_node().AndReturn(None)
        fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
        fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                          self.driver.spawn, **node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.ERROR)
    def test_destroy_ok(self):
        # Destroy after a successful spawn marks the node DELETED and
        # clears its instance association.
        node = self._create_node()
        self.driver.spawn(**node['spawn_params'])
        self.driver.destroy(**node['destroy_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.DELETED)
        self.assertIsNone(row['instance_uuid'])
        self.assertIsNone(row['instance_name'])
    def test_destroy_fails(self):
        # If the power-off during destroy fails, the node is left in ERROR
        # and stays associated with the instance. The first (successful)
        # deactivate expectation is consumed during spawn.
        node = self._create_node()
        self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
        fake.FakePowerManager.deactivate_node().AndReturn(None)
        fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
        self.mox.ReplayAll()
        self.driver.spawn(**node['spawn_params'])
        self.assertRaises(test.TestingException,
                          self.driver.destroy, **node['destroy_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.ERROR)
        self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
    def test_get_available_resources(self):
        """Reported resources track spawn/destroy of an instance."""
        node = self._create_node()
        resources = self.driver.get_available_resource(node['node']['uuid'])
        self.assertEqual(resources['memory_mb'],
                         node['node_info']['memory_mb'])
        self.assertEqual(resources['memory_mb_used'], 0)
        self.assertEqual(resources['supported_instances'],
                         '[["test", "baremetal", "baremetal"]]')
        # While an instance occupies the node, all node memory is in use.
        self.driver.spawn(**node['spawn_params'])
        resources = self.driver.get_available_resource(node['node']['uuid'])
        self.assertEqual(resources['memory_mb_used'],
                         node['node_info']['memory_mb'])
        # Destroying the instance frees the memory again.
        self.driver.destroy(**node['destroy_params'])
        resources = self.driver.get_available_resource(node['node']['uuid'])
        self.assertEqual(resources['memory_mb_used'], 0)
        self.assertEqual(resources['stats']['test_spec'], 'test_value')
    def test_get_available_nodes(self):
        """The node list follows DB contents, with or without refresh."""
        self.assertEqual(0, len(self.driver.get_available_nodes()))
        self.assertEqual(0, len(self.driver.get_available_nodes(refresh=True)))
        node1 = self._create_node()
        self.assertEqual(1, len(self.driver.get_available_nodes()))
        node1['instance']['hostname'] = 'test-host-1'
        # Spawning an instance must not remove the node from the list.
        self.driver.spawn(**node1['spawn_params'])
        self.assertEqual(1, len(self.driver.get_available_nodes()))
        self.assertEqual([node1['node']['uuid']],
                         self.driver.get_available_nodes())
    def test_list_instances(self):
        """list_instances returns the hostnames of spawned instances only."""
        self.assertEqual([], self.driver.list_instances())
        node1 = self._create_node()
        # A node without a spawned instance is not listed.
        self.assertEqual([], self.driver.list_instances())
        node_info = bm_db_utils.new_bm_node(
            id=456,
            service_host='test_host',
            cpus=2,
            memory_mb=2048,
        )
        nic_info = [
            {'address': 'cc:cc:cc', 'datapath_id': '0x1',
             'port_no': 1},
            {'address': 'dd:dd:dd', 'datapath_id': '0x2',
             'port_no': 2},
        ]
        node2 = self._create_node(node_info=node_info, nic_info=nic_info)
        self.assertEqual([], self.driver.list_instances())
        node1['instance']['hostname'] = 'test-host-1'
        node2['instance']['hostname'] = 'test-host-2'
        # Instances appear as they are spawned ...
        self.driver.spawn(**node1['spawn_params'])
        self.assertEqual(['test-host-1'],
                         self.driver.list_instances())
        self.driver.spawn(**node2['spawn_params'])
        self.assertEqual(['test-host-1', 'test-host-2'],
                         self.driver.list_instances())
        # ... and disappear as they are destroyed.
        self.driver.destroy(**node1['destroy_params'])
        self.assertEqual(['test-host-2'],
                         self.driver.list_instances())
        self.driver.destroy(**node2['destroy_params'])
        self.assertEqual([], self.driver.list_instances())
    def test_get_info_no_such_node(self):
        """get_info for an unassociated node raises InstanceNotFound."""
        node = self._create_node()
        self.assertRaises(exception.InstanceNotFound,
                          self.driver.get_info,
                          node['instance'])
    def test_get_info_ok(self):
        """get_info reports RUNNING for an ACTIVE, associated node."""
        node = self._create_node()
        db.bm_node_associate_and_update(self.context, node['node']['uuid'],
                          {'instance_uuid': node['instance']['uuid'],
                           'instance_name': node['instance']['hostname'],
                           'task_state': baremetal_states.ACTIVE})
        res = self.driver.get_info(node['instance'])
        self.assertEqual(res['state'], power_state.RUNNING)
    def test_get_info_with_defunct_pm(self):
        # test fix for bug 1178378
        """An unknown power status must map to NOSTATE, not SHUTDOWN."""
        node = self._create_node()
        db.bm_node_associate_and_update(self.context, node['node']['uuid'],
                          {'instance_uuid': node['instance']['uuid'],
                           'instance_name': node['instance']['hostname'],
                           'task_state': baremetal_states.ACTIVE})
        # fake the power manager and don't get a power state
        self.mox.StubOutWithMock(fake.FakePowerManager, 'is_power_on')
        fake.FakePowerManager.is_power_on().AndReturn(None)
        self.mox.ReplayAll()
        res = self.driver.get_info(node['instance'])
        # prior to the fix, returned power_state was SHUTDOWN
        self.assertEqual(res['state'], power_state.NOSTATE)
        self.mox.VerifyAll()
def test_dhcp_options_for_instance(self):
node = self._create_node()
fake_bootfile = "pxelinux.0"
self.mox.StubOutWithMock(pxe, 'get_pxe_bootfile_name')
pxe.get_pxe_bootfile_name(mox.IgnoreArg()).AndReturn(fake_bootfile)
self.mox.ReplayAll()
expected = [{'opt_name': 'bootfile-name', 'opt_value': fake_bootfile},
{'opt_name': 'server-ip-address', 'opt_value': CONF.my_ip},
{'opt_name': 'tftp-server', 'opt_value': CONF.my_ip}]
res = self.driver.dhcp_options_for_instance(node['instance'])
self.assertEqual(expected.sort(), res.sort())
self.mox.VerifyAll()
    def test_attach_volume(self):
        """attach_volume delegates to the configured volume driver."""
        connection_info = {'_fake_connection_info': None}
        instance = utils.get_test_instance()
        mountpoint = '/dev/sdd'
        self.mox.StubOutWithMock(self.driver.volume_driver, 'attach_volume')
        # Record the single expected delegation call.
        self.driver.volume_driver.attach_volume(connection_info,
                                                instance,
                                                mountpoint)
        self.mox.ReplayAll()
        self.driver.attach_volume(None, connection_info, instance, mountpoint)
    def test_detach_volume(self):
        """detach_volume delegates to the configured volume driver."""
        connection_info = {'_fake_connection_info': None}
        instance = utils.get_test_instance()
        mountpoint = '/dev/sdd'
        self.mox.StubOutWithMock(self.driver.volume_driver, 'detach_volume')
        # Record the single expected delegation call.
        self.driver.volume_driver.detach_volume(connection_info,
                                                instance,
                                                mountpoint)
        self.mox.ReplayAll()
        self.driver.detach_volume(connection_info, instance, mountpoint)
    def test_attach_block_devices(self):
        """_attach_block_devices attaches each volume in mapping order."""
        connection_info_1 = {'_fake_connection_info_1': None}
        connection_info_2 = {'_fake_connection_info_2': None}
        block_device_mapping = [{'connection_info': connection_info_1,
                                 'mount_device': '/dev/sde'},
                                {'connection_info': connection_info_2,
                                 'mount_device': '/dev/sdf'}]
        block_device_info = {'block_device_mapping': block_device_mapping}
        instance = utils.get_test_instance()
        self.mox.StubOutWithMock(self.driver, 'attach_volume')
        # Record one attach_volume call per mapping entry.
        self.driver.attach_volume(None, connection_info_1, instance,
                                  '/dev/sde')
        self.driver.attach_volume(None, connection_info_2, instance,
                                  '/dev/sdf')
        self.mox.ReplayAll()
        self.driver._attach_block_devices(instance, block_device_info)
    def test_detach_block_devices(self):
        """_detach_block_devices detaches each volume in mapping order."""
        connection_info_1 = {'_fake_connection_info_1': None}
        connection_info_2 = {'_fake_connection_info_2': None}
        block_device_mapping = [{'connection_info': connection_info_1,
                                 'mount_device': '/dev/sde'},
                                {'connection_info': connection_info_2,
                                 'mount_device': '/dev/sdf'}]
        block_device_info = {'block_device_mapping': block_device_mapping}
        instance = utils.get_test_instance()
        self.mox.StubOutWithMock(self.driver, 'detach_volume')
        # Record one detach_volume call per mapping entry.
        self.driver.detach_volume(connection_info_1, instance, '/dev/sde')
        self.driver.detach_volume(connection_info_2, instance, '/dev/sdf')
        self.mox.ReplayAll()
        self.driver._detach_block_devices(instance, block_device_info)
| |
"""
Support for local control of entities by emulating the Philips Hue bridge.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/emulated_hue/
"""
import threading
import socket
import logging
import json
import os
import select
import voluptuous as vol
from homeassistant import util, core
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_FRIENDLY_NAME, SERVICE_TURN_OFF, SERVICE_TURN_ON,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
STATE_ON, HTTP_BAD_REQUEST
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_SUPPORTED_FEATURES, SUPPORT_BRIGHTNESS
)
from homeassistant.components.http import (
HomeAssistantView, HomeAssistantWSGI
)
# pylint: disable=unused-import
from homeassistant.components.http import REQUIREMENTS # noqa
import homeassistant.helpers.config_validation as cv
DOMAIN = 'emulated_hue'
_LOGGER = logging.getLogger(__name__)
# Keys accepted under the `emulated_hue:` section of configuration.yaml.
CONF_HOST_IP = 'host_ip'
CONF_LISTEN_PORT = 'listen_port'
CONF_OFF_MAPS_TO_ON_DOMAINS = 'off_maps_to_on_domains'
CONF_EXPOSE_BY_DEFAULT = 'expose_by_default'
CONF_EXPOSED_DOMAINS = 'exposed_domains'
# Per-entity attributes that override the exposure decision and the name
# reported to the Hue client.
ATTR_EMULATED_HUE = 'emulated_hue'
ATTR_EMULATED_HUE_NAME = 'emulated_hue_name'
# Defaults used when the corresponding option is missing from the config.
DEFAULT_LISTEN_PORT = 8300
DEFAULT_OFF_MAPS_TO_ON_DOMAINS = ['script', 'scene']
DEFAULT_EXPOSE_BY_DEFAULT = True
DEFAULT_EXPOSED_DOMAINS = [
    'switch', 'light', 'group', 'input_boolean', 'media_player', 'fan'
]
# Attribute names used by the Hue REST API itself.
HUE_API_STATE_ON = 'on'
HUE_API_STATE_BRI = 'bri'
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(CONF_HOST_IP): cv.string,
        vol.Optional(CONF_LISTEN_PORT, default=DEFAULT_LISTEN_PORT):
            vol.All(vol.Coerce(int), vol.Range(min=1, max=65535)),
        vol.Optional(CONF_OFF_MAPS_TO_ON_DOMAINS): cv.ensure_list,
        vol.Optional(CONF_EXPOSE_BY_DEFAULT): cv.boolean,
        vol.Optional(CONF_EXPOSED_DOMAINS): cv.ensure_list
    })
}, extra=vol.ALLOW_EXTRA)
def setup(hass, yaml_config):
    """Activate the emulated_hue component."""
    config = Config(yaml_config)
    # Dedicated WSGI server: the Hue API runs on its own port with no
    # authentication, separate from the main Home Assistant HTTP server.
    server = HomeAssistantWSGI(
        hass,
        development=False,
        server_host=config.host_ip_addr,
        server_port=config.listen_port,
        api_password=None,
        ssl_certificate=None,
        ssl_key=None,
        cors_origins=[],
        approved_ips=[]
    )
    server.register_view(DescriptionXmlView(hass, config))
    server.register_view(HueUsernameView(hass))
    server.register_view(HueLightsView(hass, config))
    # Answers SSDP discovery probes so clients (e.g. Amazon Echo) can
    # locate the emulated bridge.
    upnp_listener = UPNPResponderThread(
        config.host_ip_addr, config.listen_port)
    def start_emulated_hue_bridge(event):
        """Start the emulated hue bridge."""
        server.start()
        upnp_listener.start()
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_emulated_hue_bridge)
    def stop_emulated_hue_bridge(event):
        """Stop the emulated hue bridge."""
        upnp_listener.stop()
        server.stop()
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_emulated_hue_bridge)
    return True
# pylint: disable=too-few-public-methods
class Config(object):
    """Holds configuration variables for the emulated hue bridge."""
    def __init__(self, yaml_config):
        """Initialize the instance."""
        domain_config = yaml_config.get(DOMAIN, {})
        # IP address handed to clients (e.g. the Echo) during discovery.
        host_ip = domain_config.get(CONF_HOST_IP)
        if host_ip is None:
            host_ip = util.get_local_ip()
            _LOGGER.warning(
                "Listen IP address not specified, auto-detected address is %s",
                host_ip)
        self.host_ip_addr = host_ip
        # Port the emulated Hue bridge listens on.
        port = domain_config.get(CONF_LISTEN_PORT)
        if not isinstance(port, int):
            port = DEFAULT_LISTEN_PORT
            _LOGGER.warning(
                "Listen port not specified, defaulting to %s",
                port)
        self.listen_port = port
        # Domains whose "off" command is remapped to "on" -- useful for
        # scenes and scripts, which have no real notion of being off.
        off_maps = domain_config.get(CONF_OFF_MAPS_TO_ON_DOMAINS)
        if not isinstance(off_maps, list):
            off_maps = DEFAULT_OFF_MAPS_TO_ON_DOMAINS
        self.off_maps_to_on_domains = off_maps
        # Whether entities are exposed unless explicitly marked otherwise.
        self.expose_by_default = domain_config.get(
            CONF_EXPOSE_BY_DEFAULT, DEFAULT_EXPOSE_BY_DEFAULT)
        # Domains exposed by default when expose_by_default is True.
        self.exposed_domains = domain_config.get(
            CONF_EXPOSED_DOMAINS, DEFAULT_EXPOSED_DOMAINS)
class DescriptionXmlView(HomeAssistantView):
    """Handles requests for the description.xml file."""
    url = '/description.xml'
    name = 'description:xml'
    # Discovery clients have no HA credentials, so no auth is required.
    requires_auth = False
    def __init__(self, hass, config):
        """Initialize the instance of the view."""
        super().__init__(hass)
        self.config = config
    def get(self, request):
        """Handle a GET request.

        Returns the UPnP device description advertised by a real Hue
        bridge, with our own host/port substituted into the URLBase.
        """
        xml_template = """<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="urn:schemas-upnp-org:device-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<URLBase>http://{0}:{1}/</URLBase>
<device>
<deviceType>urn:schemas-upnp-org:device:Basic:1</deviceType>
<friendlyName>HASS Bridge ({0})</friendlyName>
<manufacturer>Royal Philips Electronics</manufacturer>
<manufacturerURL>http://www.philips.com</manufacturerURL>
<modelDescription>Philips hue Personal Wireless Lighting</modelDescription>
<modelName>Philips hue bridge 2015</modelName>
<modelNumber>BSB002</modelNumber>
<modelURL>http://www.meethue.com</modelURL>
<serialNumber>1234</serialNumber>
<UDN>uuid:2f402f80-da50-11e1-9b23-001788255acc</UDN>
</device>
</root>
"""
        resp_text = xml_template.format(
            self.config.host_ip_addr, self.config.listen_port)
        return self.Response(resp_text, mimetype='text/xml')
class HueUsernameView(HomeAssistantView):
    """Handle requests to create a username for the emulated hue bridge."""
    url = '/api'
    name = 'hue:api'
    extra_urls = ['/api/']
    requires_auth = False
    def __init__(self, hass):
        """Initialize the instance of the view."""
        super().__init__(hass)
    def post(self, request):
        """Handle a POST request.

        A real bridge hands out a fresh API username here; the emulated
        bridge always answers with the same fixed token.
        """
        body = request.json
        if 'devicetype' not in body:
            return self.json_message('devicetype not specified',
                                     HTTP_BAD_REQUEST)
        return self.json([{'success': {'username': '12345678901234567890'}}])
class HueLightsView(HomeAssistantView):
    """Handle requests for getting and setting info about entities."""
    url = '/api/<username>/lights'
    name = 'api:username:lights'
    extra_urls = ['/api/<username>/lights/<entity_id>',
                  '/api/<username>/lights/<entity_id>/state']
    requires_auth = False
    def __init__(self, hass, config):
        """Initialize the instance of the view."""
        super().__init__(hass)
        self.config = config
        # Maps entity_id -> (on_state, brightness) last requested through
        # the API; see put_light_state for why this cache exists.
        self.cached_states = {}
    def get(self, request, username, entity_id=None):
        """Handle a GET request.

        Dispatches to the lights list or a single light's state depending
        on the URL; GET on .../state is not part of the Hue API.
        """
        if entity_id is None:
            return self.get_lights_list()
        if not request.base_url.endswith('state'):
            return self.get_light_state(entity_id)
        return self.Response("Method not allowed", status=405)
    def put(self, request, username, entity_id=None):
        """Handle a PUT request (only valid on the .../state URL)."""
        if not request.base_url.endswith('state'):
            return self.Response("Method not allowed", status=405)
        content_type = request.environ.get('CONTENT_TYPE', '')
        if content_type == 'application/x-www-form-urlencoded':
            # Alexa sends JSON data with a form data content type, for
            # whatever reason, and Werkzeug parses form data automatically,
            # so we need to do some gymnastics to get the data we need
            json_data = None
            for key in request.form:
                try:
                    json_data = json.loads(key)
                    break
                except ValueError:
                    # Try the next key?
                    pass
            if json_data is None:
                return self.Response("Bad request", status=400)
        else:
            json_data = request.json
        return self.put_light_state(json_data, entity_id)
    def get_lights_list(self):
        """Process a request to get the list of available lights."""
        json_response = {}
        for entity in self.hass.states.all():
            if self.is_entity_exposed(entity):
                json_response[entity.entity_id] = entity_to_json(entity)
        return self.json(json_response)
    def get_light_state(self, entity_id):
        """Process a request to get the state of an individual light."""
        entity = self.hass.states.get(entity_id)
        if entity is None or not self.is_entity_exposed(entity):
            return self.Response("Entity not found", status=404)
        # Prefer the cached (last requested) state over the real one so
        # on/off-less domains report what the client expects to see.
        cached_state = self.cached_states.get(entity_id, None)
        if cached_state is None:
            final_state = entity.state == STATE_ON
            final_brightness = entity.attributes.get(
                ATTR_BRIGHTNESS, 255 if final_state else 0)
        else:
            final_state, final_brightness = cached_state
        json_response = entity_to_json(entity, final_state, final_brightness)
        return self.json(json_response)
    def put_light_state(self, request_json, entity_id):
        """Process a request to set the state of an individual light."""
        config = self.config
        # Retrieve the entity from the state machine
        entity = self.hass.states.get(entity_id)
        if entity is None:
            return self.Response("Entity not found", status=404)
        if not self.is_entity_exposed(entity):
            return self.Response("Entity not found", status=404)
        # Parse the request into requested "on" status and brightness
        parsed = parse_hue_api_put_light_body(request_json, entity)
        if parsed is None:
            return self.Response("Bad request", status=400)
        result, brightness = parsed
        # Convert the resulting "on" status into the service we need to call
        service = SERVICE_TURN_ON if result else SERVICE_TURN_OFF
        # Construct what we need to send to the service
        data = {ATTR_ENTITY_ID: entity_id}
        if brightness is not None:
            data[ATTR_BRIGHTNESS] = brightness
        if entity.domain.lower() in config.off_maps_to_on_domains:
            # Map the off command to on
            service = SERVICE_TURN_ON
            # Caching is required because things like scripts and scenes won't
            # report as "off" to Alexa if an "off" command is received, because
            # they'll map to "on". Thus, instead of reporting its actual
            # status, we report what Alexa will want to see, which is the same
            # as the actual requested command.
            self.cached_states[entity_id] = (result, brightness)
        # Perform the requested action
        self.hass.services.call(core.DOMAIN, service, data, blocking=True)
        json_response = \
            [create_hue_success_response(entity_id, HUE_API_STATE_ON, result)]
        if brightness is not None:
            json_response.append(create_hue_success_response(
                entity_id, HUE_API_STATE_BRI, brightness))
        return self.json(json_response)
    def is_entity_exposed(self, entity):
        """Determine if an entity should be exposed on the emulated bridge."""
        config = self.config
        if entity.attributes.get('view') is not None:
            # Ignore entities that are views
            return False
        domain = entity.domain.lower()
        explicit_expose = entity.attributes.get(ATTR_EMULATED_HUE, None)
        domain_exposed_by_default = \
            config.expose_by_default and domain in config.exposed_domains
        # Expose an entity if the entity's domain is exposed by default and
        # the configuration doesn't explicitly exclude it from being
        # exposed, or if the entity is explicitly exposed
        is_default_exposed = \
            domain_exposed_by_default and explicit_expose is not False
        return is_default_exposed or explicit_expose
def parse_hue_api_put_light_body(request_json, entity):
    """Parse the body of a request to change the state of a light.

    Returns a (on_state, brightness) tuple, where brightness is None when
    the request did not carry a usable "bri" value, or None for a request
    that cannot be honored (malformed values, or neither a valid "on" nor
    a usable "bri" supplied).
    """
    # BUG FIX: the old code left `result`/`report_brightness` unbound
    # (UnboundLocalError) for requests with neither key, or with only a
    # "bri" on an entity without brightness support.  Initialize defaults
    # and reject such requests as malformed instead.
    brightness = None
    report_brightness = False
    result = None
    if HUE_API_STATE_ON in request_json:
        if not isinstance(request_json[HUE_API_STATE_ON], bool):
            return None
        # Echo requested the device be turned on or off.
        result = request_json[HUE_API_STATE_ON]
    if HUE_API_STATE_BRI in request_json:
        # Make sure the entity actually supports brightness
        entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if (entity_features & SUPPORT_BRIGHTNESS) == SUPPORT_BRIGHTNESS:
            try:
                # Clamp brightness from 0 to 255
                brightness = \
                    max(0, min(int(request_json[HUE_API_STATE_BRI]), 255))
            except ValueError:
                return None
            report_brightness = True
            # A non-zero brightness implies the light should be on.
            result = (brightness > 0)
    if result is None:
        # Nothing actionable in the request body.
        return None
    return (result, brightness) if report_brightness else (result, None)
def entity_to_json(entity, is_on=None, brightness=None):
    """Convert an entity to its Hue bridge JSON representation.

    When *is_on* or *brightness* is not supplied it is derived from the
    entity's current state: "on" maps to full brightness (255), anything
    else to 0.
    """
    on_state = (entity.state == STATE_ON) if is_on is None else is_on
    if brightness is None:
        brightness = 255 if on_state else 0
    attributes = entity.attributes
    # Prefer the override name if the entity defines one.
    display_name = attributes.get(
        ATTR_EMULATED_HUE_NAME, attributes[ATTR_FRIENDLY_NAME])
    state_block = {
        HUE_API_STATE_ON: on_state,
        HUE_API_STATE_BRI: brightness,
        'reachable': True,
    }
    return {
        'state': state_block,
        'type': 'Dimmable light',
        'name': display_name,
        'modelid': 'HASS123',
        'uniqueid': entity.entity_id,
        'swversion': '123',
    }
def create_hue_success_response(entity_id, attr, value):
    """Create a success response for an attribute set on a light."""
    return {
        'success': {
            '/lights/{}/state/{}'.format(entity_id, attr): value,
        },
    }
class UPNPResponderThread(threading.Thread):
    """Handle responding to UPNP/SSDP discovery requests."""
    # Set to True by stop() to request shutdown of the run() loop.
    _interrupted = False
    def __init__(self, host_ip_addr, listen_port):
        """Initialize the class.

        host_ip_addr -- IP address advertised in the SSDP response.
        listen_port -- port of the emulated bridge's HTTP server.
        """
        threading.Thread.__init__(self)
        self.host_ip_addr = host_ip_addr
        self.listen_port = listen_port
        # Note that the double newline at the end of
        # this string is required per the SSDP spec
        resp_template = """HTTP/1.1 200 OK
CACHE-CONTROL: max-age=60
EXT:
LOCATION: http://{0}:{1}/description.xml
SERVER: FreeRTOS/6.0.5, UPnP/1.0, IpBridge/0.1
ST: urn:schemas-upnp-org:device:basic:1
USN: uuid:Socket-1_0-221438K0100073::urn:schemas-upnp-org:device:basic:1

"""
        self.upnp_response = resp_template.format(host_ip_addr, listen_port) \
                                          .replace("\n", "\r\n") \
                                          .encode('utf-8')
        # Set up a pipe for signaling to the receiver that it's time to
        # shutdown. Essentially, we place the SSDP socket into nonblocking
        # mode and use select() to wait for data to arrive on either the SSDP
        # socket or the pipe. If data arrives on either one, select() returns
        # and tells us which filenos have data ready to read.
        #
        # When we want to stop the responder, we write data to the pipe, which
        # causes the select() to return and indicate that said pipe has data
        # ready to be read, which indicates to us that the responder needs to
        # be shutdown.
        self._interrupted_read_pipe, self._interrupted_write_pipe = os.pipe()
    def run(self):
        """Run the server."""
        # Listen for UDP port 1900 packets sent to SSDP multicast address
        ssdp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        ssdp_socket.setblocking(False)
        # Required for receiving multicast
        ssdp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        ssdp_socket.setsockopt(
            socket.SOL_IP,
            socket.IP_MULTICAST_IF,
            socket.inet_aton(self.host_ip_addr))
        ssdp_socket.setsockopt(
            socket.SOL_IP,
            socket.IP_ADD_MEMBERSHIP,
            socket.inet_aton("239.255.255.250") +
            socket.inet_aton(self.host_ip_addr))
        ssdp_socket.bind(("239.255.255.250", 1900))
        while True:
            if self._interrupted:
                clean_socket_close(ssdp_socket)
                return
            try:
                read, _, _ = select.select(
                    [self._interrupted_read_pipe, ssdp_socket], [],
                    [ssdp_socket])
                if self._interrupted_read_pipe in read:
                    # Implies self._interrupted is True
                    clean_socket_close(ssdp_socket)
                    return
                elif ssdp_socket in read:
                    data, addr = ssdp_socket.recvfrom(1024)
                else:
                    continue
            except socket.error as ex:
                if self._interrupted:
                    clean_socket_close(ssdp_socket)
                    return
                # BUG FIX: log the exception itself -- the old code passed
                # the bound method `ex.__str__`, so the log line showed a
                # repr of the method rather than the error message.
                _LOGGER.error("UPNP Responder socket exception occured: %s",
                              ex)
                # BUG FIX: no datagram was received, so `data` is unbound;
                # go back to waiting instead of falling through and raising
                # a NameError on data.decode() below.
                continue
            if "M-SEARCH" in data.decode('utf-8'):
                # SSDP M-SEARCH method received, respond to it with our info
                resp_socket = socket.socket(
                    socket.AF_INET, socket.SOCK_DGRAM)
                resp_socket.sendto(self.upnp_response, addr)
                resp_socket.close()
    def stop(self):
        """Stop the server."""
        # Wake up the select() in run() via the shutdown pipe, then wait
        # for the thread to exit.
        self._interrupted = True
        os.write(self._interrupted_write_pipe, bytes([0]))
        self.join()
def clean_socket_close(sock):
    """Close a socket connection and log its closure."""
    _LOGGER.info("UPNP responder shutting down.")
    sock.close()
| |
"""
"""
# Author: Valerio Maggio <valeriomaggio@gmail.com>
# Copyright (c) 2015 Valerio Maggio <valeriomaggio@gmail.com>
# License: BSD 3 clause
# Tornado Import
try:
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler
from tornado.websocket import WebSocketHandler
except ImportError:
WebSocketHandler = RequestHandler = Application = object
# Execution
from multiprocessing import Process as mp_Process
# -- Python2 Compatibility WARNING
# Python 2 requires **futures**
# https://github.com/agronholm/pythonfutures
from concurrent.futures import ProcessPoolExecutor
from threading import Thread
# Shell Namespace restoring
from inspect import ismodule as inspect_ismodule
from importlib import import_module
# Messaging
import json
import pickle
# IPython
from IPython.utils.io import capture_output
from IPython.core.interactiveshell import InteractiveShell
# Handlers and Utils
from .handlers import (WebSocketConnectionHandler, ResultCache,
ExecutionHandler)
from .settings import JS_ROLE, PY_ROLE, SERVER_PORT, SERVER_ADDR
from .utils import parse_ws_connection_id
def execute_cell(raw_cell, current_ns):
    """
    Perform the execution of the async cell.

    raw_cell -- source code of the cell to execute.
    current_ns -- snapshot of the caller's namespace; the special
                  'import_modules' key lists (alias, module_name) pairs to
                  re-import before execution.

    Returns a (captured_stdout, updated_namespace) tuple, where the
    updated namespace contains only picklable values plus a rebuilt
    'import_modules' list.
    """
    # Create a new InteractiveShell
    shell = InteractiveShell()
    # Disable Debugger
    shell.call_pdb = False
    shell.pdb = False
    # Process and Inject in the Namespace imported modules
    module_names = current_ns.pop('import_modules')
    modules = {}
    if module_names:
        for alias, mname in module_names:
            module = import_module(mname)
            modules[alias] = module
    shell.user_ns.update(current_ns)
    if modules:
        shell.user_ns.update(modules)
    output = ''
    # Capture everything the cell prints so it can be sent to the client.
    with capture_output() as io:
        _ = shell.run_cell(raw_cell,silent=True,
                           shell_futures=False)
    # Update Namespace
    updated_namespace = dict()
    updated_namespace.setdefault('import_modules', list())
    for k, v in shell.user_ns.items():
        try:
            if inspect_ismodule(v):
                # Modules cannot be pickled; record them by name instead.
                updated_namespace['import_modules'].append((k, v.__name__))
            else:
                # Trial-pickle the value; only picklable entries survive,
                # since the namespace must cross a process boundary.
                _ = pickle.dumps({k:v})
                updated_namespace[k] = v
        except TypeError:
            continue
        except pickle.PicklingError:
            continue
        except AttributeError:
            continue
    # if not output:
    output += io.stdout
    return output, updated_namespace
class AsyncRunHandler(WebSocketHandler):
    """Tornado WebSocket handler.

    Responsible for the actual communication occurring on the web socket
    between the (JS) client and the (PY) server.
    """
    def __init__(self, application, request, **kwargs):
        super(AsyncRunHandler, self).__init__(application,
                                              request, **kwargs)
        # Session identifier extracted from the connection id on open().
        self._session_id = ''
        # Cell source and namespace dict received from the PY client;
        # execution starts once both have arrived (see on_message).
        self._code_to_run = None
        self._user_ns = None
    # noinspection PyMethodOverriding
    def initialize(self, connection_handler, result_cache, io_loop):
        """Initialize the WebsocketHandler injecting proper handlers
        instances.
        These handlers will be used to store reference to client connections,
        to cache execution results, and to manage
        a system of output queues, respectively.
        """
        self._connection_handler = connection_handler
        self._execution_cache = result_cache
        self._ioloop = io_loop
    def check_origin(self, origin):
        """Accept WebSocket connections from any origin."""
        return True
    def open(self, connection_id):
        """Register a newly opened WebSocket connection.

        The connection id encodes the client role and the session id.
        """
        print('Connection Opened for: ', connection_id)
        self._connection_id = connection_id
        _, session_id = parse_ws_connection_id(connection_id)
        self._session_id = session_id
        # ADD Websocket Connection
        self._connection_handler.add(connection_id, self)
    def process_work_completed(self, future):
        """
        Callback injected in Tornado IOLoop to be called
        whenever the future (concurrent.ProcessPoolExecutor) is completed.
        """
        # This output will go to the server stdout
        # to be removed
        print('Future Completed')
        # Get Execution results
        output, namespace = future.result()  # potentially blocking call
        # Post-execution processing
        data = {'session_id': self._session_id,
                'output': output}
        # ADD Cache Result
        # print('Caching results for ', self.cache_id)
        # FIXME: This does not work if the output includes Images
        # TODO: Try using pickle here, instead of json
        jsonified = json.dumps(data)
        self._execution_cache.add(self._session_id, jsonified)
        # Get WebSocket Connection of the client to receive updates in
        # the namespace of the cell
        ws_conn = self._connection_handler.get(self._connection_id)
        if ws_conn:
            # Send to the client the updated namespace
            # Add Execution output to allow for *Output History UPDATE*
            message = {'exec_output': output}
            message.update(namespace)
            bin_message = pickle.dumps(message)
            # Write again on the web socket so to fire JS Client side.
            ws_conn.write_message(bin_message, binary=True)
        else:
            print("No Connection found for ", self._connection_id)
    def run_async_cell_execution(self):
        """Submit the stored cell for execution in a separate process."""
        # NOTE(review): leaving the `with` block calls executor.shutdown
        # with wait=True, so this method blocks until the cell finishes --
        # confirm that is intended before relying on it being asynchronous.
        with ProcessPoolExecutor() as executor:
            future = executor.submit(execute_cell, self._code_to_run, self._user_ns)
            self._ioloop.add_future(future, self.process_work_completed)
            # self.process_work_completed(future)
    def on_message(self, message):
        """
        Handler method activated every time a new
        message is received on the web socket.
        """
        try:
            data = json.loads(message)
        except TypeError:
            # Binary Message
            data = pickle.loads(message)
        connection_id = data.get('connection_id', '')
        role_name, _ = parse_ws_connection_id(connection_id)
        if role_name == JS_ROLE:
            # GET Cache Result
            json_data = self._execution_cache.get(connection_id)
            # GET WebSocketConnection
            ws_conn = self._connection_handler.get(connection_id)
            if ws_conn and json_data:
                ws_conn.write_message(json_data)  # JS Client
            else:
                print('No connection nor data stored for ', role_name)
        elif role_name == PY_ROLE:  # parse the code to run_async_cell_execution
            # The PY client sends two messages: one with the cell source
            # and one with the namespace; run once both are present.
            if 'nb_code_to_run_async' in data:
                self._code_to_run = data['nb_code_to_run_async']
            else:  # namespace
                self._user_ns = data
            if self._code_to_run and self._user_ns:
                # Start the execution of the cell
                print("Starting Execution")
                # t = Thread(target=self.run_async_cell_execution)
                # t.start()
                self.run_async_cell_execution()
        else:
            print('No Action found for Role: ', role_name)
    def on_close(self):
        """Forget the WebSocket connection when the client disconnects."""
        # REMOVE WebSocketConnection
        print('Closing Connection for ', self._connection_id)
        self._connection_handler.remove(self._connection_id)
class PingRequestHandler(RequestHandler):
    """Dummy Request Handler used to test
    connectivity to webserver"""
    def get(self):
        """Reply with a fixed message so clients can probe the server."""
        self.write("Server is Up'n'Running!")
class AsyncRunServer(mp_Process):
    """The main `multiprocessing.Process` class
    controlling the execution of the
    Asynch Server running.
    This class is in charge to handle
    references to the IO Loop (Tornado Loop
    so far) and the Http Server.
    """
    def __init__(self):
        super(AsyncRunServer, self).__init__()
        # Both are created lazily in run(), inside the child process.
        self.io_loop = None
        self.http_server = None
    def run(self):
        """Create the Tornado application and serve it until interrupted."""
        #logging.basicConfig(filename='runserver.log',level=logging.DEBUG)
        # Drop any IOLoop inherited from the parent process before
        # creating our own instance.
        IOLoop.clear_current()
        IOLoop.clear_instance()
        self.io_loop = IOLoop.instance()
        ws_connection_handler = WebSocketConnectionHandler()
        results_cache = ResultCache()
        tornado_app = Application(handlers=[
            (r"/ws/(.*)", AsyncRunHandler, {'connection_handler': ws_connection_handler,
                                            'result_cache': results_cache,
                                            'io_loop': self.io_loop,
                                            }),
            (r"/ping", PingRequestHandler)])
        self.http_server = HTTPServer(tornado_app)
        try:
            self.http_server.listen(port=SERVER_PORT,
                                    address=SERVER_ADDR)
            # NOTE(review): `_running` is a private Tornado attribute --
            # confirm it exists on the Tornado version in use.
            if not self.io_loop._running:
                print('Running Server Loop')
                self.io_loop.start()
            else:
                print("IOLoop already running")
        except OSError:
            # listen() failed, most likely because the port is taken.
            print("Server is already running!")
        except KeyboardInterrupt:  # SIGINT, SIGTERM
            print('Closing Server Loop')
            self.http_server.close_all_connections()
            self.io_loop.stop()
if __name__ == '__main__':
    # Run the server stand-alone; Ctrl-C terminates it quietly.
    server = AsyncRunServer()
    try:
        server.start()
        server.join()
    except KeyboardInterrupt:
        pass
| |
# coding: utf-8
from sqlalchemy.engine import reflection
from sqlalchemy.testing.assertions import eq_, assert_raises, \
AssertsExecutionResults
from sqlalchemy.testing import fixtures
from sqlalchemy import testing
from sqlalchemy import inspect
from sqlalchemy import Table, Column, MetaData, Integer, String, \
PrimaryKeyConstraint, ForeignKey, join, Sequence, UniqueConstraint, \
Index
from sqlalchemy import exc
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import base as postgresql
from sqlalchemy.dialects.postgresql import ARRAY
class ForeignTableReflectionTest(fixtures.TablesTest, AssertsExecutionResults):
    """Test reflection on foreign tables"""
    __requires__ = 'postgresql_test_dblink',
    __only_on__ = 'postgresql >= 9.3'
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        """Create a local table plus a postgres_fdw foreign table over it.

        The dblink target comes from the [sqla_testing] section of the
        test config; the FDW server/mapping/table are created and dropped
        via DDL event listeners around the schema lifecycle.
        """
        from sqlalchemy.testing import config
        dblink = config.file_config.get(
            'sqla_testing', 'postgres_test_db_link')
        testtable = Table(
            'testtable', metadata,
            Column('id', Integer, primary_key=True),
            Column('data', String(30)))
        for ddl in [
            "CREATE SERVER test_server FOREIGN DATA WRAPPER postgres_fdw "
            "OPTIONS (dbname 'test', host '%s')" % dblink,
            "CREATE USER MAPPING FOR public \
                        SERVER test_server options (user 'scott', password 'tiger')",
            "CREATE FOREIGN TABLE test_foreigntable ( "
            "   id          INT, "
            "   data        VARCHAR(30) "
            ") SERVER test_server OPTIONS (table_name 'testtable')",
        ]:
            sa.event.listen(metadata, "after_create", sa.DDL(ddl))
        # Drop in reverse dependency order before the schema is torn down.
        for ddl in [
            'DROP FOREIGN TABLE test_foreigntable',
            'DROP USER MAPPING FOR public SERVER test_server',
            "DROP SERVER test_server"
        ]:
            sa.event.listen(metadata, "before_drop", sa.DDL(ddl))
    def test_foreign_table_is_reflected(self):
        """Autoload must pick up the foreign table's columns."""
        metadata = MetaData(testing.db)
        table = Table('test_foreigntable', metadata, autoload=True)
        eq_(set(table.columns.keys()), set(['id', 'data']),
            "Columns of reflected foreign table didn't equal expected columns")
    def test_get_foreign_table_names(self):
        """get_foreign_table_names lists only the FDW-backed table."""
        inspector = inspect(testing.db)
        with testing.db.connect() as conn:
            ft_names = inspector.get_foreign_table_names()
        eq_(ft_names, ['test_foreigntable'])
    def test_get_table_names_no_foreign(self):
        """get_table_names must exclude foreign tables."""
        inspector = inspect(testing.db)
        with testing.db.connect() as conn:
            names = inspector.get_table_names()
        eq_(names, ['testtable'])
class MaterializedViewReflectionTest(
        fixtures.TablesTest, AssertsExecutionResults):
    """Test reflection on materialized views"""
    __only_on__ = 'postgresql >= 9.3'
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        """Create a base table plus one materialized and one plain view.

        A row is inserted right after table creation so the materialized
        view captures data at CREATE time.
        """
        testtable = Table(
            'testtable', metadata,
            Column('id', Integer, primary_key=True),
            Column('data', String(30)))
        # insert data before we create the view
        @sa.event.listens_for(testtable, "after_create")
        def insert_data(target, connection, **kw):
            connection.execute(
                target.insert(),
                {"id": 89, "data": 'd1'}
            )
        materialized_view = sa.DDL(
            "CREATE MATERIALIZED VIEW test_mview AS "
            "SELECT * FROM testtable")
        plain_view = sa.DDL(
            "CREATE VIEW test_regview AS "
            "SELECT * FROM testtable")
        sa.event.listen(testtable, 'after_create', plain_view)
        sa.event.listen(testtable, 'after_create', materialized_view)
        # Views must go away before the table they select from is dropped.
        sa.event.listen(
            testtable, 'before_drop',
            sa.DDL("DROP MATERIALIZED VIEW test_mview")
        )
        sa.event.listen(
            testtable, 'before_drop',
            sa.DDL("DROP VIEW test_regview")
        )
    def test_mview_is_reflected(self):
        """Autoload must pick up the materialized view's columns."""
        metadata = MetaData(testing.db)
        table = Table('test_mview', metadata, autoload=True)
        eq_(set(table.columns.keys()), set(['id', 'data']),
            "Columns of reflected mview didn't equal expected columns")
    def test_mview_select(self):
        """Selecting from the mview returns the row inserted at creation."""
        metadata = MetaData(testing.db)
        table = Table('test_mview', metadata, autoload=True)
        eq_(
            table.select().execute().fetchall(),
            [(89, 'd1',)]
        )
    def test_get_view_names(self):
        """get_view_names reports both plain and materialized views."""
        insp = inspect(testing.db)
        eq_(set(insp.get_view_names()), set(['test_mview', 'test_regview']))
class DomainReflectionTest(fixtures.TestBase, AssertsExecutionResults):
    """Test PostgreSQL domains

    Covers reflection of plain domains, schema-qualified domains,
    cross-schema references and a domain over an ENUM type.
    """
    __only_on__ = 'postgresql > 8.3'
    __backend__ = True
    @classmethod
    def setup_class(cls):
        """Create the domains/type and the tables that use them.

        Tolerates "already exists" errors so a previously aborted run
        does not poison the whole class.
        """
        con = testing.db.connect()
        for ddl in \
            'CREATE DOMAIN testdomain INTEGER NOT NULL DEFAULT 42', \
            'CREATE DOMAIN test_schema.testdomain INTEGER DEFAULT 0', \
            "CREATE TYPE testtype AS ENUM ('test')", \
            'CREATE DOMAIN enumdomain AS testtype':
            try:
                con.execute(ddl)
            except exc.DBAPIError as e:
                if 'already exists' not in str(e):
                    # bare raise preserves the original traceback
                    # (``raise e`` discards it on Python 2)
                    raise
        con.execute('CREATE TABLE testtable (question integer, answer '
                    'testdomain)')
        con.execute('CREATE TABLE test_schema.testtable(question '
                    'integer, answer test_schema.testdomain, anything '
                    'integer)')
        con.execute('CREATE TABLE crosschema (question integer, answer '
                    'test_schema.testdomain)')
        con.execute('CREATE TABLE enum_test (id integer, data enumdomain)')
    @classmethod
    def teardown_class(cls):
        """Drop tables first, then the domains/types they depend on."""
        con = testing.db.connect()
        con.execute('DROP TABLE testtable')
        con.execute('DROP TABLE test_schema.testtable')
        con.execute('DROP TABLE crosschema')
        con.execute('DROP DOMAIN testdomain')
        con.execute('DROP DOMAIN test_schema.testdomain')
        con.execute("DROP TABLE enum_test")
        con.execute("DROP DOMAIN enumdomain")
        con.execute("DROP TYPE testtype")
    def test_table_is_reflected(self):
        """A domain-typed column reflects as its underlying Integer type."""
        metadata = MetaData(testing.db)
        table = Table('testtable', metadata, autoload=True)
        eq_(set(table.columns.keys()), set(['question', 'answer']),
            "Columns of reflected table didn't equal expected columns")
        assert isinstance(table.c.answer.type, Integer)
    def test_domain_is_reflected(self):
        """Domain default (42) and NOT NULL constraint are reflected."""
        metadata = MetaData(testing.db)
        table = Table('testtable', metadata, autoload=True)
        eq_(str(table.columns.answer.server_default.arg), '42',
            "Reflected default value didn't equal expected value")
        assert not table.columns.answer.nullable, \
            'Expected reflected column to not be nullable.'
    def test_enum_domain_is_reflected(self):
        """A domain over an ENUM reflects the enum's labels."""
        metadata = MetaData(testing.db)
        table = Table('enum_test', metadata, autoload=True)
        eq_(
            table.c.data.type.enums,
            ('test', )
        )
    def test_table_is_reflected_test_schema(self):
        """Schema-qualified table with a schema-qualified domain reflects."""
        metadata = MetaData(testing.db)
        table = Table('testtable', metadata, autoload=True,
                      schema='test_schema')
        eq_(set(table.columns.keys()), set(['question', 'answer',
                                            'anything']),
            "Columns of reflected table didn't equal expected columns")
        assert isinstance(table.c.anything.type, Integer)
    def test_schema_domain_is_reflected(self):
        """Schema-local domain default (0) and nullability are reflected."""
        metadata = MetaData(testing.db)
        table = Table('testtable', metadata, autoload=True,
                      schema='test_schema')
        eq_(str(table.columns.answer.server_default.arg), '0',
            "Reflected default value didn't equal expected value")
        assert table.columns.answer.nullable, \
            'Expected reflected column to be nullable.'
    def test_crosschema_domain_is_reflected(self):
        """A public table using a domain from another schema reflects."""
        metadata = MetaData(testing.db)
        table = Table('crosschema', metadata, autoload=True)
        eq_(str(table.columns.answer.server_default.arg), '0',
            "Reflected default value didn't equal expected value")
        assert table.columns.answer.nullable, \
            'Expected reflected column to be nullable.'
    def test_unknown_types(self):
        """With the type lookup table emptied, reflection warns and the
        column falls back to NullType.
        """
        from sqlalchemy.databases import postgresql
        ischema_names = postgresql.PGDialect.ischema_names
        postgresql.PGDialect.ischema_names = {}
        try:
            m2 = MetaData(testing.db)
            assert_raises(exc.SAWarning, Table, 'testtable', m2,
                          autoload=True)
            @testing.emits_warning('Did not recognize type')
            def warns():
                m3 = MetaData(testing.db)
                t3 = Table('testtable', m3, autoload=True)
                assert t3.c.answer.type.__class__ == sa.types.NullType
            # Bug fix: warns() was previously defined but never invoked,
            # so the NullType assertions above never actually ran.
            warns()
        finally:
            postgresql.PGDialect.ischema_names = ischema_names
class ReflectionTest(fixtures.TestBase):
    """Assorted PostgreSQL reflection round-trip tests: keys, sequences,
    schemas/search_path, indexes, FK options, enums and unique constraints.
    """
    __only_on__ = 'postgresql'
    __backend__ = True
    @testing.fails_if("postgresql < 8.4",
                      "Better int2vector functions not available")
    @testing.provide_metadata
    def test_reflected_primary_key_order(self):
        """PK column order follows the constraint, not column order."""
        meta1 = self.metadata
        subject = Table('subject', meta1,
                        Column('p1', Integer, primary_key=True),
                        Column('p2', Integer, primary_key=True),
                        PrimaryKeyConstraint('p2', 'p1')
                        )
        meta1.create_all()
        meta2 = MetaData(testing.db)
        subject = Table('subject', meta2, autoload=True)
        eq_(subject.primary_key.columns.keys(), ['p2', 'p1'])
    @testing.provide_metadata
    def test_pg_weirdchar_reflection(self):
        """Columns with non-identifier chars ('id$') reflect and join."""
        meta1 = self.metadata
        subject = Table('subject', meta1, Column('id$', Integer,
                                                 primary_key=True))
        referer = Table(
            'referer', meta1,
            Column(
                'id', Integer, primary_key=True),
            Column(
                'ref', Integer, ForeignKey('subject.id$')))
        meta1.create_all()
        meta2 = MetaData(testing.db)
        subject = Table('subject', meta2, autoload=True)
        referer = Table('referer', meta2, autoload=True)
        self.assert_((subject.c['id$']
                      == referer.c.ref).compare(
            subject.join(referer).onclause))
    @testing.provide_metadata
    def test_reflect_default_over_128_chars(self):
        """Long server defaults (>128 chars) reflect without truncation."""
        Table('t', self.metadata,
              Column('x', String(200), server_default="abcd" * 40)
              ).create(testing.db)
        m = MetaData()
        t = Table('t', m, autoload=True, autoload_with=testing.db)
        eq_(
            t.c.x.server_default.arg.text, "'%s'::character varying" % (
                "abcd" * 40)
        )
    @testing.fails_if("postgresql < 8.1", "schema name leaks in, not sure")
    @testing.provide_metadata
    def test_renamed_sequence_reflection(self):
        """Sequence defaults track a rename of the underlying sequence."""
        metadata = self.metadata
        t = Table('t', metadata, Column('id', Integer, primary_key=True))
        metadata.create_all()
        m2 = MetaData(testing.db)
        t2 = Table('t', m2, autoload=True, implicit_returning=False)
        eq_(t2.c.id.server_default.arg.text,
            "nextval('t_id_seq'::regclass)")
        r = t2.insert().execute()
        eq_(r.inserted_primary_key, [1])
        testing.db.connect().execution_options(autocommit=True).\
            execute('alter table t_id_seq rename to foobar_id_seq'
                    )
        m3 = MetaData(testing.db)
        t3 = Table('t', m3, autoload=True, implicit_returning=False)
        eq_(t3.c.id.server_default.arg.text,
            "nextval('foobar_id_seq'::regclass)")
        r = t3.insert().execute()
        eq_(r.inserted_primary_key, [2])
    @testing.provide_metadata
    def test_renamed_pk_reflection(self):
        """PK reflection follows a renamed primary key column."""
        metadata = self.metadata
        t = Table('t', metadata, Column('id', Integer, primary_key=True))
        metadata.create_all()
        testing.db.connect().execution_options(autocommit=True).\
            execute('alter table t rename id to t_id')
        m2 = MetaData(testing.db)
        t2 = Table('t', m2, autoload=True)
        eq_([c.name for c in t2.primary_key], ['t_id'])
    @testing.provide_metadata
    def test_has_temporary_table(self):
        """has_table() sees TEMPORARY tables on the same engine."""
        assert not testing.db.has_table("some_temp_table")
        user_tmp = Table(
            "some_temp_table", self.metadata,
            Column("id", Integer, primary_key=True),
            Column('name', String(50)),
            prefixes=['TEMPORARY']
        )
        user_tmp.create(testing.db)
        assert testing.db.has_table("some_temp_table")
    @testing.provide_metadata
    def test_cross_schema_reflection_one(self):
        """FK within one non-default schema reflects and joins."""
        meta1 = self.metadata
        users = Table('users', meta1,
                      Column('user_id', Integer, primary_key=True),
                      Column('user_name', String(30), nullable=False),
                      schema='test_schema')
        addresses = Table(
            'email_addresses', meta1,
            Column(
                'address_id', Integer, primary_key=True),
            Column(
                'remote_user_id', Integer, ForeignKey(
                    users.c.user_id)),
            Column(
                'email_address', String(20)), schema='test_schema')
        meta1.create_all()
        meta2 = MetaData(testing.db)
        addresses = Table('email_addresses', meta2, autoload=True,
                          schema='test_schema')
        users = Table('users', meta2, mustexist=True,
                      schema='test_schema')
        j = join(users, addresses)
        self.assert_((users.c.user_id
                      == addresses.c.remote_user_id).compare(j.onclause))
    @testing.provide_metadata
    def test_cross_schema_reflection_two(self):
        """FK from a schema table to a default-schema table reflects."""
        meta1 = self.metadata
        subject = Table('subject', meta1,
                        Column('id', Integer, primary_key=True))
        referer = Table('referer', meta1,
                        Column('id', Integer, primary_key=True),
                        Column('ref', Integer, ForeignKey('subject.id')),
                        schema='test_schema')
        meta1.create_all()
        meta2 = MetaData(testing.db)
        subject = Table('subject', meta2, autoload=True)
        referer = Table('referer', meta2, schema='test_schema',
                        autoload=True)
        self.assert_((subject.c.id
                      == referer.c.ref).compare(
            subject.join(referer).onclause))
    @testing.provide_metadata
    def test_cross_schema_reflection_three(self):
        """FK between two distinct non-default schemas reflects."""
        meta1 = self.metadata
        subject = Table('subject', meta1,
                        Column('id', Integer, primary_key=True),
                        schema='test_schema_2')
        referer = Table(
            'referer',
            meta1,
            Column(
                'id',
                Integer,
                primary_key=True),
            Column(
                'ref',
                Integer,
                ForeignKey('test_schema_2.subject.id')),
            schema='test_schema')
        meta1.create_all()
        meta2 = MetaData(testing.db)
        subject = Table('subject', meta2, autoload=True,
                        schema='test_schema_2')
        referer = Table('referer', meta2, autoload=True,
                        schema='test_schema')
        self.assert_((subject.c.id
                      == referer.c.ref).compare(
            subject.join(referer).onclause))
    @testing.provide_metadata
    def test_cross_schema_reflection_four(self):
        """Same as _three, but with both schemas on the search_path and
        postgresql_ignore_search_path=True, so explicit schemas persist.
        """
        meta1 = self.metadata
        subject = Table('subject', meta1,
                        Column('id', Integer, primary_key=True),
                        schema='test_schema_2')
        referer = Table(
            'referer',
            meta1,
            Column(
                'id',
                Integer,
                primary_key=True),
            Column(
                'ref',
                Integer,
                ForeignKey('test_schema_2.subject.id')),
            schema='test_schema')
        meta1.create_all()
        conn = testing.db.connect()
        # detach() keeps the SET search_path from leaking back to the pool
        conn.detach()
        conn.execute("SET search_path TO test_schema, test_schema_2")
        meta2 = MetaData(bind=conn)
        subject = Table('subject', meta2, autoload=True,
                        schema='test_schema_2',
                        postgresql_ignore_search_path=True)
        referer = Table('referer', meta2, autoload=True,
                        schema='test_schema',
                        postgresql_ignore_search_path=True)
        self.assert_((subject.c.id
                      == referer.c.ref).compare(
            subject.join(referer).onclause))
        conn.close()
    @testing.provide_metadata
    def test_cross_schema_reflection_five(self):
        """Explicitly naming the default schema keeps it on the Table."""
        meta1 = self.metadata
        # we assume 'public'
        default_schema = testing.db.dialect.default_schema_name
        subject = Table('subject', meta1,
                        Column('id', Integer, primary_key=True))
        referer = Table('referer', meta1,
                        Column('id', Integer, primary_key=True),
                        Column('ref', Integer, ForeignKey('subject.id')))
        meta1.create_all()
        meta2 = MetaData(testing.db)
        subject = Table('subject', meta2, autoload=True,
                        schema=default_schema,
                        postgresql_ignore_search_path=True
                        )
        referer = Table('referer', meta2, autoload=True,
                        schema=default_schema,
                        postgresql_ignore_search_path=True
                        )
        assert subject.schema == default_schema
        self.assert_((subject.c.id
                      == referer.c.ref).compare(
            subject.join(referer).onclause))
    @testing.provide_metadata
    def test_cross_schema_reflection_six(self):
        # test that the search path *is* taken into account
        # by default
        meta1 = self.metadata
        Table('some_table', meta1,
              Column('id', Integer, primary_key=True),
              schema='test_schema'
              )
        Table('some_other_table', meta1,
              Column('id', Integer, primary_key=True),
              Column('sid', Integer, ForeignKey('test_schema.some_table.id')),
              schema='test_schema_2'
              )
        meta1.create_all()
        with testing.db.connect() as conn:
            conn.detach()
            conn.execute(
                "set search_path to test_schema_2, test_schema, public")
            m1 = MetaData(conn)
            t1_schema = Table('some_table',
                              m1,
                              schema="test_schema",
                              autoload=True)
            t2_schema = Table('some_other_table',
                              m1,
                              schema="test_schema_2",
                              autoload=True)
            t2_no_schema = Table('some_other_table',
                                 m1,
                                 autoload=True)
            t1_no_schema = Table('some_table',
                                 m1,
                                 autoload=True)
            m2 = MetaData(conn)
            t1_schema_isp = Table('some_table',
                                  m2,
                                  schema="test_schema",
                                  autoload=True,
                                  postgresql_ignore_search_path=True)
            t2_schema_isp = Table('some_other_table',
                                  m2,
                                  schema="test_schema_2",
                                  autoload=True,
                                  postgresql_ignore_search_path=True)
            # t2_schema refers to t1_schema, but since "test_schema"
            # is in the search path, we instead link to t2_no_schema
            assert t2_schema.c.sid.references(
                t1_no_schema.c.id)
            # the two no_schema tables refer to each other also.
            assert t2_no_schema.c.sid.references(
                t1_no_schema.c.id)
            # but if we're ignoring search path, then we maintain
            # those explicit schemas vs. what the "default" schema is
            assert t2_schema_isp.c.sid.references(t1_schema_isp.c.id)
    @testing.provide_metadata
    def test_cross_schema_reflection_seven(self):
        # test that the search path *is* taken into account
        # by default
        meta1 = self.metadata
        Table('some_table', meta1,
              Column('id', Integer, primary_key=True),
              schema='test_schema'
              )
        Table('some_other_table', meta1,
              Column('id', Integer, primary_key=True),
              Column('sid', Integer, ForeignKey('test_schema.some_table.id')),
              schema='test_schema_2'
              )
        meta1.create_all()
        with testing.db.connect() as conn:
            conn.detach()
            conn.execute(
                "set search_path to test_schema_2, test_schema, public")
            meta2 = MetaData(conn)
            meta2.reflect(schema="test_schema_2")
            # schema on the search path is keyed without prefix
            eq_(set(meta2.tables), set(
                ['test_schema_2.some_other_table', 'some_table']))
            meta3 = MetaData(conn)
            meta3.reflect(
                schema="test_schema_2", postgresql_ignore_search_path=True)
            eq_(set(meta3.tables), set(
                ['test_schema_2.some_other_table', 'test_schema.some_table']))
    @testing.provide_metadata
    def test_uppercase_lowercase_table(self):
        """'a' and 'A' are distinct tables; has_table is case-sensitive."""
        metadata = self.metadata
        a_table = Table('a', metadata, Column('x', Integer))
        A_table = Table('A', metadata, Column('x', Integer))
        a_table.create()
        assert testing.db.has_table("a")
        assert not testing.db.has_table("A")
        A_table.create(checkfirst=True)
        assert testing.db.has_table("A")
    def test_uppercase_lowercase_sequence(self):
        """'a' and 'A' are distinct sequences; has_sequence is case-sensitive."""
        a_seq = Sequence('a')
        A_seq = Sequence('A')
        a_seq.create(testing.db)
        assert testing.db.dialect.has_sequence(testing.db, "a")
        assert not testing.db.dialect.has_sequence(testing.db, "A")
        A_seq.create(testing.db, checkfirst=True)
        assert testing.db.dialect.has_sequence(testing.db, "A")
        a_seq.drop(testing.db)
        A_seq.drop(testing.db)
    @testing.provide_metadata
    def test_index_reflection(self):
        """ Reflecting partial & expression-based indexes should warn
        """
        metadata = self.metadata
        t1 = Table(
            'party', metadata,
            Column(
                'id', String(10), nullable=False),
            Column(
                'name', String(20), index=True),
            Column(
                'aname', String(20)))
        metadata.create_all()
        testing.db.execute("""
            create index idx1 on party ((id || name))
        """)
        testing.db.execute("""
            create unique index idx2 on party (id) where name = 'test'
        """)
        testing.db.execute("""
            create index idx3 on party using btree
                (lower(name::text), lower(aname::text))
        """)
        def go():
            # only idx2 (partial) and the implicit name index survive;
            # expression indexes idx1/idx3 are skipped with a warning
            m2 = MetaData(testing.db)
            t2 = Table('party', m2, autoload=True)
            assert len(t2.indexes) == 2
            # Make sure indexes are in the order we expect them in
            tmp = [(idx.name, idx) for idx in t2.indexes]
            tmp.sort()
            r1, r2 = [idx[1] for idx in tmp]
            assert r1.name == 'idx2'
            assert r1.unique == True
            assert r2.unique == False
            assert [t2.c.id] == r1.columns
            assert [t2.c.name] == r2.columns
        testing.assert_warnings(
            go,
            ['Skipped unsupported reflection of '
             'expression-based index idx1',
             'Predicate of partial index idx2 ignored during '
             'reflection',
             'Skipped unsupported reflection of '
             'expression-based index idx3'])
    @testing.provide_metadata
    def test_index_reflection_modified(self):
        """reflect indexes when a column name has changed - PG 9
        does not update the name of the column in the index def.
        [ticket:2141]
        """
        metadata = self.metadata
        t1 = Table('t', metadata,
                   Column('id', Integer, primary_key=True),
                   Column('x', Integer)
                   )
        metadata.create_all()
        conn = testing.db.connect().execution_options(autocommit=True)
        conn.execute("CREATE INDEX idx1 ON t (x)")
        conn.execute("ALTER TABLE t RENAME COLUMN x to y")
        ind = testing.db.dialect.get_indexes(conn, "t", None)
        eq_(ind, [{'unique': False, 'column_names': ['y'], 'name': 'idx1'}])
        conn.close()
    @testing.fails_if("postgresql < 8.2", "reloptions not supported")
    @testing.provide_metadata
    def test_index_reflection_with_storage_options(self):
        """reflect indexes with storage options set"""
        metadata = self.metadata
        Table(
            't', metadata,
            Column('id', Integer, primary_key=True),
            Column('x', Integer)
        )
        metadata.create_all()
        with testing.db.connect().execution_options(autocommit=True) as conn:
            conn.execute("CREATE INDEX idx1 ON t (x) WITH (fillfactor = 50)")
            ind = testing.db.dialect.get_indexes(conn, "t", None)
            eq_(ind, [{'unique': False, 'column_names': ['x'], 'name': 'idx1',
                       'dialect_options':
                       {"postgresql_with": {"fillfactor": "50"}}}])
            m = MetaData()
            t1 = Table('t', m, autoload_with=conn)
            eq_(
                list(t1.indexes)[0].dialect_options['postgresql']['with'],
                {"fillfactor": "50"}
            )
    @testing.provide_metadata
    def test_index_reflection_with_access_method(self):
        """reflect indexes built with a non-default access method (gin)"""
        metadata = self.metadata
        Table(
            't', metadata,
            Column('id', Integer, primary_key=True),
            Column('x', ARRAY(Integer))
        )
        metadata.create_all()
        with testing.db.connect().execution_options(autocommit=True) as conn:
            conn.execute("CREATE INDEX idx1 ON t USING gin (x)")
            ind = testing.db.dialect.get_indexes(conn, "t", None)
            eq_(ind, [{'unique': False, 'column_names': ['x'], 'name': 'idx1',
                       'dialect_options': {'postgresql_using': 'gin'}}])
            m = MetaData()
            t1 = Table('t', m, autoload_with=conn)
            eq_(
                list(t1.indexes)[0].dialect_options['postgresql']['using'],
                'gin'
            )
    @testing.provide_metadata
    def test_foreign_key_option_inspection(self):
        """FK options (match/onupdate/ondelete/deferrable/initially)
        round-trip through the inspector; PG defaults come back as None.
        """
        metadata = self.metadata
        Table(
            'person',
            metadata,
            Column(
                'id',
                String(
                    length=32),
                nullable=False,
                primary_key=True),
            Column(
                'company_id',
                ForeignKey(
                    'company.id',
                    name='person_company_id_fkey',
                    match='FULL',
                    onupdate='RESTRICT',
                    ondelete='RESTRICT',
                    deferrable=True,
                    initially='DEFERRED')))
        Table(
            'company', metadata,
            Column('id', String(length=32), nullable=False, primary_key=True),
            Column('name', String(length=255)),
            Column(
                'industry_id',
                ForeignKey(
                    'industry.id',
                    name='company_industry_id_fkey',
                    onupdate='CASCADE', ondelete='CASCADE',
                    deferrable=False,  # PG default
                    # PG default
                    initially='IMMEDIATE'
                )
            )
        )
        Table('industry', metadata,
              Column('id', Integer(), nullable=False, primary_key=True),
              Column('name', String(length=255))
              )
        fk_ref = {
            'person_company_id_fkey': {
                'name': 'person_company_id_fkey',
                'constrained_columns': ['company_id'],
                'referred_columns': ['id'],
                'referred_table': 'company',
                'referred_schema': None,
                'options': {
                    'onupdate': 'RESTRICT',
                    'deferrable': True,
                    'ondelete': 'RESTRICT',
                    'initially': 'DEFERRED',
                    'match': 'FULL'
                }
            },
            'company_industry_id_fkey': {
                'name': 'company_industry_id_fkey',
                'constrained_columns': ['industry_id'],
                'referred_columns': ['id'],
                'referred_table': 'industry',
                'referred_schema': None,
                'options': {
                    'onupdate': 'CASCADE',
                    'deferrable': None,
                    'ondelete': 'CASCADE',
                    'initially': None,
                    'match': None
                }
            }
        }
        metadata.create_all()
        inspector = inspect(testing.db)
        fks = inspector.get_foreign_keys('person') + \
            inspector.get_foreign_keys('company')
        for fk in fks:
            eq_(fk, fk_ref[fk['name']])
    @testing.provide_metadata
    def test_inspect_enums_schema(self):
        """get_enums('test_schema') reports a schema-local, non-visible enum."""
        conn = testing.db.connect()
        enum_type = postgresql.ENUM(
            'sad', 'ok', 'happy', name='mood',
            schema='test_schema',
            metadata=self.metadata)
        enum_type.create(conn)
        inspector = reflection.Inspector.from_engine(conn.engine)
        eq_(
            inspector.get_enums('test_schema'), [{
                'visible': False,
                'name': 'mood',
                'schema': 'test_schema',
                'labels': ['sad', 'ok', 'happy']
            }])
    @testing.provide_metadata
    def test_inspect_enums(self):
        """get_enums() with no schema reports public, visible enums."""
        enum_type = postgresql.ENUM(
            'cat', 'dog', 'rat', name='pet', metadata=self.metadata)
        enum_type.create(testing.db)
        inspector = reflection.Inspector.from_engine(testing.db)
        eq_(inspector.get_enums(), [
            {
                'visible': True,
                'labels': ['cat', 'dog', 'rat'],
                'name': 'pet',
                'schema': 'public'
            }])
    @testing.provide_metadata
    def test_inspect_enums_star(self):
        """get_enums('*') includes enums from all schemas."""
        enum_type = postgresql.ENUM(
            'cat', 'dog', 'rat', name='pet', metadata=self.metadata)
        schema_enum_type = postgresql.ENUM(
            'sad', 'ok', 'happy', name='mood',
            schema='test_schema',
            metadata=self.metadata)
        enum_type.create(testing.db)
        schema_enum_type.create(testing.db)
        inspector = reflection.Inspector.from_engine(testing.db)
        eq_(inspector.get_enums(), [
            {
                'visible': True,
                'labels': ['cat', 'dog', 'rat'],
                'name': 'pet',
                'schema': 'public'
            }])
        eq_(inspector.get_enums('*'), [
            {
                'visible': True,
                'labels': ['cat', 'dog', 'rat'],
                'name': 'pet',
                'schema': 'public'
            },
            {
                'visible': False,
                'name': 'mood',
                'schema': 'test_schema',
                'labels': ['sad', 'ok', 'happy']
            }])
    @testing.provide_metadata
    @testing.only_on("postgresql >= 8.5")
    def test_reflection_with_unique_constraint(self):
        """A unique constraint's implicit index is de-duplicated on reflection."""
        insp = inspect(testing.db)
        meta = self.metadata
        uc_table = Table('pgsql_uc', meta,
                         Column('a', String(10)),
                         UniqueConstraint('a', name='uc_a'))
        uc_table.create()
        # PostgreSQL will create an implicit index for a unique
        # constraint.   Separately we get both
        indexes = set(i['name'] for i in insp.get_indexes('pgsql_uc'))
        constraints = set(i['name']
                          for i in insp.get_unique_constraints('pgsql_uc'))
        self.assert_('uc_a' in indexes)
        self.assert_('uc_a' in constraints)
        # reflection corrects for the dupe
        reflected = Table('pgsql_uc', MetaData(testing.db), autoload=True)
        indexes = set(i.name for i in reflected.indexes)
        constraints = set(uc.name for uc in reflected.constraints)
        self.assert_('uc_a' not in indexes)
        self.assert_('uc_a' in constraints)
    @testing.provide_metadata
    def test_reflect_unique_index(self):
        """A unique index reflects as an index, not a unique constraint."""
        insp = inspect(testing.db)
        meta = self.metadata
        # a unique index OTOH we are able to detect is an index
        # and not a unique constraint
        uc_table = Table('pgsql_uc', meta,
                         Column('a', String(10)),
                         Index('ix_a', 'a', unique=True))
        uc_table.create()
        indexes = dict((i['name'], i) for i in insp.get_indexes('pgsql_uc'))
        constraints = set(i['name']
                          for i in insp.get_unique_constraints('pgsql_uc'))
        self.assert_('ix_a' in indexes)
        assert indexes['ix_a']['unique']
        self.assert_('ix_a' not in constraints)
        reflected = Table('pgsql_uc', MetaData(testing.db), autoload=True)
        indexes = dict((i.name, i) for i in reflected.indexes)
        constraints = set(uc.name for uc in reflected.constraints)
        self.assert_('ix_a' in indexes)
        assert indexes['ix_a'].unique
        self.assert_('ix_a' not in constraints)
class CustomTypeReflectionTest(fixtures.TestBase):
    """Test that user-registered types in ``ischema_names`` are used for
    column reflection, with optional parenthesized arguments parsed out.
    """
    class CustomType(object):
        # Stand-in reflected type that records up to two string args.
        def __init__(self, arg1=None, arg2=None):
            self.arg1 = arg1
            self.arg2 = arg2
    # saved copy of the dialect's class-level type map, restored in teardown
    ischema_names = None
    def setup(self):
        # Work on a copy so tests can mutate the class-level map safely.
        ischema_names = postgresql.PGDialect.ischema_names
        postgresql.PGDialect.ischema_names = ischema_names.copy()
        self.ischema_names = ischema_names
    def teardown(self):
        # Restore the original class-level map.
        postgresql.PGDialect.ischema_names = self.ischema_names
        self.ischema_names = None
    def _assert_reflected(self, dialect):
        """Assert the dialect maps 'my_custom_type(...)' to CustomType
        with the parenthesized arguments split out."""
        for sch, args in [
            ('my_custom_type', (None, None)),
            ('my_custom_type()', (None, None)),
            ('my_custom_type(ARG1)', ('ARG1', None)),
            ('my_custom_type(ARG1, ARG2)', ('ARG1', 'ARG2')),
        ]:
            column_info = dialect._get_column_info(
                'colname', sch, None, False,
                {}, {}, 'public')
            assert isinstance(column_info['type'], self.CustomType)
            eq_(column_info['type'].arg1, args[0])
            eq_(column_info['type'].arg2, args[1])
    def test_clslevel(self):
        """Registration on the dialect class itself is honored."""
        postgresql.PGDialect.ischema_names['my_custom_type'] = self.CustomType
        dialect = postgresql.PGDialect()
        self._assert_reflected(dialect)
    def test_instancelevel(self):
        """Registration on a dialect instance's own copy is honored."""
        dialect = postgresql.PGDialect()
        dialect.ischema_names = dialect.ischema_names.copy()
        dialect.ischema_names['my_custom_type'] = self.CustomType
        self._assert_reflected(dialect)
| |
"""VideoCapture.py
by Markus Gritsch <gritsch@iue.tuwien.ac.at>
"""
import os
import time, string

from PIL import Image, ImageFont, ImageDraw

import vidcap
default_textpos = 'bl'  # t=top, b=bottom; l=left, c=center, r=right
textcolor = 0xffffff    # timestamp text color (white)
shadowcolor = 0x000000  # timestamp shadow/outline color (black)
def now():
    """Returns a string containing the current date and time.

    Format: ``HH:MM:SS Www D.M.Y`` (hours/minutes/seconds zero-padded,
    day/month/year unpadded).  This function is used internally by
    VideoCapture to generate the timestamp with which a snapshot can
    optionally be marked.
    """
    weekday = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    #weekday = ('Mo', 'Di', 'Mi', 'Do', 'Fr', 'Sa', 'So')
    #weekday = ('-', '-', '-', '-', '-', '-', '-')
    # avoid shadowing the builtin 'min'; name the minutes field explicitly
    y, m, d, hr, minute, sec, wd, jd, dst = time.localtime(time.time())
    # %02d zero-pads exactly like the deprecated string.zfill() did for
    # these 0-59/0-23 values, and works on Python 3 as well.
    return '%02d:%02d:%02d %s %s.%s.%s' % (
        hr, minute, sec, weekday[wd], d, m, y)
class Device:
    """Create instances of this class which will then represent video devices.
    For the lifetime of the instance, the device is blocked, so it can not be
    used by other applications (which is quite normal Windows behavior).
    If you want to access the device from another program, you have to delete
    the instance first (e.g. del cam).
    """
    def __init__(self, devnum=0, showVideoWindow=0):
        """devnum:          VideoCapture enumerates the available video capture devices
                            on your system.  If you have more than one device, specify
                            the desired one here.  The device number starts from 0.
           showVideoWindow: 0 ... do not display a video window (the default)
                            1 ... display a video window
                            Mainly used for debugging, since the video window
                            can not be closed or moved around.
        """
        self.dev = vidcap.new_Dev(devnum, showVideoWindow)
        # NOTE(review): requires a module-level 'import os' — verify it is
        # present at the top of this file.
        # NOTE(review): os.path.dirname() has no trailing separator, so
        # 'path + filename' concatenates without one; and boldfont is loaded
        # with no path prefix at all — presumably relies on the font search
        # path / current directory.  TODO confirm both lookups.
        path = os.path.dirname(__file__)
        self.normalfont = ImageFont.load_path(path + 'helvetica-10.pil')
        self.boldfont = ImageFont.load_path('helvB08.pil')
        self.font = None
    def displayPropertyPage(self):
        """deprecated
        Use the methods displayCaptureFilterProperties() and
        displayCapturePinProperties() instead.
        """
        print 'WARNING: displayPropertyPage() is deprecated.'
        print '         Use displayCaptureFilterProperties() and displayCapturePinProperties()'
        print '         instead!'
        self.dev.displaypropertypage()
    def displayCaptureFilterProperties(self):
        """Displays a dialog containing the property page of the capture filter.
        For VfW drivers you may find the option to select the resolution most
        likely here.
        """
        self.dev.displaycapturefilterproperties()
    def displayCapturePinProperties(self):
        """Displays a dialog containing the property page of the capture pin.
        For WDM drivers you may find the option to select the resolution most
        likely here.
        """
        self.dev.displaycapturepinproperties()
    def setResolution(self, width, height):
        """Sets the capture resolution. (without dialog)
        (contributed by Don Kimber <kimber@fxpal.com>)
        """
        self.dev.setresolution(width, height)
    def getBuffer(self):
        """Returns the raw pixel data.
        NOTE(review): despite the original doc saying "a string", getImage()
        unpacks this as a (buffer, width, height) 3-tuple.
        You probably don't want to use this function, but rather getImage() or
        saveSnapshot().
        """
        return self.dev.getbuffer()
    def getImage(self, timestamp=0, boldfont=0, textpos=default_textpos):
        """Returns a PIL Image instance.
        timestamp: 0 ... no timestamp (the default)
                   1 ... simple timestamp
                   2 ... timestamp with shadow
                   3 ... timestamp with outline
                   (values >= 3 draw a thin border; 4 adds a thick border)
        boldfont:  0 ... normal font (the default)
                   1 ... bold font
        textpos:   The position of the timestamp can be specified by a string
                   containing a combination of two characters.  One character
                   must be either t or b, the other one either l, c or r.
                   t ... top
                   b ... bottom
                   l ... left
                   c ... center
                   r ... right
                   The default value is 'bl'
        """
        if timestamp:
            #text = now()
            text = time.asctime(time.localtime(time.time()))
        buffer, width, height = self.getBuffer()
        if buffer:
            # raw BGR rows, bottom-up (stride 0, orientation -1)
            im = Image.fromstring('RGB', (width, height), buffer, 'raw', 'BGR', 0, -1)
            if timestamp:
                if boldfont:
                    self.font = self.boldfont
                else:
                    self.font = self.normalfont
                tw, th = self.font.getsize(text)
                # shrink the reported text box slightly — presumably to
                # tighten placement against the image edge; TODO confirm
                tw -= 2
                th -= 2
                if 't' in textpos:
                    y = -1
                elif 'b' in textpos:
                    y = height - th - 2
                else:
                    raise ValueError, "textpos must contain exactly one out of 't', 'b'"
                if 'l' in textpos:
                    x = 2
                elif 'c' in textpos:
                    x = (width - tw) / 2
                elif 'r' in textpos:
                    x = (width - tw) - 2
                else:
                    raise ValueError, "textpos must contain exactly one out of 'l', 'c', 'r'"
                draw = ImageDraw.Draw(im)
                if timestamp == 2:  # shadow
                    # offset copies in shadowcolor behind the main text
                    draw.text((x+1, y), text, font=self.font, fill=shadowcolor)
                    draw.text((x, y+1), text, font=self.font, fill=shadowcolor)
                    draw.text((x+1, y+1), text, font=self.font, fill=shadowcolor)
                else:
                    if timestamp >= 3:  # thin border
                        draw.text((x-1, y), text, font=self.font, fill=shadowcolor)
                        draw.text((x+1, y), text, font=self.font, fill=shadowcolor)
                        draw.text((x, y-1), text, font=self.font, fill=shadowcolor)
                        draw.text((x, y+1), text, font=self.font, fill=shadowcolor)
                    if timestamp == 4:  # thick border
                        draw.text((x-1, y-1), text, font=self.font, fill=shadowcolor)
                        draw.text((x+1, y-1), text, font=self.font, fill=shadowcolor)
                        draw.text((x-1, y+1), text, font=self.font, fill=shadowcolor)
                        draw.text((x+1, y+1), text, font=self.font, fill=shadowcolor)
                draw.text((x, y), text, font=self.font, fill=textcolor)
            return im
    def saveSnapshot(self, filename, timestamp=0, boldfont=0, textpos=default_textpos, **keywords):
        """Saves a snapshot to the harddisk.
        The filetype depends on the filename extension.  Everything that PIL
        can handle can be specified (foo.jpg, foo.gif, foo.bmp, ...).
        filename:  String containing the name of the resulting file.
        timestamp: see getImage()
        boldfont:  see getImage()
        textpos:   see getImage()
        Additional keyword arguments can be give which are just passed to the
        save() method of the Image class.  For example you can specify the
        compression level of a JPEG image by quality=75 (which is the default
        value anyway).
        """
        self.getImage(timestamp, boldfont, textpos).save(filename, **keywords)
if __name__ == '__main__':
    # Self-install: copy this module into the library directory of each
    # locally installed Python (raises if a target directory is absent,
    # matching the original behavior).
    import shutil
    # Raw strings so the Windows paths do not depend on '\P'/'\L'
    # happening not to be recognized escape sequences.
    for libdir in (r'C:\Python20\Lib', r'C:\Python21\Lib',
                   r'C:\Python22\Lib', r'C:\Python23\Lib',
                   r'C:\Python24\Lib'):
        shutil.copy('VideoCapture.py', libdir)
    #~ cam = Device(devnum=0)
    #~ #cam.displayPropertyPage() ## deprecated
    #~ #cam.displayCaptureFilterProperties()
    #~ #cam.displayCapturePinProperties()
    #~ #cam.setResolution(768, 576) ## PAL
    #~ #cam.setResolution(352, 288) ## CIF
    #~ cam.saveSnapshot('test.jpg', quality=75, timestamp=3, boldfont=1)
| |
import warnings
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase, ignore_warnings
from django.test.utils import reset_warning_registry
from django.utils.deprecation import RemovedInDjango110Warning
from ..utils import TestObj, setup
class IfTagTests(SimpleTestCase):
    """
    Tests for the {% if %} template tag: truth testing of context values,
    {% elif %}/{% else %} branches, comparison and containment operators,
    boolean combinators (and/or/not), filtered operands, syntax errors and
    short-circuit evaluation. Each test renders a one-line template fixture
    installed by the @setup decorator and checks the rendered output.
    """
    @setup({'if-tag01': '{% if foo %}yes{% else %}no{% endif %}'})
    def test_if_tag01(self):
        output = self.engine.render_to_string('if-tag01', {'foo': True})
        self.assertEqual(output, 'yes')
    @setup({'if-tag02': '{% if foo %}yes{% else %}no{% endif %}'})
    def test_if_tag02(self):
        output = self.engine.render_to_string('if-tag02', {'foo': False})
        self.assertEqual(output, 'no')
    @setup({'if-tag03': '{% if foo %}yes{% else %}no{% endif %}'})
    def test_if_tag03(self):
        # Missing variables are falsy.
        output = self.engine.render_to_string('if-tag03')
        self.assertEqual(output, 'no')
    @setup({'if-tag04': '{% if foo %}foo{% elif bar %}bar{% endif %}'})
    def test_if_tag04(self):
        output = self.engine.render_to_string('if-tag04', {'foo': True})
        self.assertEqual(output, 'foo')
    @setup({'if-tag05': '{% if foo %}foo{% elif bar %}bar{% endif %}'})
    def test_if_tag05(self):
        output = self.engine.render_to_string('if-tag05', {'bar': True})
        self.assertEqual(output, 'bar')
    @setup({'if-tag06': '{% if foo %}foo{% elif bar %}bar{% endif %}'})
    def test_if_tag06(self):
        output = self.engine.render_to_string('if-tag06')
        self.assertEqual(output, '')
    @setup({'if-tag07': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'})
    def test_if_tag07(self):
        output = self.engine.render_to_string('if-tag07', {'foo': True})
        self.assertEqual(output, 'foo')
    @setup({'if-tag08': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'})
    def test_if_tag08(self):
        output = self.engine.render_to_string('if-tag08', {'bar': True})
        self.assertEqual(output, 'bar')
    @setup({'if-tag09': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'})
    def test_if_tag09(self):
        output = self.engine.render_to_string('if-tag09')
        self.assertEqual(output, 'nothing')
    @setup({'if-tag10': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
    def test_if_tag10(self):
        output = self.engine.render_to_string('if-tag10', {'foo': True})
        self.assertEqual(output, 'foo')
    @setup({'if-tag11': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
    def test_if_tag11(self):
        output = self.engine.render_to_string('if-tag11', {'bar': True})
        self.assertEqual(output, 'bar')
    @setup({'if-tag12': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
    def test_if_tag12(self):
        output = self.engine.render_to_string('if-tag12', {'baz': True})
        self.assertEqual(output, 'baz')
    @setup({'if-tag13': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
    def test_if_tag13(self):
        output = self.engine.render_to_string('if-tag13')
        self.assertEqual(output, 'nothing')
    # Filters
    @setup({'if-tag-filter01': '{% if foo|length == 5 %}yes{% else %}no{% endif %}'})
    def test_if_tag_filter01(self):
        output = self.engine.render_to_string('if-tag-filter01', {'foo': 'abcde'})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-filter02': '{% if foo|upper == \'ABC\' %}yes{% else %}no{% endif %}'})
    def test_if_tag_filter02(self):
        output = self.engine.render_to_string('if-tag-filter02')
        self.assertEqual(output, 'no')
    # Equality
    @setup({'if-tag-eq01': '{% if foo == bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_eq01(self):
        # Two missing variables compare equal.
        output = self.engine.render_to_string('if-tag-eq01')
        self.assertEqual(output, 'yes')
    @setup({'if-tag-eq02': '{% if foo == bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_eq02(self):
        output = self.engine.render_to_string('if-tag-eq02', {'foo': 1})
        self.assertEqual(output, 'no')
    @setup({'if-tag-eq03': '{% if foo == bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_eq03(self):
        output = self.engine.render_to_string('if-tag-eq03', {'foo': 1, 'bar': 1})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-eq04': '{% if foo == bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_eq04(self):
        output = self.engine.render_to_string('if-tag-eq04', {'foo': 1, 'bar': 2})
        self.assertEqual(output, 'no')
    @setup({'if-tag-eq05': '{% if foo == \'\' %}yes{% else %}no{% endif %}'})
    def test_if_tag_eq05(self):
        # A missing variable is not equal to the empty string.
        output = self.engine.render_to_string('if-tag-eq05')
        self.assertEqual(output, 'no')
    # Inequality
    @setup({'if-tag-noteq01': '{% if foo != bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_noteq01(self):
        output = self.engine.render_to_string('if-tag-noteq01')
        self.assertEqual(output, 'no')
    @setup({'if-tag-noteq02': '{% if foo != bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_noteq02(self):
        output = self.engine.render_to_string('if-tag-noteq02', {'foo': 1})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-noteq03': '{% if foo != bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_noteq03(self):
        output = self.engine.render_to_string('if-tag-noteq03', {'foo': 1, 'bar': 1})
        self.assertEqual(output, 'no')
    @setup({'if-tag-noteq04': '{% if foo != bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_noteq04(self):
        output = self.engine.render_to_string('if-tag-noteq04', {'foo': 1, 'bar': 2})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-noteq05': '{% if foo != "" %}yes{% else %}no{% endif %}'})
    def test_if_tag_noteq05(self):
        output = self.engine.render_to_string('if-tag-noteq05')
        self.assertEqual(output, 'yes')
    # Comparison
    @setup({'if-tag-gt-01': '{% if 2 > 1 %}yes{% else %}no{% endif %}'})
    def test_if_tag_gt_01(self):
        output = self.engine.render_to_string('if-tag-gt-01')
        self.assertEqual(output, 'yes')
    @setup({'if-tag-gt-02': '{% if 1 > 1 %}yes{% else %}no{% endif %}'})
    def test_if_tag_gt_02(self):
        output = self.engine.render_to_string('if-tag-gt-02')
        self.assertEqual(output, 'no')
    @setup({'if-tag-gte-01': '{% if 1 >= 1 %}yes{% else %}no{% endif %}'})
    def test_if_tag_gte_01(self):
        output = self.engine.render_to_string('if-tag-gte-01')
        self.assertEqual(output, 'yes')
    @setup({'if-tag-gte-02': '{% if 1 >= 2 %}yes{% else %}no{% endif %}'})
    def test_if_tag_gte_02(self):
        output = self.engine.render_to_string('if-tag-gte-02')
        self.assertEqual(output, 'no')
    @setup({'if-tag-lt-01': '{% if 1 < 2 %}yes{% else %}no{% endif %}'})
    def test_if_tag_lt_01(self):
        output = self.engine.render_to_string('if-tag-lt-01')
        self.assertEqual(output, 'yes')
    @setup({'if-tag-lt-02': '{% if 1 < 1 %}yes{% else %}no{% endif %}'})
    def test_if_tag_lt_02(self):
        output = self.engine.render_to_string('if-tag-lt-02')
        self.assertEqual(output, 'no')
    @setup({'if-tag-lte-01': '{% if 1 <= 1 %}yes{% else %}no{% endif %}'})
    def test_if_tag_lte_01(self):
        output = self.engine.render_to_string('if-tag-lte-01')
        self.assertEqual(output, 'yes')
    @setup({'if-tag-lte-02': '{% if 2 <= 1 %}yes{% else %}no{% endif %}'})
    def test_if_tag_lte_02(self):
        output = self.engine.render_to_string('if-tag-lte-02')
        self.assertEqual(output, 'no')
    # Contains
    @setup({'if-tag-in-01': '{% if 1 in x %}yes{% else %}no{% endif %}'})
    def test_if_tag_in_01(self):
        output = self.engine.render_to_string('if-tag-in-01', {'x': [1]})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-in-02': '{% if 2 in x %}yes{% else %}no{% endif %}'})
    def test_if_tag_in_02(self):
        output = self.engine.render_to_string('if-tag-in-02', {'x': [1]})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not-in-01': '{% if 1 not in x %}yes{% else %}no{% endif %}'})
    def test_if_tag_not_in_01(self):
        output = self.engine.render_to_string('if-tag-not-in-01', {'x': [1]})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not-in-02': '{% if 2 not in x %}yes{% else %}no{% endif %}'})
    def test_if_tag_not_in_02(self):
        output = self.engine.render_to_string('if-tag-not-in-02', {'x': [1]})
        self.assertEqual(output, 'yes')
    # AND
    @setup({'if-tag-and01': '{% if foo and bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_and01(self):
        output = self.engine.render_to_string('if-tag-and01', {'foo': True, 'bar': True})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-and02': '{% if foo and bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_and02(self):
        output = self.engine.render_to_string('if-tag-and02', {'foo': True, 'bar': False})
        self.assertEqual(output, 'no')
    @setup({'if-tag-and03': '{% if foo and bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_and03(self):
        output = self.engine.render_to_string('if-tag-and03', {'foo': False, 'bar': True})
        self.assertEqual(output, 'no')
    @setup({'if-tag-and04': '{% if foo and bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_and04(self):
        output = self.engine.render_to_string('if-tag-and04', {'foo': False, 'bar': False})
        self.assertEqual(output, 'no')
    @setup({'if-tag-and05': '{% if foo and bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_and05(self):
        output = self.engine.render_to_string('if-tag-and05', {'foo': False})
        self.assertEqual(output, 'no')
    @setup({'if-tag-and06': '{% if foo and bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_and06(self):
        output = self.engine.render_to_string('if-tag-and06', {'bar': False})
        self.assertEqual(output, 'no')
    @setup({'if-tag-and07': '{% if foo and bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_and07(self):
        output = self.engine.render_to_string('if-tag-and07', {'foo': True})
        self.assertEqual(output, 'no')
    @setup({'if-tag-and08': '{% if foo and bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_and08(self):
        output = self.engine.render_to_string('if-tag-and08', {'bar': True})
        self.assertEqual(output, 'no')
    # OR
    @setup({'if-tag-or01': '{% if foo or bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_or01(self):
        output = self.engine.render_to_string('if-tag-or01', {'foo': True, 'bar': True})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-or02': '{% if foo or bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_or02(self):
        output = self.engine.render_to_string('if-tag-or02', {'foo': True, 'bar': False})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-or03': '{% if foo or bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_or03(self):
        output = self.engine.render_to_string('if-tag-or03', {'foo': False, 'bar': True})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-or04': '{% if foo or bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_or04(self):
        output = self.engine.render_to_string('if-tag-or04', {'foo': False, 'bar': False})
        self.assertEqual(output, 'no')
    @setup({'if-tag-or05': '{% if foo or bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_or05(self):
        output = self.engine.render_to_string('if-tag-or05', {'foo': False})
        self.assertEqual(output, 'no')
    @setup({'if-tag-or06': '{% if foo or bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_or06(self):
        output = self.engine.render_to_string('if-tag-or06', {'bar': False})
        self.assertEqual(output, 'no')
    @setup({'if-tag-or07': '{% if foo or bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_or07(self):
        output = self.engine.render_to_string('if-tag-or07', {'foo': True})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-or08': '{% if foo or bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_or08(self):
        output = self.engine.render_to_string('if-tag-or08', {'bar': True})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-or09': '{% if foo or bar or baz %}yes{% else %}no{% endif %}'})
    def test_if_tag_or09(self):
        """
        multiple ORs
        """
        output = self.engine.render_to_string('if-tag-or09', {'baz': True})
        self.assertEqual(output, 'yes')
    # NOT
    @setup({'if-tag-not01': '{% if not foo %}no{% else %}yes{% endif %}'})
    def test_if_tag_not01(self):
        output = self.engine.render_to_string('if-tag-not01', {'foo': True})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not02': '{% if not not foo %}no{% else %}yes{% endif %}'})
    def test_if_tag_not02(self):
        output = self.engine.render_to_string('if-tag-not02', {'foo': True})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not06': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not06(self):
        output = self.engine.render_to_string('if-tag-not06')
        self.assertEqual(output, 'no')
    @setup({'if-tag-not07': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not07(self):
        output = self.engine.render_to_string('if-tag-not07', {'foo': True, 'bar': True})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not08': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not08(self):
        output = self.engine.render_to_string('if-tag-not08', {'foo': True, 'bar': False})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not09': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not09(self):
        output = self.engine.render_to_string('if-tag-not09', {'foo': False, 'bar': True})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not10': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not10(self):
        output = self.engine.render_to_string('if-tag-not10', {'foo': False, 'bar': False})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not11': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not11(self):
        output = self.engine.render_to_string('if-tag-not11')
        self.assertEqual(output, 'no')
    @setup({'if-tag-not12': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not12(self):
        output = self.engine.render_to_string('if-tag-not12', {'foo': True, 'bar': True})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not13': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not13(self):
        output = self.engine.render_to_string('if-tag-not13', {'foo': True, 'bar': False})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not14': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not14(self):
        output = self.engine.render_to_string('if-tag-not14', {'foo': False, 'bar': True})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not15': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not15(self):
        output = self.engine.render_to_string('if-tag-not15', {'foo': False, 'bar': False})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not16': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not16(self):
        output = self.engine.render_to_string('if-tag-not16')
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not17': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not17(self):
        output = self.engine.render_to_string('if-tag-not17', {'foo': True, 'bar': True})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not18': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not18(self):
        output = self.engine.render_to_string('if-tag-not18', {'foo': True, 'bar': False})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not19': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not19(self):
        output = self.engine.render_to_string('if-tag-not19', {'foo': False, 'bar': True})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not20': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not20(self):
        output = self.engine.render_to_string('if-tag-not20', {'foo': False, 'bar': False})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not21': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not21(self):
        output = self.engine.render_to_string('if-tag-not21')
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not22': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not22(self):
        output = self.engine.render_to_string('if-tag-not22', {'foo': True, 'bar': True})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not23': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not23(self):
        output = self.engine.render_to_string('if-tag-not23', {'foo': True, 'bar': False})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not24': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not24(self):
        output = self.engine.render_to_string('if-tag-not24', {'foo': False, 'bar': True})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not25': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not25(self):
        output = self.engine.render_to_string('if-tag-not25', {'foo': False, 'bar': False})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not26': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not26(self):
        output = self.engine.render_to_string('if-tag-not26')
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not27': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not27(self):
        output = self.engine.render_to_string('if-tag-not27', {'foo': True, 'bar': True})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not28': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not28(self):
        output = self.engine.render_to_string('if-tag-not28', {'foo': True, 'bar': False})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not29': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not29(self):
        output = self.engine.render_to_string('if-tag-not29', {'foo': False, 'bar': True})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not30': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not30(self):
        output = self.engine.render_to_string('if-tag-not30', {'foo': False, 'bar': False})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not31': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not31(self):
        output = self.engine.render_to_string('if-tag-not31')
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not32': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not32(self):
        output = self.engine.render_to_string('if-tag-not32', {'foo': True, 'bar': True})
        self.assertEqual(output, 'no')
    @setup({'if-tag-not33': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not33(self):
        output = self.engine.render_to_string('if-tag-not33', {'foo': True, 'bar': False})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not34': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not34(self):
        output = self.engine.render_to_string('if-tag-not34', {'foo': False, 'bar': True})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-not35': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_not35(self):
        output = self.engine.render_to_string('if-tag-not35', {'foo': False, 'bar': False})
        self.assertEqual(output, 'yes')
    # Various syntax errors
    @setup({'if-tag-error01': '{% if %}yes{% endif %}'})
    def test_if_tag_error01(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('if-tag-error01')
    @setup({'if-tag-error02': '{% if foo and %}yes{% else %}no{% endif %}'})
    def test_if_tag_error02(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('if-tag-error02', {'foo': True})
    @setup({'if-tag-error03': '{% if foo or %}yes{% else %}no{% endif %}'})
    def test_if_tag_error03(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('if-tag-error03', {'foo': True})
    @setup({'if-tag-error04': '{% if not foo and %}yes{% else %}no{% endif %}'})
    def test_if_tag_error04(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('if-tag-error04', {'foo': True})
    @setup({'if-tag-error05': '{% if not foo or %}yes{% else %}no{% endif %}'})
    def test_if_tag_error05(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('if-tag-error05', {'foo': True})
    @setup({'if-tag-error06': '{% if abc def %}yes{% endif %}'})
    def test_if_tag_error06(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('if-tag-error06')
    @setup({'if-tag-error07': '{% if not %}yes{% endif %}'})
    def test_if_tag_error07(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('if-tag-error07')
    @setup({'if-tag-error08': '{% if and %}yes{% endif %}'})
    def test_if_tag_error08(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('if-tag-error08')
    @setup({'if-tag-error09': '{% if or %}yes{% endif %}'})
    def test_if_tag_error09(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('if-tag-error09')
    @setup({'if-tag-error10': '{% if == %}yes{% endif %}'})
    def test_if_tag_error10(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('if-tag-error10')
    @setup({'if-tag-error11': '{% if 1 == %}yes{% endif %}'})
    def test_if_tag_error11(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('if-tag-error11')
    @setup({'if-tag-error12': '{% if a not b %}yes{% endif %}'})
    def test_if_tag_error12(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('if-tag-error12')
    @setup({'if-tag-shortcircuit01': '{% if x.is_true or x.is_bad %}yes{% else %}no{% endif %}'})
    def test_if_tag_shortcircuit01(self):
        """
        If evaluations are shortcircuited where possible
        """
        output = self.engine.render_to_string('if-tag-shortcircuit01', {'x': TestObj()})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-shortcircuit02': '{% if x.is_false and x.is_bad %}yes{% else %}no{% endif %}'})
    def test_if_tag_shortcircuit02(self):
        """
        The is_bad() function should not be evaluated. If it is, an
        exception is raised.
        """
        output = self.engine.render_to_string('if-tag-shortcircuit02', {'x': TestObj()})
        self.assertEqual(output, 'no')
    @setup({'if-tag-badarg01': '{% if x|default_if_none:y %}yes{% endif %}'})
    def test_if_tag_badarg01(self):
        """
        Non-existent args
        """
        output = self.engine.render_to_string('if-tag-badarg01')
        self.assertEqual(output, '')
    @setup({'if-tag-badarg02': '{% if x|default_if_none:y %}yes{% endif %}'})
    def test_if_tag_badarg02(self):
        output = self.engine.render_to_string('if-tag-badarg02', {'y': 0})
        self.assertEqual(output, '')
    @setup({'if-tag-badarg03': '{% if x|default_if_none:y %}yes{% endif %}'})
    def test_if_tag_badarg03(self):
        output = self.engine.render_to_string('if-tag-badarg03', {'y': 1})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-badarg04': '{% if x|default_if_none:y %}yes{% else %}no{% endif %}'})
    def test_if_tag_badarg04(self):
        output = self.engine.render_to_string('if-tag-badarg04')
        self.assertEqual(output, 'no')
    @setup({'if-tag-eq-deprecated': '{% if foo = bar %}yes{% else %}no{% endif %}'},
           test_once=True)
    def test_if_tag_eq_deprecated(self):
        """The single '=' operator renders but emits a deprecation warning."""
        reset_warning_registry()
        with warnings.catch_warnings(record=True) as warns:
            warnings.simplefilter('always')
            output = self.engine.render_to_string('if-tag-eq-deprecated')
            self.assertEqual(output, 'yes')
            self.assertEqual(len(warns), 1)
            self.assertEqual(
                str(warns[0].message),
                "Operator '=' is deprecated and will be removed in Django 1.10. "
                "Use '==' instead."
            )
@ignore_warnings(category=RemovedInDjango110Warning)
class TestEqualitySingleEqualsSign(SimpleTestCase):
    """
    The deprecated single '=' equality operator behaves exactly like '=='
    while its deprecation warning is suppressed.
    """
    # The following tests should be changed to template.TemplateSyntaxError
    # (or simply removed) when the deprecation path ends in Django 1.10.
    @setup({'if-tag-eq01': '{% if foo = bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_eq01(self):
        output = self.engine.render_to_string('if-tag-eq01', {'foo': 1})
        self.assertEqual(output, 'no')
    @setup({'if-tag-eq02': '{% if foo = bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_eq02(self):
        output = self.engine.render_to_string('if-tag-eq02', {'foo': 1, 'bar': 1})
        self.assertEqual(output, 'yes')
    @setup({'if-tag-eq03': '{% if foo = bar %}yes{% else %}no{% endif %}'})
    def test_if_tag_eq03(self):
        output = self.engine.render_to_string('if-tag-eq03', {'foo': 1, 'bar': 2})
        self.assertEqual(output, 'no')
    @setup({'if-tag-eq04': '{% if foo == \'\' %}yes{% else %}no{% endif %}'})
    def test_if_tag_eq04(self):
        output = self.engine.render_to_string('if-tag-eq04')
        self.assertEqual(output, 'no')
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
try:
from linode import api as linode_api
HAS_LINODE = True
except ImportError as ie:
HAS_LINODE = False
LINODE_IMPORT_ERROR = str(ie)
DOCUMENTATION = '''
---
module: linode_nodebalancer
short_description: Add / Delete / Update a linode nodebalancer
description:
- Wrapper around the linode nodebalancer api https://www.linode.com/api/nodebalancer
author: Duncan Morris (@duncanmorris)
requirements:
- This module runs locally, not on the remote server(s)
- It relies on the linode-python library https://github.com/tjfontaine/linode-python
options:
api_key:
required: false
type: string
description:
- Your linode api key, (see https://www.linode.com/docs/platform/api/api-key). You could pass it in directly to the modele, or set it as an environment variable (LINODE_API_KEY).
name:
required: false
type: string
description:
- The name of the NodeBalancer being targeted.
node_balancer_id:
required: false
type: integer
description:
- The id of the NodeBalancer being targeted. This is not exposed anywhere obvious (other than the api), so typically you would target via name. One of name, or node_balancer_id is required.
state:
required: false
choices: ['present', 'absent']
default: present
type: string
description:
- The desired state of the nodebalancer
datacenter_id:
required: false
default: 7 (London)
type: integer
description:
- The id of the linode datacenter the nodebalancer should be in. Must be an integer between 2 and 9. See linode for the full list - https://www.linode.com/api/utility/avail.datacenters
paymentterm:
required: false
type: integer
default: 1
choices: [1, 12, 24]
description: The payment term for the nodebalancer. One of 1, 12, or 24 months
client_conn_throttle:
required: false
default: 0
type: integer
description:
- Allowed connections per second, per client IP. 0 to disable.
'''
EXAMPLES = '''
- name: Ensure NodeBalancer Name is present
local_action:
module: linode_nodebalancer
api_key: "{{ linode_api_key }}"
name: "NodeBalancer Name"
state: present
'''
def handle_api_error(func):
    """Decorator that converts linode api errors into Ansible failures.

    Catches ``linode_api.ApiError`` raised by the wrapped function and
    reports it via ``fail_json``. An ansible module instance must be the
    first positional argument of the wrapped function.
    """
    # Local import keeps this module's quirky bottom-of-file wildcard
    # import layout untouched.
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring
    def handle(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except linode_api.ApiError as e:
            # The linode api reports errors as a list of dicts in e.value.
            code = e.value[0]['ERRORCODE']
            err = e.value[0]['ERRORMESSAGE']
            msg = "FATAL: Code [{code}] - {err}".format(code=code,
                                                        err=err)
            return args[0].fail_json(msg=msg)
    return handle
def nodebalancer_find(api, node_balancer_id, name):
    """Look up and return a single nodebalancer dict from the api.

    If node_balancer_id is present, look up based on that; otherwise scan
    all nodebalancers for one whose LABEL matches ``name``.

    Returns the matching nodebalancer dict, or None when nothing matches.
    """
    if node_balancer_id:
        # nodebalancer_list() returns a list even when filtered by id;
        # unwrap it so both lookup paths hand callers a single dict
        # (the caller indexes the result with ['LABEL'] etc.).
        found = api.nodebalancer_list(NodeBalancerID=node_balancer_id)
        return found[0] if found else None
    if name:
        for nb in api.nodebalancer_list():
            if nb['LABEL'] == name:
                return nb
    return None
@handle_api_error
def linodeNodeBalancers(module, api, state, name, node_balancer_id,
                        datacenter_id, paymentterm, client_conn_throttle):
    """Converge the targeted nodebalancer towards the requested state.

    Present + existing: update label/throttle if they differ.
    Present + missing:  create it.
    Absent + existing:  delete it.
    Absent + missing:   nothing to do.
    Always exits the module via ``module.exit_json``.
    """
    changed = False
    balancer = nodebalancer_find(api, node_balancer_id, name)
    wants_present = (state == "present")

    if balancer and wants_present:
        # Only touch the api when a tracked setting actually differs.
        needs_update = (balancer['LABEL'] != name or
                        balancer['CLIENTCONNTHROTTLE'] != client_conn_throttle)
        if needs_update:
            updated = api.nodebalancer_update(
                NodeBalancerID=balancer['NODEBALANCERID'],
                Label=name,
                ClientConnThrottle=client_conn_throttle)
            changed = True
            # Re-fetch so exit_json reports the post-update facts.
            balancer = nodebalancer_find(api,
                                         updated['NodeBalancerID'],
                                         name)
    elif balancer:
        # state == "absent" with an existing balancer: remove it.
        api.nodebalancer_delete(
            NodeBalancerId=balancer['NODEBALANCERID']
        )
        balancer = None
        changed = True
    elif wants_present:
        api.nodebalancer_create(DatacenterID=datacenter_id,
                                PaymentTerm=paymentterm,
                                Label=name)
        changed = True
    # absent + missing falls through with changed False.
    module.exit_json(changed=changed, instances=balancer)
# ===========================================
def main():
    """Module entry point: parse arguments, authenticate, then converge."""
    import os  # explicit import; the wildcard basic import also provides it

    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(required=False,
                         aliases=['linode_api_id'],
                         type='str'),
            name=dict(required=False,
                      type='str'),
            node_balancer_id=dict(required=False,
                                  type='int'),
            state=dict(required=False,
                       default='present',
                       choices=['present', 'absent'],
                       type='str'),
            datacenter_id=dict(required=False,
                               default=7,
                               type='int'),
            paymentterm=dict(required=False,
                             default=1,
                             choices=[1, 12, 24],
                             type='int'),
            client_conn_throttle=dict(required=False,
                                      default=0,
                                      type='int')
        ),
        required_one_of=[
            ['name', 'node_balancer_id']
        ],
        supports_check_mode=False
    )
    if not HAS_LINODE:
        module.fail_json(msg=LINODE_IMPORT_ERROR + " (pip install linode-python)")
    api_key = module.params.get('api_key')
    name = module.params.get('name')
    node_balancer_id = module.params.get('node_balancer_id')
    state = module.params.get('state')
    datacenter_id = module.params.get('datacenter_id')
    paymentterm = module.params.get('paymentterm')
    client_conn_throttle = module.params.get('client_conn_throttle')
    # Fall back to the environment when no api_key module argument is given.
    # Python-3-compatible 'except ... as e' syntax (was 'except KeyError, e').
    if not api_key:
        try:
            api_key = os.environ['LINODE_API_KEY']
        except KeyError as e:
            module.fail_json(msg='Unable to load %s' % str(e))
    # Set up the auth and verify it with a cheap echo call.
    try:
        api = linode_api.Api(api_key)
        api.test_echo()
    except Exception as e:
        # NOTE(review): assumes a linode ApiError-style exception exposing
        # .value -- a non-api failure here would itself raise; confirm.
        module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
    linodeNodeBalancers(module, api, state, name, node_balancer_id,
                        datacenter_id, paymentterm, client_conn_throttle)
# Ansible 1.x boilerplate: the wildcard import injects AnsibleModule (and
# common helpers) into this namespace and must stay at the bottom of the file.
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from eventlet import timeout as etimeout
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import units
from nova import exception
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.hyperv import test_base
from nova.virt import hardware
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmops
from nova.virt.hyperv import vmutils
CONF = cfg.CONF
class VMOpsTestCase(test_base.HyperVBaseTestCase):
    """Unit tests for the Hyper-V VMOps class."""
    # Fixture constants shared by the tests in this class.
    _FAKE_TIMEOUT = 2
    FAKE_SIZE = 10  # root disk size used by the create-root-vhd tests
    FAKE_DIR = 'fake_dir'
    FAKE_ROOT_PATH = 'C:\\path\\to\\fake.%s'  # %s is filled with a vhd format
    FAKE_CONFIG_DRIVE_ISO = 'configdrive.iso'
    FAKE_CONFIG_DRIVE_VHD = 'configdrive.vhd'
    FAKE_UUID = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
    FAKE_LOG = 'fake_log'
    ISO9660 = 'iso9660'
    _FAKE_CONFIGDRIVE_PATH = 'C:/fake_instance_dir/configdrive.vhd'
    def setUp(self):
        """Create a VMOps instance with all of its utils replaced by mocks
        so no Hyper-V calls are made by the tests."""
        super(VMOpsTestCase, self).setUp()
        self.context = 'fake-context'
        self._vmops = vmops.VMOps()
        self._vmops._vmutils = mock.MagicMock()
        self._vmops._vhdutils = mock.MagicMock()
        self._vmops._pathutils = mock.MagicMock()
        self._vmops._hostutils = mock.MagicMock()
    @mock.patch('nova.virt.hyperv.vmops.importutils.import_object')
    def test_load_vif_driver_class(self, mock_import_object):
        """_load_vif_driver_class instantiates the driver mapped to the
        configured network_api_class and stores it on the VMOps object."""
        self._vmops._load_vif_driver_class()
        mock_import_object.assert_called_once_with(
            self._vmops._vif_driver_class_map[CONF.network_api_class])
        self.assertEqual(self._vmops._vif_driver,
                         mock_import_object.return_value)
    @mock.patch('nova.virt.hyperv.vmops.importutils.import_object')
    def test_load_vif_driver_class_error(self, mock_import_object):
        # An unmapped network_api_class (KeyError from the class map) is
        # surfaced as TypeError by _load_vif_driver_class.
        mock_import_object.side_effect = KeyError
        self.assertRaises(TypeError, self._vmops._load_vif_driver_class)
def test_list_instances(self):
mock_instance = mock.MagicMock()
self._vmops._vmutils.list_instances.return_value = [mock_instance]
response = self._vmops.list_instances()
self._vmops._vmutils.list_instances.assert_called_once_with()
self.assertEqual(response, [mock_instance])
def _test_get_info(self, vm_exists):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_info = mock.MagicMock(spec_set=dict)
fake_info = {'EnabledState': 2,
'MemoryUsage': mock.sentinel.FAKE_MEM_KB,
'NumberOfProcessors': mock.sentinel.FAKE_NUM_CPU,
'UpTime': mock.sentinel.FAKE_CPU_NS}
def getitem(key):
return fake_info[key]
mock_info.__getitem__.side_effect = getitem
expected = hardware.InstanceInfo(state=constants.HYPERV_POWER_STATE[2],
max_mem_kb=mock.sentinel.FAKE_MEM_KB,
mem_kb=mock.sentinel.FAKE_MEM_KB,
num_cpu=mock.sentinel.FAKE_NUM_CPU,
cpu_time_ns=mock.sentinel.FAKE_CPU_NS)
self._vmops._vmutils.vm_exists.return_value = vm_exists
self._vmops._vmutils.get_vm_summary_info.return_value = mock_info
if not vm_exists:
self.assertRaises(exception.InstanceNotFound,
self._vmops.get_info, mock_instance)
else:
response = self._vmops.get_info(mock_instance)
self._vmops._vmutils.vm_exists.assert_called_once_with(
mock_instance.name)
self._vmops._vmutils.get_vm_summary_info.assert_called_once_with(
mock_instance.name)
self.assertEqual(response, expected)
    def test_get_info(self):
        """Happy path: the VM exists."""
        self._test_get_info(vm_exists=True)
    def test_get_info_exception(self):
        """Error path: a missing VM raises InstanceNotFound."""
        self._test_get_info(vm_exists=False)
def _prepare_create_root_vhd_mocks(self, use_cow_images, vhd_format,
vhd_size):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.root_gb = self.FAKE_SIZE
self.flags(use_cow_images=use_cow_images)
self._vmops._vhdutils.get_vhd_info.return_value = {'MaxInternalSize':
vhd_size * units.Gi}
self._vmops._vhdutils.get_vhd_format.return_value = vhd_format
root_vhd_internal_size = mock_instance.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
get_size.return_value = root_vhd_internal_size
self._vmops._pathutils.exists.return_value = True
return mock_instance
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd_exception(self, mock_get_cached_image,
                                    vhd_format):
    """When the cached image is larger than the flavor root disk,
    _create_root_vhd raises VHDResizeException and removes the
    partially-created root VHD.
    """
    mock_instance = self._prepare_create_root_vhd_mocks(
        use_cow_images=False, vhd_format=vhd_format,
        vhd_size=(self.FAKE_SIZE + 1))
    fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
    mock_get_cached_image.return_value = fake_vhd_path
    fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
    self.assertRaises(vmutils.VHDResizeException,
                      self._vmops._create_root_vhd, self.context,
                      mock_instance)
    # No resize attempted; the copied root disk must be cleaned up.
    self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
    self._vmops._pathutils.exists.assert_called_once_with(
        fake_root_path)
    self._vmops._pathutils.remove.assert_called_once_with(
        fake_root_path)
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd_qcow(self, mock_get_cached_image, vhd_format):
    """With use_cow_images=True a differencing disk is created on top of
    the cached image; VHDX children are additionally resized while plain
    VHD children are not.
    """
    mock_instance = self._prepare_create_root_vhd_mocks(
        use_cow_images=True, vhd_format=vhd_format,
        vhd_size=(self.FAKE_SIZE - 1))
    fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
    mock_get_cached_image.return_value = fake_vhd_path
    fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
    root_vhd_internal_size = mock_instance.root_gb * units.Gi
    get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
    response = self._vmops._create_root_vhd(context=self.context,
                                            instance=mock_instance)
    self.assertEqual(fake_root_path, response)
    self._vmops._pathutils.get_root_vhd_path.assert_called_with(
        mock_instance.name, vhd_format)
    differencing_vhd = self._vmops._vhdutils.create_differencing_vhd
    differencing_vhd.assert_called_with(fake_root_path, fake_vhd_path)
    self._vmops._vhdutils.get_vhd_info.assert_called_once_with(
        fake_vhd_path)
    if vhd_format is constants.DISK_FORMAT_VHD:
        # Plain VHD differencing disks cannot be resized independently.
        self.assertFalse(get_size.called)
        self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
    else:
        get_size.assert_called_once_with(fake_vhd_path,
                                         root_vhd_internal_size)
        self._vmops._vhdutils.resize_vhd.assert_called_once_with(
            fake_root_path, root_vhd_internal_size, is_file_max_size=False)
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd(self, mock_get_cached_image, vhd_format):
    """Non-COW path: the cached image is copied to the root VHD path and
    resized up to the flavor root disk size.
    """
    mock_instance = self._prepare_create_root_vhd_mocks(
        use_cow_images=False, vhd_format=vhd_format,
        vhd_size=(self.FAKE_SIZE - 1))
    fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
    mock_get_cached_image.return_value = fake_vhd_path
    fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
    root_vhd_internal_size = mock_instance.root_gb * units.Gi
    get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
    response = self._vmops._create_root_vhd(context=self.context,
                                            instance=mock_instance)
    self.assertEqual(fake_root_path, response)
    self._vmops._pathutils.get_root_vhd_path.assert_called_with(
        mock_instance.name, vhd_format)
    self._vmops._pathutils.copyfile.assert_called_once_with(
        fake_vhd_path, fake_root_path)
    get_size.assert_called_once_with(fake_vhd_path, root_vhd_internal_size)
    self._vmops._vhdutils.resize_vhd.assert_called_once_with(
        fake_root_path, root_vhd_internal_size, is_file_max_size=False)
def test_create_root_vhd(self):
    """Root disk created by copy+resize from a VHD cached image."""
    self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD)
def test_create_root_vhdx(self):
    """Root disk created by copy+resize from a VHDX cached image."""
    self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHDX)
def test_create_root_vhd_use_cow_images_true(self):
    """Differencing (COW) root disk on top of a VHD base image."""
    self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHD)
def test_create_root_vhdx_use_cow_images_true(self):
    """Differencing (COW) root disk on top of a VHDX base image."""
    self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHDX)
def test_create_root_vhdx_size_less_than_internal(self):
    """An image bigger than the flavor root disk must fail.

    NOTE(review): despite the 'vhdx' in the test name this passes
    DISK_FORMAT_VHD -- confirm whether VHDX was intended.
    """
    self._test_create_root_vhd_exception(
        vhd_format=constants.DISK_FORMAT_VHD)
def test_is_resize_needed_exception(self):
    """Shrinking the root disk (new size < current) is rejected."""
    inst = mock.MagicMock()
    self.assertRaises(
        vmutils.VHDResizeException, self._vmops._is_resize_needed,
        mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE - 1, inst)
def test_is_resize_needed_true(self):
    """Growing the disk reports that a resize is needed."""
    inst = mock.MagicMock()
    self.assertTrue(self._vmops._is_resize_needed(
        mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE + 1, inst))
def test_is_resize_needed_false(self):
    """Equal sizes require no resize."""
    inst = mock.MagicMock()
    self.assertFalse(self._vmops._is_resize_needed(
        mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE, inst))
def test_create_ephemeral_vhd(self):
    """create_ephemeral_vhd builds a dynamic disk in the best supported
    format and returns its path.
    """
    instance = fake_instance.fake_instance_obj(self.context)
    instance.ephemeral_gb = self.FAKE_SIZE
    vhdutils = self._vmops._vhdutils
    pathutils = self._vmops._pathutils
    vhdutils.get_best_supported_vhd_format.return_value = (
        mock.sentinel.FAKE_FORMAT)
    pathutils.get_ephemeral_vhd_path.return_value = mock.sentinel.FAKE_PATH

    result = self._vmops.create_ephemeral_vhd(instance=instance)

    self.assertEqual(mock.sentinel.FAKE_PATH, result)
    pathutils.get_ephemeral_vhd_path.assert_called_with(
        instance.name, mock.sentinel.FAKE_FORMAT)
    vhdutils.create_dynamic_vhd.assert_called_with(
        mock.sentinel.FAKE_PATH, instance.ephemeral_gb * units.Gi,
        mock.sentinel.FAKE_FORMAT)
@mock.patch('nova.virt.hyperv.vmops.VMOps.destroy')
@mock.patch('nova.virt.hyperv.vmops.VMOps.power_on')
@mock.patch('nova.virt.hyperv.vmops.VMOps.attach_config_drive')
@mock.patch('nova.virt.hyperv.vmops.VMOps._create_config_drive')
@mock.patch('nova.virt.configdrive.required_by')
@mock.patch('nova.virt.hyperv.vmops.VMOps.create_instance')
@mock.patch('nova.virt.hyperv.vmops.VMOps.get_image_vm_generation')
@mock.patch('nova.virt.hyperv.vmops.VMOps.create_ephemeral_vhd')
@mock.patch('nova.virt.hyperv.vmops.VMOps._create_root_vhd')
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps.'
            'ebs_root_in_block_devices')
@mock.patch('nova.virt.hyperv.vmops.VMOps._delete_disk_files')
def _test_spawn(self, mock_delete_disk_files,
                mock_ebs_root_in_block_devices, mock_create_root_vhd,
                mock_create_ephemeral_vhd, mock_get_image_vm_gen,
                mock_create_instance, mock_configdrive_required,
                mock_create_config_drive, mock_attach_config_drive,
                mock_power_on, mock_destroy, exists, boot_from_volume,
                configdrive_required, fail):
    """Drive VMOps.spawn through its three outcomes.

    exists -> InstanceExists is raised; fail=HyperVException ->
    create_instance fails and destroy is called; otherwise the full
    happy path is verified (disks, config drive, power on).
    """
    mock_instance = fake_instance.fake_instance_obj(self.context)
    mock_image_meta = mock.MagicMock()
    # Boot-from-volume instances have no local root disk.
    fake_root_path = mock_create_root_vhd.return_value
    fake_root_path = None if boot_from_volume else fake_root_path
    fake_ephemeral_path = mock_create_ephemeral_vhd.return_value
    fake_vm_gen = mock_get_image_vm_gen.return_value
    fake_config_drive_path = mock_create_config_drive.return_value
    self._vmops._vmutils.vm_exists.return_value = exists
    mock_ebs_root_in_block_devices.return_value = boot_from_volume
    mock_create_root_vhd.return_value = fake_root_path
    mock_configdrive_required.return_value = configdrive_required
    mock_create_instance.side_effect = fail
    if exists:
        self.assertRaises(exception.InstanceExists, self._vmops.spawn,
                          self.context, mock_instance, mock_image_meta,
                          [mock.sentinel.FILE], mock.sentinel.PASSWORD,
                          mock.sentinel.INFO, mock.sentinel.DEV_INFO)
    elif fail is vmutils.HyperVException:
        self.assertRaises(vmutils.HyperVException, self._vmops.spawn,
                          self.context, mock_instance, mock_image_meta,
                          [mock.sentinel.FILE], mock.sentinel.PASSWORD,
                          mock.sentinel.INFO, mock.sentinel.DEV_INFO)
        # A failed spawn must tear the instance back down.
        mock_destroy.assert_called_once_with(mock_instance)
    else:
        self._vmops.spawn(self.context, mock_instance, mock_image_meta,
                          [mock.sentinel.FILE], mock.sentinel.PASSWORD,
                          mock.sentinel.INFO, mock.sentinel.DEV_INFO)
        self._vmops._vmutils.vm_exists.assert_called_once_with(
            mock_instance.name)
        mock_delete_disk_files.assert_called_once_with(
            mock_instance.name)
        mock_ebs_root_in_block_devices.assert_called_once_with(
            mock.sentinel.DEV_INFO)
        if not boot_from_volume:
            mock_create_root_vhd.assert_called_once_with(self.context,
                                                         mock_instance)
        mock_create_ephemeral_vhd.assert_called_once_with(mock_instance)
        mock_get_image_vm_gen.assert_called_once_with(fake_root_path,
                                                      mock_image_meta)
        mock_create_instance.assert_called_once_with(
            mock_instance, mock.sentinel.INFO, mock.sentinel.DEV_INFO,
            fake_root_path, fake_ephemeral_path, fake_vm_gen)
        mock_configdrive_required.assert_called_once_with(mock_instance)
        if configdrive_required:
            mock_create_config_drive.assert_called_once_with(
                mock_instance, [mock.sentinel.FILE],
                mock.sentinel.PASSWORD,
                mock.sentinel.INFO)
            mock_attach_config_drive.assert_called_once_with(
                mock_instance, fake_config_drive_path, fake_vm_gen)
        mock_power_on.assert_called_once_with(mock_instance)
def test_spawn(self):
    """Happy path with a config drive."""
    self._test_spawn(exists=False, boot_from_volume=False,
                     configdrive_required=True, fail=None)
def test_spawn_instance_exists(self):
    """Spawning over an existing VM raises InstanceExists."""
    self._test_spawn(exists=True, boot_from_volume=False,
                     configdrive_required=True, fail=None)
def test_spawn_create_instance_exception(self):
    """create_instance failure propagates and triggers destroy."""
    self._test_spawn(exists=False, boot_from_volume=False,
                     configdrive_required=True,
                     fail=vmutils.HyperVException)
def test_spawn_not_required(self):
    """Happy path without a config drive."""
    self._test_spawn(exists=False, boot_from_volume=False,
                     configdrive_required=False, fail=None)
def test_spawn_root_in_block(self):
    """Boot-from-volume: no local root disk is created."""
    self._test_spawn(exists=False, boot_from_volume=True,
                     configdrive_required=False, fail=None)
def test_spawn_no_admin_permissions(self):
    """spawn fails early when the admin-permission check raises."""
    self._vmops._vmutils.check_admin_permissions.side_effect = (
        vmutils.HyperVException)
    self.assertRaises(vmutils.HyperVException,
                      self._vmops.spawn,
                      self.context, mock.DEFAULT, mock.DEFAULT,
                      [mock.sentinel.FILE], mock.sentinel.PASSWORD,
                      mock.sentinel.INFO, mock.sentinel.DEV_INFO)
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps'
            '.attach_volumes')
@mock.patch.object(vmops.VMOps, '_attach_drive')
def _test_create_instance(self, mock_attach_drive, mock_attach_volumes,
                          fake_root_path, fake_ephemeral_path,
                          enable_instance_metrics,
                          vm_gen=constants.VM_GEN_1):
    """Verify create_instance: VM creation, disk attachments, SCSI
    controller, volume attachment, NIC/VIF plumbing and the optional
    metrics collection.
    """
    mock_vif_driver = mock.MagicMock()
    self._vmops._vif_driver = mock_vif_driver
    self.flags(enable_instance_metrics_collection=enable_instance_metrics,
               group='hyperv')
    fake_network_info = {'id': mock.sentinel.ID,
                         'address': mock.sentinel.ADDRESS}
    mock_instance = fake_instance.fake_instance_obj(self.context)
    instance_path = os.path.join(CONF.instances_path, mock_instance.name)
    self._vmops.create_instance(instance=mock_instance,
                                network_info=[fake_network_info],
                                block_device_info=mock.sentinel.DEV_INFO,
                                root_vhd_path=fake_root_path,
                                eph_vhd_path=fake_ephemeral_path,
                                vm_gen=vm_gen)
    self._vmops._vmutils.create_vm.assert_called_once_with(
        mock_instance.name, mock_instance.memory_mb,
        mock_instance.vcpus, CONF.hyperv.limit_cpu_features,
        CONF.hyperv.dynamic_memory_ratio, vm_gen, instance_path,
        [mock_instance.uuid])
    # Expected disk attachments: root first (if any), then ephemeral.
    expected = []
    ctrl_type = vmops.VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
    ctrl_disk_addr = 0
    if fake_root_path:
        expected.append(mock.call(mock_instance.name, fake_root_path,
                                  0, ctrl_disk_addr, ctrl_type,
                                  constants.DISK))
        ctrl_disk_addr += 1
    if fake_ephemeral_path:
        expected.append(mock.call(mock_instance.name,
                                  fake_ephemeral_path, 0, ctrl_disk_addr,
                                  ctrl_type, constants.DISK))
    # BUG FIX: the original used ``has_calls(expected)``, which is NOT a
    # Mock assertion -- attribute access merely creates a child mock and
    # the call asserts nothing.  ``assert_has_calls`` actually checks.
    mock_attach_drive.assert_has_calls(expected)
    self._vmops._vmutils.create_scsi_controller.assert_called_once_with(
        mock_instance.name)
    # Gen2 VMs boot from SCSI, so a missing root path only means
    # EBS-root for Gen1 VMs.
    ebs_root = vm_gen is not constants.VM_GEN_2 and fake_root_path is None
    mock_attach_volumes.assert_called_once_with(mock.sentinel.DEV_INFO,
                                                mock_instance.name,
                                                ebs_root)
    self._vmops._vmutils.create_nic.assert_called_once_with(
        mock_instance.name, mock.sentinel.ID, mock.sentinel.ADDRESS)
    mock_vif_driver.plug.assert_called_once_with(mock_instance,
                                                 fake_network_info)
    mock_enable = self._vmops._vmutils.enable_vm_metrics_collection
    if enable_instance_metrics:
        mock_enable.assert_called_once_with(mock_instance.name)
def test_create_instance(self):
    """Full Gen1 creation: root + ephemeral disks, metrics enabled."""
    fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH
    self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH,
                               fake_ephemeral_path=fake_ephemeral_path,
                               enable_instance_metrics=True)
def test_create_instance_no_root_path(self):
    """Creation without a local root disk (boot from volume)."""
    fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH
    self._test_create_instance(fake_root_path=None,
                               fake_ephemeral_path=fake_ephemeral_path,
                               enable_instance_metrics=True)
def test_create_instance_no_ephemeral_path(self):
    """Creation without an ephemeral disk."""
    self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH,
                               fake_ephemeral_path=None,
                               enable_instance_metrics=True)
def test_create_instance_no_path(self):
    """Creation with neither root nor ephemeral disk."""
    self._test_create_instance(fake_root_path=None,
                               fake_ephemeral_path=None,
                               enable_instance_metrics=False)
def test_create_instance_enable_instance_metrics_false(self):
    """Metrics collection stays disabled when the flag is off."""
    fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH
    self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH,
                               fake_ephemeral_path=fake_ephemeral_path,
                               enable_instance_metrics=False)
def test_create_instance_gen2(self):
    """Generation 2 VM creation path."""
    self._test_create_instance(fake_root_path=None,
                               fake_ephemeral_path=None,
                               enable_instance_metrics=False,
                               vm_gen=constants.VM_GEN_2)
def test_attach_drive_vm_to_scsi(self):
    """_attach_drive routes SCSI requests to attach_scsi_drive."""
    vm_name = mock.sentinel.FAKE_VM_NAME
    drive_path = mock.sentinel.FAKE_PATH
    self._vmops._attach_drive(
        vm_name, drive_path, mock.sentinel.FAKE_DRIVE_ADDR,
        mock.sentinel.FAKE_CTRL_DISK_ADDR, constants.CTRL_TYPE_SCSI)
    # SCSI attachments do not take controller addresses.
    self._vmops._vmutils.attach_scsi_drive.assert_called_once_with(
        vm_name, drive_path, constants.DISK)
def test_attach_drive_vm_to_ide(self):
    """_attach_drive routes IDE requests to attach_ide_drive."""
    vm_name = mock.sentinel.FAKE_VM_NAME
    drive_path = mock.sentinel.FAKE_PATH
    self._vmops._attach_drive(
        vm_name, drive_path, mock.sentinel.FAKE_DRIVE_ADDR,
        mock.sentinel.FAKE_CTRL_DISK_ADDR, constants.CTRL_TYPE_IDE)
    # IDE attachments keep the drive and controller-disk addresses.
    self._vmops._vmutils.attach_ide_drive.assert_called_once_with(
        vm_name, drive_path, mock.sentinel.FAKE_DRIVE_ADDR,
        mock.sentinel.FAKE_CTRL_DISK_ADDR, constants.DISK)
def _check_get_image_vm_gen_except(self, image_prop):
    """Assert get_image_vm_generation rejects the given image property
    even though both generations are supported by the host.
    """
    image_meta = {"properties": {constants.IMAGE_PROP_VM_GEN: image_prop}}
    self._vmops._hostutils.get_supported_vm_types.return_value = [
        constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
    self.assertRaises(vmutils.HyperVException,
                      self._vmops.get_image_vm_generation,
                      mock.sentinel.FAKE_PATH,
                      image_meta)
def test_get_image_vm_generation_default(self):
    """No image property -> host default generation (Gen1)."""
    image_meta = {"properties": {}}
    self._vmops._hostutils.get_default_vm_generation.return_value = (
        constants.IMAGE_PROP_VM_GEN_1)
    self._vmops._hostutils.get_supported_vm_types.return_value = [
        constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
    response = self._vmops.get_image_vm_generation(mock.sentinel.FAKE_PATH,
                                                   image_meta)
    self.assertEqual(constants.VM_GEN_1, response)
def test_get_image_vm_generation_gen2(self):
    """Gen2 image property with a VHDX root disk is honored."""
    image_meta = {"properties": {
        constants.IMAGE_PROP_VM_GEN: constants.IMAGE_PROP_VM_GEN_2}}
    self._vmops._hostutils.get_supported_vm_types.return_value = [
        constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
    self._vmops._vhdutils.get_vhd_format.return_value = (
        constants.DISK_FORMAT_VHDX)
    response = self._vmops.get_image_vm_generation(mock.sentinel.FAKE_PATH,
                                                   image_meta)
    self.assertEqual(constants.VM_GEN_2, response)
def test_get_image_vm_generation_bad_prop(self):
    """An unrecognized generation property raises HyperVException."""
    self._check_get_image_vm_gen_except(mock.sentinel.FAKE_IMAGE_PROP)
def test_get_image_vm_generation_not_vhdx(self):
    """Gen2 requires VHDX; a plain VHD root disk is rejected."""
    self._vmops._vhdutils.get_vhd_format.return_value = (
        constants.DISK_FORMAT_VHD)
    self._check_get_image_vm_gen_except(constants.IMAGE_PROP_VM_GEN_2)
@mock.patch('nova.api.metadata.base.InstanceMetadata')
@mock.patch('nova.virt.configdrive.ConfigDriveBuilder')
@mock.patch('nova.utils.execute')
def _test_create_config_drive(self, mock_execute, mock_ConfigDriveBuilder,
                              mock_InstanceMetadata, config_drive_format,
                              config_drive_cdrom, side_effect):
    """Exercise _create_config_drive.

    Non-iso9660 formats raise UnsupportedConfigDriveFormatException; a
    failing make_drive propagates ProcessExecutionError; otherwise an
    ISO is built and, unless config_drive_cdrom is set, converted to a
    VHD via qemu-img with the ISO removed afterwards.
    """
    mock_instance = fake_instance.fake_instance_obj(self.context)
    self.flags(config_drive_format=config_drive_format)
    self.flags(config_drive_cdrom=config_drive_cdrom, group='hyperv')
    self.flags(config_drive_inject_password=True, group='hyperv')
    self._vmops._pathutils.get_instance_dir.return_value = (
        self.FAKE_DIR)
    # mock_ConfigDriveBuilder() / __enter__() return the same child
    # mocks on every call, so this configures the context-managed
    # builder used inside the code under test.
    mock_ConfigDriveBuilder().__enter__().make_drive.side_effect = [
        side_effect]
    if config_drive_format != self.ISO9660:
        self.assertRaises(vmutils.UnsupportedConfigDriveFormatException,
                          self._vmops._create_config_drive,
                          mock_instance, [mock.sentinel.FILE],
                          mock.sentinel.PASSWORD,
                          mock.sentinel.NET_INFO)
    elif side_effect is processutils.ProcessExecutionError:
        self.assertRaises(processutils.ProcessExecutionError,
                          self._vmops._create_config_drive,
                          mock_instance, [mock.sentinel.FILE],
                          mock.sentinel.PASSWORD,
                          mock.sentinel.NET_INFO)
    else:
        path = self._vmops._create_config_drive(mock_instance,
                                                [mock.sentinel.FILE],
                                                mock.sentinel.PASSWORD,
                                                mock.sentinel.NET_INFO)
        mock_InstanceMetadata.assert_called_once_with(
            mock_instance, content=[mock.sentinel.FILE],
            extra_md={'admin_pass': mock.sentinel.PASSWORD},
            network_info=mock.sentinel.NET_INFO)
        self._vmops._pathutils.get_instance_dir.assert_called_once_with(
            mock_instance.name)
        mock_ConfigDriveBuilder.assert_called_with(
            instance_md=mock_InstanceMetadata())
        mock_make_drive = mock_ConfigDriveBuilder().__enter__().make_drive
        path_iso = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO)
        path_vhd = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_VHD)
        mock_make_drive.assert_called_once_with(path_iso)
        if not CONF.hyperv.config_drive_cdrom:
            # ISO converted to VHD, then the intermediate ISO removed.
            expected = path_vhd
            mock_execute.assert_called_once_with(
                CONF.hyperv.qemu_img_cmd,
                'convert', '-f', 'raw', '-O', 'vpc',
                path_iso, path_vhd, attempts=1)
            self._vmops._pathutils.remove.assert_called_once_with(
                os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO))
        else:
            expected = path_iso
        self.assertEqual(expected, path)
def test_create_config_drive_cdrom(self):
    """ISO config drive kept as a CD-ROM image."""
    self._test_create_config_drive(config_drive_format=self.ISO9660,
                                   config_drive_cdrom=True,
                                   side_effect=None)
def test_create_config_drive_vhd(self):
    """ISO config drive converted to a VHD."""
    self._test_create_config_drive(config_drive_format=self.ISO9660,
                                   config_drive_cdrom=False,
                                   side_effect=None)
def test_create_config_drive_other_drive_format(self):
    """Non-iso9660 config drive formats are rejected."""
    self._test_create_config_drive(config_drive_format=mock.sentinel.OTHER,
                                   config_drive_cdrom=False,
                                   side_effect=None)
def test_create_config_drive_execution_error(self):
    """make_drive failures propagate as ProcessExecutionError."""
    self._test_create_config_drive(
        config_drive_format=self.ISO9660,
        config_drive_cdrom=False,
        side_effect=processutils.ProcessExecutionError)
def test_attach_config_drive_exception(self):
    """An unknown config drive extension raises InvalidDiskFormat."""
    instance = fake_instance.fake_instance_obj(self.context)
    self.assertRaises(exception.InvalidDiskFormat,
                      self._vmops.attach_config_drive,
                      instance, 'C:/fake_instance_dir/configdrive.xxx',
                      constants.VM_GEN_1)
@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_config_drive(self, mock_attach_drive):
    """Gen1 config drives attach via the IDE controller."""
    instance = fake_instance.fake_instance_obj(self.context)
    self._vmops.attach_config_drive(instance,
                                    self._FAKE_CONFIGDRIVE_PATH,
                                    constants.VM_GEN_1)
    mock_attach_drive.assert_called_once_with(
        instance.name, self._FAKE_CONFIGDRIVE_PATH,
        1, 0, constants.CTRL_TYPE_IDE, constants.DISK)
@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_config_drive_gen2(self, mock_attach_drive):
    """Gen2 config drives attach via the SCSI controller."""
    instance = fake_instance.fake_instance_obj(self.context)
    self._vmops.attach_config_drive(instance,
                                    self._FAKE_CONFIGDRIVE_PATH,
                                    constants.VM_GEN_2)
    mock_attach_drive.assert_called_once_with(
        instance.name, self._FAKE_CONFIGDRIVE_PATH,
        1, 0, constants.CTRL_TYPE_SCSI, constants.DISK)
def test_delete_disk_files(self):
    """Deleting disk files removes the whole instance directory."""
    instance = fake_instance.fake_instance_obj(self.context)
    self._vmops._delete_disk_files(instance.name)
    get_dir = self._vmops._pathutils.get_instance_dir
    get_dir.assert_called_once_with(
        instance.name, create_dir=False, remove_dir=True)
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps.disconnect_volumes')
@mock.patch('nova.virt.hyperv.vmops.VMOps._delete_disk_files')
@mock.patch('nova.virt.hyperv.vmops.VMOps.power_off')
def test_destroy(self, mock_power_off, mock_delete_disk_files,
                 mock_disconnect_volumes):
    """destroy powers off, deletes the VM, disconnects volumes and
    removes the disk files.
    """
    mock_instance = fake_instance.fake_instance_obj(self.context)
    self._vmops._vmutils.vm_exists.return_value = True
    self._vmops.destroy(instance=mock_instance,
                        block_device_info=mock.sentinel.FAKE_BD_INFO)
    self._vmops._vmutils.vm_exists.assert_called_with(
        mock_instance.name)
    mock_power_off.assert_called_once_with(mock_instance)
    self._vmops._vmutils.destroy_vm.assert_called_once_with(
        mock_instance.name)
    mock_disconnect_volumes.assert_called_once_with(
        mock.sentinel.FAKE_BD_INFO)
    mock_delete_disk_files.assert_called_once_with(
        mock_instance.name)
def test_destroy_inexistent_instance(self):
    """Destroying a VM that does not exist is a no-op."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    self._vmops._vmutils.vm_exists.return_value = False
    self._vmops.destroy(instance=mock_instance)
    self.assertFalse(self._vmops._vmutils.destroy_vm.called)
@mock.patch('nova.virt.hyperv.vmops.VMOps.power_off')
def test_destroy_exception(self, mock_power_off):
    """destroy_vm failures propagate as HyperVException."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    self._vmops._vmutils.destroy_vm.side_effect = vmutils.HyperVException
    self._vmops._vmutils.vm_exists.return_value = True
    self.assertRaises(vmutils.HyperVException,
                      self._vmops.destroy, mock_instance)
def test_reboot_hard(self):
    """Hard reboot goes straight to the REBOOT VM state."""
    self._test_reboot(vmops.REBOOT_TYPE_HARD,
                      constants.HYPERV_VM_STATE_REBOOT)
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_reboot_soft(self, mock_soft_shutdown):
    """Successful soft shutdown is followed by a power-on (ENABLED)."""
    mock_soft_shutdown.return_value = True
    self._test_reboot(vmops.REBOOT_TYPE_SOFT,
                      constants.HYPERV_VM_STATE_ENABLED)
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_reboot_soft_failed(self, mock_soft_shutdown):
    """A failed soft shutdown falls back to a hard reboot."""
    mock_soft_shutdown.return_value = False
    self._test_reboot(vmops.REBOOT_TYPE_SOFT,
                      constants.HYPERV_VM_STATE_REBOOT)
@mock.patch("nova.virt.hyperv.vmops.VMOps.power_on")
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on):
    """A power-on failure after soft shutdown propagates."""
    mock_soft_shutdown.return_value = True
    mock_power_on.side_effect = vmutils.HyperVException("Expected failure")
    instance = fake_instance.fake_instance_obj(self.context)
    self.assertRaises(vmutils.HyperVException, self._vmops.reboot,
                      instance, {}, vmops.REBOOT_TYPE_SOFT)
    mock_soft_shutdown.assert_called_once_with(instance)
    mock_power_on.assert_called_once_with(instance)
def _test_reboot(self, reboot_type, vm_state):
    """Reboot with ``reboot_type`` must move the VM to ``vm_state``."""
    instance = fake_instance.fake_instance_obj(self.context)
    with mock.patch.object(self._vmops, '_set_vm_state') as mock_set:
        self._vmops.reboot(instance, {}, reboot_type)
        mock_set.assert_called_once_with(instance, vm_state)
@mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.return_value = True
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_once_with(instance.name)
mock_wait_for_power_off.assert_called_once_with(
instance.name, self._FAKE_TIMEOUT)
self.assertTrue(result)
@mock.patch("time.sleep")
def test_soft_shutdown_failed(self, mock_sleep):
instance = fake_instance.fake_instance_obj(self.context)
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.side_effect = vmutils.HyperVException(
"Expected failure.")
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
mock_shutdown_vm.assert_called_once_with(instance.name)
self.assertFalse(result)
@mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown_wait(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.side_effect = [False, True]
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1)
calls = [mock.call(instance.name, 1),
mock.call(instance.name, self._FAKE_TIMEOUT - 1)]
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_with(instance.name)
mock_wait_for_power_off.assert_has_calls(calls)
self.assertTrue(result)
@mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown_wait_timeout(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.return_value = False
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1.5)
calls = [mock.call(instance.name, 1.5),
mock.call(instance.name, self._FAKE_TIMEOUT - 1.5)]
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_with(instance.name)
mock_wait_for_power_off.assert_has_calls(calls)
self.assertFalse(result)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_pause(self, mock_set_vm_state):
    """pause transitions the VM to PAUSED."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    self._vmops.pause(instance=mock_instance)
    mock_set_vm_state.assert_called_once_with(
        mock_instance, constants.HYPERV_VM_STATE_PAUSED)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_unpause(self, mock_set_vm_state):
    """unpause transitions the VM back to ENABLED."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    self._vmops.unpause(instance=mock_instance)
    mock_set_vm_state.assert_called_once_with(
        mock_instance, constants.HYPERV_VM_STATE_ENABLED)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_suspend(self, mock_set_vm_state):
    """suspend transitions the VM to SUSPENDED."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    self._vmops.suspend(instance=mock_instance)
    mock_set_vm_state.assert_called_once_with(
        mock_instance, constants.HYPERV_VM_STATE_SUSPENDED)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_resume(self, mock_set_vm_state):
    """resume transitions the VM back to ENABLED."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    self._vmops.resume(instance=mock_instance)
    mock_set_vm_state.assert_called_once_with(
        mock_instance, constants.HYPERV_VM_STATE_ENABLED)
def _test_power_off(self, timeout):
    """power_off with the given timeout must end in DISABLED."""
    instance = fake_instance.fake_instance_obj(self.context)
    with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
        self._vmops.power_off(instance, timeout)
        mock_set_state.assert_called_once_with(
            instance, constants.HYPERV_VM_STATE_DISABLED)
def test_power_off_hard(self):
    """timeout=0 means an immediate hard power off."""
    self._test_power_off(timeout=0)
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_power_off_exception(self, mock_soft_shutdown):
    """Failed soft shutdown falls back to hard power off."""
    mock_soft_shutdown.return_value = False
    self._test_power_off(timeout=1)
@mock.patch("nova.virt.hyperv.vmops.VMOps._set_vm_state")
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_power_off_soft(self, mock_soft_shutdown, mock_set_state):
    """A successful soft shutdown skips the hard state change."""
    instance = fake_instance.fake_instance_obj(self.context)
    mock_soft_shutdown.return_value = True
    self._vmops.power_off(instance, 1, 0)
    mock_soft_shutdown.assert_called_once_with(
        instance, 1, vmops.SHUTDOWN_TIME_INCREMENT)
    self.assertFalse(mock_set_state.called)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_power_on(self, mock_set_vm_state):
    """power_on enables the VM."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    self._vmops.power_on(mock_instance)
    mock_set_vm_state.assert_called_once_with(
        mock_instance, constants.HYPERV_VM_STATE_ENABLED)
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps'
            '.fix_instance_volume_disk_paths')
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_power_on_having_block_devices(self, mock_set_vm_state,
                                       mock_fix_instance_vol_paths):
    """With block devices, volume disk paths are fixed before boot."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    self._vmops.power_on(mock_instance, mock.sentinel.block_device_info)
    mock_fix_instance_vol_paths.assert_called_once_with(
        mock_instance.name, mock.sentinel.block_device_info)
    mock_set_vm_state.assert_called_once_with(
        mock_instance, constants.HYPERV_VM_STATE_ENABLED)
@mock.patch.object(vmops.VMOps, 'log_vm_serial_output')
@mock.patch.object(vmops.VMOps, '_delete_vm_console_log')
def _test_set_vm_state(self, mock_delete_vm_console_log,
                       mock_log_vm_output, state):
    """_set_vm_state forwards to vmutils and manages the console log:
    the log is deleted on DISABLED/REBOOT and (re)started on
    ENABLED/REBOOT.
    """
    mock_instance = fake_instance.fake_instance_obj(self.context)
    self._vmops._set_vm_state(mock_instance, state)
    self._vmops._vmutils.set_vm_state.assert_called_once_with(
        mock_instance.name, state)
    if state in (constants.HYPERV_VM_STATE_DISABLED,
                 constants.HYPERV_VM_STATE_REBOOT):
        mock_delete_vm_console_log.assert_called_once_with(mock_instance)
    if state in (constants.HYPERV_VM_STATE_ENABLED,
                 constants.HYPERV_VM_STATE_REBOOT):
        mock_log_vm_output.assert_called_once_with(mock_instance.name,
                                                   mock_instance.uuid)
def test_set_vm_state_disabled(self):
    """DISABLED: console log deleted, logging not restarted."""
    self._test_set_vm_state(state=constants.HYPERV_VM_STATE_DISABLED)
def test_set_vm_state_enabled(self):
    """ENABLED: console logging restarted."""
    self._test_set_vm_state(state=constants.HYPERV_VM_STATE_ENABLED)
def test_set_vm_state_reboot(self):
    """REBOOT: console log deleted and logging restarted."""
    self._test_set_vm_state(state=constants.HYPERV_VM_STATE_REBOOT)
def test_set_vm_state_exception(self):
    """vmutils failures propagate as HyperVException."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    self._vmops._vmutils.set_vm_state.side_effect = vmutils.HyperVException
    self.assertRaises(vmutils.HyperVException, self._vmops._set_vm_state,
                      mock_instance, mock.sentinel.STATE)
def test_get_vm_state(self):
    """_get_vm_state extracts EnabledState from the VM summary info."""
    fake_summary = {'EnabledState': constants.HYPERV_VM_STATE_DISABLED}
    patcher = mock.patch.object(self._vmops._vmutils,
                                'get_vm_summary_info')
    with patcher as mock_get_summary_info:
        mock_get_summary_info.return_value = fake_summary
        state = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
        self.assertEqual(state, constants.HYPERV_VM_STATE_DISABLED)
@mock.patch.object(vmops.VMOps, '_get_vm_state')
def test_wait_for_power_off_true(self, mock_get_state):
    """Returns True once the VM reports DISABLED."""
    mock_get_state.return_value = constants.HYPERV_VM_STATE_DISABLED
    result = self._vmops._wait_for_power_off(
        mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
    mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME)
    self.assertTrue(result)
@mock.patch.object(vmops.etimeout, "with_timeout")
def test_wait_for_power_off_false(self, mock_with_timeout):
    """Returns False when the eventlet timeout fires first."""
    mock_with_timeout.side_effect = etimeout.Timeout()
    result = self._vmops._wait_for_power_off(
        mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
    self.assertFalse(result)
def test_copy_vm_console_logs(self):
    """Only console logs that exist locally are copied to the remote
    host (here: the current log exists, the archived one does not).
    """
    fake_local_paths = (mock.sentinel.FAKE_PATH,
                        mock.sentinel.FAKE_PATH_ARCHIVED)
    fake_remote_paths = (mock.sentinel.FAKE_REMOTE_PATH,
                         mock.sentinel.FAKE_REMOTE_PATH_ARCHIVED)
    # First call resolves local paths, second the remote ones.
    self._vmops._pathutils.get_vm_console_log_paths.side_effect = [
        fake_local_paths, fake_remote_paths]
    self._vmops._pathutils.exists.side_effect = [True, False]
    self._vmops.copy_vm_console_logs(mock.sentinel.FAKE_VM_NAME,
                                     mock.sentinel.FAKE_DEST)
    calls = [mock.call(mock.sentinel.FAKE_VM_NAME),
             mock.call(mock.sentinel.FAKE_VM_NAME,
                       remote_server=mock.sentinel.FAKE_DEST)]
    self._vmops._pathutils.get_vm_console_log_paths.assert_has_calls(calls)
    calls = [mock.call(mock.sentinel.FAKE_PATH),
             mock.call(mock.sentinel.FAKE_PATH_ARCHIVED)]
    self._vmops._pathutils.exists.assert_has_calls(calls)
    self._vmops._pathutils.copy.assert_called_once_with(
        mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_REMOTE_PATH)
@mock.patch.object(vmops.ioutils, 'IOThread')
def test_log_vm_serial_output(self, fake_iothread):
    """An IOThread is started to drain the VM's named serial pipe into
    the console log file.
    """
    self._vmops._pathutils.get_vm_console_log_paths.return_value = [
        mock.sentinel.FAKE_PATH]
    self._vmops.log_vm_serial_output(mock.sentinel.FAKE_VM_NAME,
                                     self.FAKE_UUID)
    # The named pipe is derived from the instance UUID.
    pipe_path = r'\\.\pipe\%s' % self.FAKE_UUID
    fake_iothread.assert_called_once_with(
        pipe_path, mock.sentinel.FAKE_PATH,
        self._vmops._MAX_CONSOLE_LOG_FILE_SIZE)
    fake_iothread.return_value.start.assert_called_once_with()
@mock.patch("os.path.exists")
def test_get_console_output(self, fake_path_exists):
mock_instance = fake_instance.fake_instance_obj(self.context)
fake_path_exists.return_value = True
self._vmops._pathutils.get_vm_console_log_paths.return_value = (
mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_PATH_ARCHIVED)
with mock.patch('nova.virt.hyperv.vmops.open',
mock.mock_open(read_data=self.FAKE_LOG), create=True):
instance_log = self._vmops.get_console_output(mock_instance)
# get_vm_console_log_paths returns 2 paths.
self.assertEqual(self.FAKE_LOG * 2, instance_log)
expected_calls = [mock.call(mock.sentinel.FAKE_PATH_ARCHIVED),
mock.call(mock.sentinel.FAKE_PATH)]
fake_path_exists.assert_has_calls(expected_calls, any_order=False)
@mock.patch("__builtin__.open")
@mock.patch("os.path.exists")
def test_get_console_output_exception(self, fake_path_exists, fake_open):
fake_vm = mock.MagicMock()
fake_open.side_effect = vmutils.HyperVException
fake_path_exists.return_value = True
self._vmops._pathutils.get_vm_console_log_paths.return_value = (
mock.sentinel.fake_console_log_path,
mock.sentinel.fake_console_log_archived)
with mock.patch('nova.virt.hyperv.vmops.open', fake_open, create=True):
self.assertRaises(vmutils.HyperVException,
self._vmops.get_console_output,
fake_vm)
@mock.patch.object(vmops.fileutils, 'delete_if_exists')
def test_delete_vm_console_log(self, mock_delete_if_exists):
    """Deleting the console log joins the instance's log writer thread
    and removes the log file from disk.
    """
    instance = fake_instance.fake_instance_obj(self.context)
    fake_writer = mock.MagicMock()
    # Register a writer for this instance so the join path is exercised.
    self._vmops._vm_log_writers[instance['uuid']] = fake_writer
    self._vmops._pathutils.get_vm_console_log_paths.return_value = (
        mock.sentinel.FAKE_PATH, )
    self._vmops._delete_vm_console_log(instance)
    fake_writer.join.assert_called_once_with()
    mock_delete_if_exists.assert_called_once_with(mock.sentinel.FAKE_PATH)
def test_create_vm_com_port_pipe(self):
    """The VM's serial port connection must be pointed at a named pipe
    derived from the instance UUID.
    """
    instance = fake_instance.fake_instance_obj(self.context)
    expected_pipe = r'\\.\pipe\%s' % instance['uuid']
    self._vmops._create_vm_com_port_pipe(instance)
    mock_get_port = self._vmops._vmutils.get_vm_serial_port_connection
    mock_get_port.assert_called_once_with(instance['name'],
                                          update_connection=expected_pipe)
@mock.patch.object(vmops.VMOps, "log_vm_serial_output")
@mock.patch("os.path.basename")
@mock.patch("os.path.exists")
def test_restart_vm_log_writers(self, mock_exists, mock_basename,
                                mock_log_vm_output):
    """Only active instances whose instance directory still exists get
    their serial console log writer restarted.
    """
    self._vmops._vmutils.get_active_instances.return_value = [
        mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_VM_NAME_OTHER]
    # First VM's dir exists, the second one's does not, so only the
    # first writer should be restarted.
    mock_exists.side_effect = [True, False]
    self._vmops.restart_vm_log_writers()
    calls = [mock.call(mock.sentinel.FAKE_VM_NAME),
             mock.call(mock.sentinel.FAKE_VM_NAME_OTHER)]
    self._vmops._pathutils.get_instance_dir.assert_has_calls(calls)
    get_vm_serial_port = self._vmops._vmutils.get_vm_serial_port_connection
    get_vm_serial_port.assert_called_once_with(mock.sentinel.FAKE_VM_NAME)
    mock_log_vm_output.assert_called_once_with(mock.sentinel.FAKE_VM_NAME,
                                               mock_basename.return_value)
def test_list_instance_uuids(self):
    """list_instance_uuids flattens the UUIDs stored in the VM notes."""
    expected_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
    with mock.patch.object(self._vmops._vmutils,
                           'list_instance_notes') as mock_list_notes:
        # One VM whose notes carry a single UUID.
        mock_list_notes.return_value = [('fake_name', [expected_uuid])]
        response = self._vmops.list_instance_uuids()
        mock_list_notes.assert_called_once_with()
    self.assertEqual(response, [expected_uuid])
def test_copy_vm_dvd_disks(self):
    """Every DVD disk image attached to the VM is copied into the remote
    instance directory on the destination host.
    """
    fake_paths = [mock.sentinel.FAKE_DVD_PATH1,
                  mock.sentinel.FAKE_DVD_PATH2]
    mock_copy = self._vmops._pathutils.copyfile
    mock_get_dvd_disk_paths = self._vmops._vmutils.get_vm_dvd_disk_paths
    mock_get_dvd_disk_paths.return_value = fake_paths
    self._vmops._pathutils.get_instance_dir.return_value = (
        mock.sentinel.FAKE_DEST_PATH)
    self._vmops.copy_vm_dvd_disks(mock.sentinel.FAKE_VM_NAME,
                                  mock.sentinel.FAKE_DEST_HOST)
    mock_get_dvd_disk_paths.assert_called_with(mock.sentinel.FAKE_VM_NAME)
    self._vmops._pathutils.get_instance_dir.assert_called_once_with(
        mock.sentinel.FAKE_VM_NAME,
        remote_server=mock.sentinel.FAKE_DEST_HOST)
    # BUG FIX: the original called mock_copy.has_calls(...), which is not
    # a Mock assertion method -- it merely records a call on the mock and
    # always "passes".  assert_has_calls() with a list of expected calls
    # actually verifies that both copies happened.
    mock_copy.assert_has_calls(
        [mock.call(mock.sentinel.FAKE_DVD_PATH1,
                   mock.sentinel.FAKE_DEST_PATH),
         mock.call(mock.sentinel.FAKE_DVD_PATH2,
                   mock.sentinel.FAKE_DEST_PATH)])
| |
#!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
short_description: NetApp ONTAP manage consistency group snapshot
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create consistency group snapshot for ONTAP volumes.
extends_documentation_fragment:
- netapp.na_ontap
module: na_ontap_cg_snapshot
options:
state:
description:
- If you want to create a snapshot.
default: present
vserver:
required: true
description:
- Name of the vserver.
volumes:
required: true
description:
- A list of volumes in this filer that is part of this CG operation.
snapshot:
required: true
description:
- The provided name of the snapshot that is created in each volume.
timeout:
description:
- Timeout selector.
choices: ['urgent', 'medium', 'relaxed']
default: medium
snapmirror_label:
description:
- A human readable SnapMirror label to be attached with the consistency group snapshot copies.
version_added: "2.7"
'''
EXAMPLES = """
- name:
na_ontap_cg_snapshot:
state: present
vserver: vserver_name
snapshot: snapshot name
volumes: vol_name
username: "{{ netapp username }}"
password: "{{ netapp password }}"
hostname: "{{ netapp hostname }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
# Probe NetApp-Lib availability once at import time; __init__ fails the
# module cleanly when the library is absent.
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPCGSnapshot(object):
    """
    Methods to create CG snapshots

    Drives the ONTAP ZAPI cg-start / cg-commit pair to take a
    consistency-group snapshot across a list of volumes.
    """

    def __init__(self):
        # Extend the shared ONTAP host options (hostname, username, ...)
        # with this module's own parameters.
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, default='present'),
            vserver=dict(required=True, type='str'),
            volumes=dict(required=True, type='list'),
            snapshot=dict(required=True, type='str'),
            timeout=dict(required=False, type='str', choices=[
                'urgent', 'medium', 'relaxed'], default='medium'),
            snapmirror_label=dict(required=False, type='str')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        parameters = self.module.params
        # set up variables
        self.state = parameters['state']
        self.vserver = parameters['vserver']
        self.volumes = parameters['volumes']
        self.snapshot = parameters['snapshot']
        self.timeout = parameters['timeout']
        self.snapmirror_label = parameters['snapmirror_label']
        # cg-id returned by cg-start; consumed later by cg-commit.
        self.cgid = None
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(
                module=self.module, vserver=self.vserver)

    def does_snapshot_exist(self, volume):
        """
        This is duplicated from na_ontap_snapshot
        Checks to see if a snapshot exists or not
        :return: Return True if a snapshot exists, false if it doesn't

        NOTE(review): despite the wording above, the code actually
        returns a dict ({'comment': ...}) when exactly one matching
        snapshot exists, and None otherwise.
        """
        # TODO: Remove this method and import snapshot module and
        # call get after re-factoring __init__ across all the modules
        # we aren't importing now, since __init__ does a lot of Ansible setup
        snapshot_obj = netapp_utils.zapi.NaElement("snapshot-get-iter")
        desired_attr = netapp_utils.zapi.NaElement("desired-attributes")
        snapshot_info = netapp_utils.zapi.NaElement('snapshot-info')
        comment = netapp_utils.zapi.NaElement('comment')
        # add more desired attributes that are allowed to be modified
        snapshot_info.add_child_elem(comment)
        desired_attr.add_child_elem(snapshot_info)
        snapshot_obj.add_child_elem(desired_attr)
        # compose query
        query = netapp_utils.zapi.NaElement("query")
        snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
        snapshot_info_obj.add_new_child("name", self.snapshot)
        snapshot_info_obj.add_new_child("volume", volume)
        snapshot_info_obj.add_new_child("vserver", self.vserver)
        query.add_child_elem(snapshot_info_obj)
        snapshot_obj.add_child_elem(query)
        result = self.server.invoke_successfully(snapshot_obj, True)
        return_value = None
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            attributes_list = result.get_child_by_name('attributes-list')
            snap_info = attributes_list.get_child_by_name('snapshot-info')
            return_value = {'comment': snap_info.get_child_content('comment')}
        return return_value

    def cgcreate(self):
        """
        Calls cg-start and cg-commit (when cg-start succeeds)

        :return: True when a snapshot was started (i.e. a change was made).
        """
        started = self.cg_start()
        if started:
            if self.cgid is not None:
                self.cg_commit()
            else:
                # NOTE(review): no exception is in flight at this point,
                # so traceback.format_exc() carries no useful trace here.
                self.module.fail_json(msg="Error fetching CG ID for CG commit %s" % self.snapshot,
                                      exception=traceback.format_exc())
        return started

    def cg_start(self):
        """
        For the given list of volumes, creates cg-snapshot

        Volumes that already have a snapshot with the requested name are
        skipped; cg-start is only invoked when at least one volume needs
        the snapshot.
        """
        snapshot_started = False
        cgstart = netapp_utils.zapi.NaElement("cg-start")
        cgstart.add_new_child("snapshot", self.snapshot)
        cgstart.add_new_child("timeout", self.timeout)
        volume_list = netapp_utils.zapi.NaElement("volumes")
        cgstart.add_child_elem(volume_list)
        for vol in self.volumes:
            snapshot_exists = self.does_snapshot_exist(vol)
            if snapshot_exists is None:
                snapshot_started = True
                volume_list.add_new_child("volume-name", vol)
        if snapshot_started:
            if self.snapmirror_label:
                cgstart.add_new_child("snapmirror-label",
                                      self.snapmirror_label)
            try:
                cgresult = self.server.invoke_successfully(
                    cgstart, enable_tunneling=True)
                # Remember the cg-id so cg_commit() can finalize it.
                if cgresult.get_child_by_name('cg-id'):
                    self.cgid = cgresult['cg-id']
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg="Error creating CG snapshot %s: %s" %
                                      (self.snapshot, to_native(error)),
                                      exception=traceback.format_exc())
        return snapshot_started

    def cg_commit(self):
        """
        When cg-start is successful, performs a cg-commit with the cg-id
        """
        cgcommit = netapp_utils.zapi.NaElement.create_node_with_children(
            'cg-commit', **{'cg-id': self.cgid})
        try:
            self.server.invoke_successfully(cgcommit,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg="Error committing CG snapshot %s: %s" %
                                  (self.snapshot, to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        '''Applies action from playbook'''
        # Log the invocation to EMS, then create the CG snapshot and
        # report whether anything changed.
        netapp_utils.ems_log_event("na_ontap_cg_snapshot", self.server)
        changed = self.cgcreate()
        self.module.exit_json(changed=changed)
def main():
    '''Execute action from playbook'''
    # Instantiate the module object and run its single action.
    cg_snapshot = NetAppONTAPCGSnapshot()
    cg_snapshot.apply()


if __name__ == '__main__':
    main()
| |
"""AuthZ Adapter implementations of logging managers."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from . import sessions
from ..osid import managers as osid_managers
from ..osid.osid_errors import Unimplemented
from ..osid.osid_errors import Unimplemented, OperationFailed, Unsupported
from ..primitives import Id
from ..utilities import raise_null_argument
from dlkit.manager_impls.logging_ import managers as logging_managers
class LoggingProfile(osid_managers.OsidProfile, logging_managers.LoggingProfile):
    """Adapts underlying LoggingProfile methods with authorization checks."""

    def __init__(self):
        osid_managers.OsidProfile.__init__(self)

    def _get_hierarchy_session(self, proxy=None):
        # Hierarchy support is optional in the provider: return None when
        # the provider raises Unimplemented so callers can degrade.
        if proxy is not None:
            try:
                return self._provider_manager.get_log_hierarchy_session(proxy)
            except Unimplemented:
                return None
        try:
            return self._provider_manager.get_log_hierarchy_session()
        except Unimplemented:
            return None

    def supports_logging(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_logging()

    def supports_log_entry_lookup(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_log_entry_lookup()

    def supports_log_entry_query(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_log_entry_query()

    def supports_log_entry_log(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_log_entry_log()

    def supports_log_entry_log_assignment(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_log_entry_log_assignment()

    def supports_log_lookup(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_log_lookup()

    def supports_log_admin(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_log_admin()

    def supports_log_hierarchy(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_log_hierarchy()

    def supports_log_hierarchy_design(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_log_hierarchy_design()

    def get_log_entry_record_types(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.get_resource_record_types
        return self._provider_manager.get_log_entry_record_types()

    log_entry_record_types = property(fget=get_log_entry_record_types)

    def get_log_entry_search_record_types(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.get_resource_record_types
        return self._provider_manager.get_log_entry_search_record_types()

    log_entry_search_record_types = property(fget=get_log_entry_search_record_types)

    def get_log_record_types(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.get_resource_record_types
        return self._provider_manager.get_log_record_types()

    log_record_types = property(fget=get_log_record_types)

    def get_log_search_record_types(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.get_resource_record_types
        return self._provider_manager.get_log_search_record_types()

    log_search_record_types = property(fget=get_log_search_record_types)

    def get_priority_types(self):
        # Implemented from azosid template for -
        # osid.logging.LoggingProfile.get_priority_types
        return self._provider_manager.get_priority_types()

    priority_types = property(fget=get_priority_types)

    def get_content_types(self):
        # Implemented from azosid template for -
        # osid.logging.LoggingProfile.get_content_types
        return self._provider_manager.get_content_types()

    content_types = property(fget=get_content_types)

    def supports_log_entry_admin(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_log_entry_admin()
class LoggingManager(osid_managers.OsidManager, LoggingProfile, logging_managers.LoggingManager):
    """Adapts underlying LoggingManager methods with authorization checks."""

    def __init__(self):
        LoggingProfile.__init__(self)

    def initialize(self, runtime):
        osid_managers.OsidManager.initialize(self, runtime)
        # Resolve the wrapped provider implementation from runtime config.
        config = self._my_runtime.get_configuration()
        parameter_id = Id('parameter:loggingProviderImpl@authz_adapter')
        provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
        self._provider_manager = runtime.get_manager('LOGGING', provider_impl)
        # need to add version argument

    def get_logging_session(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LoggingSession')(
            provider_session=self._provider_manager.get_logging_session(),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager)

    logging_session = property(fget=get_logging_session)

    @raise_null_argument
    def get_logging_session_for_log(self, log_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
        return getattr(sessions, 'LoggingSession')(
            provider_session=self._provider_manager.get_logging_session_for_log(log_id),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager)

    def get_log_entry_lookup_session(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_template
        # A federated query session is optional; fall back to None.
        try:
            query_session = self._provider_manager.get_log_entry_query_session()
            query_session.use_federated_log_view()
        except Unimplemented:
            query_session = None
        return getattr(sessions, 'LogEntryLookupSession')(
            provider_session=self._provider_manager.get_log_entry_lookup_session(),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            hierarchy_session=self._get_hierarchy_session(),
            query_session=query_session)

    log_entry_lookup_session = property(fget=get_log_entry_lookup_session)

    @raise_null_argument
    def get_log_entry_lookup_session_for_log(self, log_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
        try:
            query_session = self._provider_manager.get_log_entry_query_session_for_log(log_id)
            query_session.use_federated_log_view()
        except Unimplemented:
            query_session = None
        return getattr(sessions, 'LogEntryLookupSession')(
            provider_session=self._provider_manager.get_log_entry_lookup_session_for_log(log_id),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            hierarchy_session=self._get_hierarchy_session(),
            query_session=query_session)

    def get_log_entry_query_session(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_template
        # NOTE(review): the provider query session is fetched twice here
        # (once federated for query_session, once for provider_session),
        # mirroring the azosid template -- confirm this is intentional.
        try:
            query_session = self._provider_manager.get_log_entry_query_session()
            query_session.use_federated_log_view()
        except Unimplemented:
            query_session = None
        return getattr(sessions, 'LogEntryQuerySession')(
            provider_session=self._provider_manager.get_log_entry_query_session(),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            hierarchy_session=self._get_hierarchy_session(),
            query_session=query_session)

    log_entry_query_session = property(fget=get_log_entry_query_session)

    @raise_null_argument
    def get_log_entry_query_session_for_log(self, log_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
        try:
            query_session = self._provider_manager.get_log_entry_query_session_for_log(log_id)
            query_session.use_federated_log_view()
        except Unimplemented:
            query_session = None
        return getattr(sessions, 'LogEntryQuerySession')(
            provider_session=self._provider_manager.get_log_entry_query_session_for_log(log_id),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            hierarchy_session=self._get_hierarchy_session(),
            query_session=query_session)

    def get_log_entry_admin_session(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogEntryAdminSession')(
            provider_session=self._provider_manager.get_log_entry_admin_session(),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager)

    log_entry_admin_session = property(fget=get_log_entry_admin_session)

    @raise_null_argument
    def get_log_entry_admin_session_for_log(self, log_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
        return getattr(sessions, 'LogEntryAdminSession')(
            provider_session=self._provider_manager.get_log_entry_admin_session_for_log(log_id),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager)

    def get_log_entry_log_session(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogEntryLogSession')(
            provider_session=self._provider_manager.get_log_entry_log_session(),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager)

    log_entry_log_session = property(fget=get_log_entry_log_session)

    def get_log_entry_log_assignment_session(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogEntryLogAssignmentSession')(
            provider_session=self._provider_manager.get_log_entry_log_assignment_session(),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager)

    log_entry_log_assignment_session = property(fget=get_log_entry_log_assignment_session)

    def get_log_lookup_session(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogLookupSession')(
            provider_session=self._provider_manager.get_log_lookup_session(),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager)

    log_lookup_session = property(fget=get_log_lookup_session)

    def get_log_admin_session(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogAdminSession')(
            provider_session=self._provider_manager.get_log_admin_session(),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager)

    log_admin_session = property(fget=get_log_admin_session)

    def get_log_hierarchy_session(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogHierarchySession')(
            provider_session=self._provider_manager.get_log_hierarchy_session(),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager)

    log_hierarchy_session = property(fget=get_log_hierarchy_session)

    def get_log_hierarchy_design_session(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogHierarchyDesignSession')(
            provider_session=self._provider_manager.get_log_hierarchy_design_session(),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager)

    log_hierarchy_design_session = property(fget=get_log_hierarchy_design_session)

    def get_logging_batch_manager(self):
        raise Unimplemented()

    logging_batch_manager = property(fget=get_logging_batch_manager)
class LoggingProxyManager(osid_managers.OsidProxyManager, LoggingProfile, logging_managers.LoggingProxyManager):
    """Adapts underlying LoggingProxyManager methods with authorization checks."""

    def __init__(self):
        LoggingProfile.__init__(self)

    def initialize(self, runtime):
        osid_managers.OsidProxyManager.initialize(self, runtime)
        # Resolve the wrapped provider implementation from runtime config.
        config = self._my_runtime.get_configuration()
        parameter_id = Id('parameter:loggingProviderImpl@authz_adapter')
        provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
        self._provider_manager = runtime.get_proxy_manager('LOGGING', provider_impl)
        # need to add version argument

    @raise_null_argument
    def get_logging_session(self, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LoggingSession')(
            provider_session=self._provider_manager.get_logging_session(proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager,
            proxy=proxy)

    @raise_null_argument
    def get_logging_session_for_log(self, log_id, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
        return getattr(sessions, 'LoggingSession')(
            provider_session=self._provider_manager.get_logging_session_for_log(log_id, proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager,
            proxy=proxy)

    @raise_null_argument
    def get_log_entry_lookup_session(self, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_template
        # A federated query session is optional; fall back to None.
        try:
            query_session = self._provider_manager.get_log_entry_query_session(proxy)
            query_session.use_federated_log_view()
        except Unimplemented:
            query_session = None
        return getattr(sessions, 'LogEntryLookupSession')(
            provider_session=self._provider_manager.get_log_entry_lookup_session(proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            proxy=proxy,
            hierarchy_session=self._get_hierarchy_session(proxy),
            query_session=query_session)

    @raise_null_argument
    def get_log_entry_lookup_session_for_log(self, log_id, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
        try:
            query_session = self._provider_manager.get_log_entry_query_session_for_log(log_id, proxy)
            query_session.use_federated_log_view()
        except Unimplemented:
            query_session = None
        return getattr(sessions, 'LogEntryLookupSession')(
            provider_session=self._provider_manager.get_log_entry_lookup_session_for_log(log_id, proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            proxy=proxy,
            hierarchy_session=self._get_hierarchy_session(proxy),
            query_session=query_session)

    @raise_null_argument
    def get_log_entry_query_session(self, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_template
        # NOTE(review): the provider query session is fetched twice here,
        # mirroring the azosid template -- confirm this is intentional.
        try:
            query_session = self._provider_manager.get_log_entry_query_session(proxy)
            query_session.use_federated_log_view()
        except Unimplemented:
            query_session = None
        return getattr(sessions, 'LogEntryQuerySession')(
            provider_session=self._provider_manager.get_log_entry_query_session(proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            proxy=proxy,
            hierarchy_session=self._get_hierarchy_session(proxy),
            query_session=query_session)

    @raise_null_argument
    def get_log_entry_query_session_for_log(self, log_id, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
        try:
            query_session = self._provider_manager.get_log_entry_query_session_for_log(log_id, proxy)
            query_session.use_federated_log_view()
        except Unimplemented:
            query_session = None
        return getattr(sessions, 'LogEntryQuerySession')(
            provider_session=self._provider_manager.get_log_entry_query_session_for_log(log_id, proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            proxy=proxy,
            hierarchy_session=self._get_hierarchy_session(proxy),
            query_session=query_session)

    @raise_null_argument
    def get_log_entry_admin_session(self, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogEntryAdminSession')(
            provider_session=self._provider_manager.get_log_entry_admin_session(proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager,
            proxy=proxy)

    @raise_null_argument
    def get_log_entry_admin_session_for_log(self, log_id, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
        return getattr(sessions, 'LogEntryAdminSession')(
            provider_session=self._provider_manager.get_log_entry_admin_session_for_log(log_id, proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager,
            proxy=proxy)

    @raise_null_argument
    def get_log_entry_log_session(self, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogEntryLogSession')(
            provider_session=self._provider_manager.get_log_entry_log_session(proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager,
            proxy=proxy)

    @raise_null_argument
    def get_log_entry_log_assignment_session(self, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogEntryLogAssignmentSession')(
            provider_session=self._provider_manager.get_log_entry_log_assignment_session(proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager,
            proxy=proxy)

    @raise_null_argument
    def get_log_lookup_session(self, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogLookupSession')(
            provider_session=self._provider_manager.get_log_lookup_session(proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager,
            proxy=proxy)

    @raise_null_argument
    def get_log_admin_session(self, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogAdminSession')(
            provider_session=self._provider_manager.get_log_admin_session(proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager,
            proxy=proxy)

    @raise_null_argument
    def get_log_hierarchy_session(self, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogHierarchySession')(
            provider_session=self._provider_manager.get_log_hierarchy_session(proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager,
            proxy=proxy)

    @raise_null_argument
    def get_log_hierarchy_design_session(self, proxy):
        # Implemented from azosid template for -
        # osid.resource.ResourceManager.get_resource_admin_session_template
        return getattr(sessions, 'LogHierarchyDesignSession')(
            provider_session=self._provider_manager.get_log_hierarchy_design_session(proxy),
            authz_session=self._get_authz_session(),
            override_lookup_session=self._get_override_lookup_session(),
            provider_manager=self._provider_manager,
            proxy=proxy)

    def get_logging_batch_proxy_manager(self):
        raise Unimplemented()

    logging_batch_proxy_manager = property(fget=get_logging_batch_proxy_manager)
| |
'''
Created on 10 Sep 2014
@author: gustavo
'''
import math
import re
from ot import Ot
#from xlsx import Xslxsaver
import copy
# Sentinel value marking an empty/unused cell in the reduction matrix.
NULL = -1
# Module-level defaults; Reduction keeps its own copies on the instance
# (self.max_collum / self.mdegree), so these serve mainly as defaults.
# (Removed the stray trailing semicolon from the original.)
max_collum = 0
mdegree = 0
class Reduction(object):
def __init__(self, debug):
    # debug: verbosity flag supplied by the caller; not consulted in the
    # methods visible here -- presumably used elsewhere. TODO confirm.
    self.debug = debug
def reduction(self,exp):
    """Drive the full reduction for exponent list ``exp`` and return the
    resulting XOR count.

    Builds the multiplication matrix for the largest exponent, runs
    repeated reduction passes against the remaining exponents, lets the
    optimizer share common column pairs, then counts XOR gates.

    NOTE(review): ``exp`` appears to hold the exponents of a reduction
    polynomial, the largest being the degree m -- confirm with callers.
    """
    #xls = Xslxsaver()
    #xls.create_worksheet(exp)
    self.otimizator = Ot()
    exp_sorted = sorted(exp, reverse=True)
    # Largest exponent fixes the working degree; an m x m product needs
    # 2m-1 columns.
    self.mdegree = exp_sorted[0]
    self.max_collum = (2*exp_sorted[0])-1
    nr = self.__calc_NR__(exp_sorted)
    self.matrix = self.__generate_matrix__()
    exp_sorted.remove(self.mdegree)
    self.matrix = self.__multiply__(self.matrix, self.mdegree)
    #xls.save(self.matrix, 'Multiplication')
    print "Finished Multiplication"
    # nr+1 reduction passes against the remaining exponents.
    for i in range(0,nr+1):
        self.__reduce_others__(self.matrix,exp_sorted)
        #xls.save(self.matrix, 'step_reduction_'+str(i))
    self.__remove_repeat__(self.matrix)
    self.clean(self.matrix)
    self.matrix = self.otimizator.sort(self.matrix)
    self.clean(self.matrix)
    # Keep only the final m columns of the fully reduced matrix.
    self.matrix = self.__reduce_matrix__(self.mdegree, self.matrix)
    #xls.save(self.matrix, 'reduced')
    print "Finished Cleaning"
    self.p, self.matrix, self.frequency_counter, self.columns_of_pair = self.otimizator.optimize(self.matrix, self.mdegree)
    self.__remove_one__(self.matrix)
    # Separator row appended before the per-column count row is added.
    row = [-1 for x in xrange(self.mdegree)]
    self.matrix.append(row)
    count = self.__count_xor__(self.matrix,self.p)
    #xls.save(self.matrix, 'Optimized')
    #xls.save_matches(self.p, self.frequency_counter, self.columns_of_pair)
    del self.matrix
    return count
def __multiply__(self, matrix, degree):
    """Append the shifted partial-product rows of an m x m polynomial
    multiplication to ``matrix`` and return it.

    Each of the ``degree`` offsets contributes one row of term indices
    over the 2m-1 product columns; cells shifted out of the window at
    either end are set back to -1 (NULL).

    NOTE(review): the row layout is reconstructed from the index
    arithmetic below -- confirm against __generate_matrix__.
    """
    temp_reuse = 0
    for offset in xrange(0, degree):
        index = self.max_collum-1;
        row = [-1 for x in xrange(self.max_collum)]
        temp = 0
        # Fill the low-order window right-to-left with consecutive term
        # numbers, continuing from the previous row (temp_reuse).
        for j in xrange(0,self.mdegree):
            row[index] = j + temp_reuse
            index = index-1
            temp = j + temp_reuse
        index = 2*self.mdegree - 2 - self.mdegree;
        temp_reuse = temp
        # Fill the high-order columns with the remaining terms.
        for j in xrange(self.mdegree, 2*self.mdegree - 1):
            #print self.max_collum - index
            row[index] = temp + degree
            index = index - 1
            temp = temp + degree
        #print index
        # Blank the cells shifted out at both ends for this offset.
        for i in xrange(0, offset):
            row[i] = -1
            row[self.max_collum-1-i] =-1
        matrix.append(row)
    return matrix
def __reduce_matrix__(self, degree, matrix):
#print "printing..."
matrix_copy = [[-1 for x in range(degree)] for x in range(len(matrix))]
for i in xrange(0, len(matrix)):
h = 0
#print i
for j in xrange(degree-1, len(matrix[0])):
matrix_copy[i][h] = matrix[i][j]
h += 1
#print_matrix(matrix_copy)
del matrix
return matrix_copy
def __count_matchs__(self, matches):
count = 0;
for i in matches:
count = count + (len(matches[i])-1)
return count
def __count_xor__(self, matrix, p):
rowToWrite = [-1 for x in xrange(self.mdegree)]
row = matrix[0]
for j in range(0,len(row)):
countT = 0
element = row[j]
if element <> NULL:
for l in range(1, len(matrix)):
rowToCompare = matrix[l]
elementToCompare = rowToCompare[j]
if elementToCompare <> NULL or (re.search('[a-zA-Z]', str(elementToCompare)) <> None):
countT = countT + 1;
#print "Column :", j, " count: ", countT, " element: ", elementToCompare
rowToWrite[j] = countT
matrix.append(rowToWrite)
rowToCalc = matrix[len(matrix)-1]
count = 0
for i in range(0,len(rowToCalc)):
tx = rowToCalc[i]
count = count + tx
count = count + len(p)
#print
return count
def delete(self):
del self.matrix
def clean(self, matrix):
toRemove = []
for m in matrix:
if self.is_clean(m):
toRemove.append(m)
for i in toRemove:
matrix.remove(i)
def is_clean(self, row):
for i in row:
if i <> NULL:
return False
return True
def __reduce_others__(self, matrix, exp):
to_reduce = self.__need_to_reduce__(matrix)
for index in to_reduce:
for e in exp:
reduceRow = self.reduce(matrix[index],e)
matrix.append(reduceRow)
self.__clean_reduced__(matrix,index)
self.__remove_repeat__(self.matrix)
matrix = self.clean(matrix)
def __remove_one__(self, matrix):
for j in range(1, len(matrix)):
row = matrix[j]
for i in range(self.mdegree-1, len(row)):
valueToCompare = row[i]
if valueToCompare <> NULL:
for m in range(j+1, len(matrix)):
rowToCompare = matrix[m]
toCompare = rowToCompare[i]
if toCompare <> NULL:
if valueToCompare == toCompare:
rowToCompare[i] = NULL;
matrix[m] = rowToCompare
matrix[j] = row
def __remove_repeat__(self, matrix):
for j in range(1, len(matrix)):
row = matrix[j]
for i in range(0, len(row)):
found = False
valueToCompare = row[i]
if valueToCompare <> NULL:
for m in range(j+1, len(matrix)):
rowToCompare = matrix[m]
toCompare = rowToCompare[i]
if toCompare <> NULL:
if valueToCompare == toCompare:
rowToCompare[i] = NULL;
row[i] = NULL;
found = True;
matrix[m] = rowToCompare
if found:
break
matrix[j] = row
def __clean_reduced__(self, matrix, index):
row = matrix[index]
for j in range(0,self.mdegree-1):
row[j] = NULL
matrix[index] = row
def reduce(self, row, exp):
index = self.max_collum-1;
rowReduced = [-1 for x in xrange(self.max_collum)]
for j in range(self.mdegree-2,-1,-1):
element = row[j]
rowReduced[index - exp] = element
index = index -1
return rowReduced
def __need_to_reduce__(self, matrix):
indexOfRows = []
index = (self.max_collum - 1 - self.mdegree);
for i in range(1,len(matrix)):
row = matrix[i]
if row[index] <> NULL:
indexOfRows.append(i)
return indexOfRows
def __reduce_first__(self, matrix, exp):
index = self.max_collum-1;
row = [-1 for x in xrange(self.max_collum)]
for j in xrange(self.mdegree-2,-1,-1):
element = matrix[0][j]
row[index - exp] = element
index = index -1
matrix.append(row)
def __calc_NR__(self, exp_sorted):
nr = 2
nr = int(math.floor((exp_sorted[0]-2)/(exp_sorted[0]-exp_sorted[1])))
#print "NR = ", nr
#temp = (exp_sorted[0]+1)/2
#deg = math.floor(temp)
#if exp_sorted[1] > deg:
# nr = 2* (exp_sorted[0] + 1) - exp_sorted[0]
return nr
def __generate_matrix__(self):
#row = sorted(list(range(0, self.max_collum)), reverse=True)
matrix = [[]]
return matrix
def _column(self, matrix, i):
return [row[i] for row in matrix]
def print_matrix(matrix):
    """Print each row of *matrix* on its own line, then a terminator line.

    FIX: uses the parenthesised single-argument form of ``print``, which
    behaves identically under Python 2 (print statement with a parenthesised
    expression) and Python 3 (print function); the old bare print statements
    were Python-2-only syntax. Output is unchanged.
    """
    for r in matrix:
        # ''.join(str(r)) is equivalent to str(r); kept for output parity
        print(''.join(str(r)))
    print('----------------------FIM---------------------')
| |
import unittest2
try:
import numpy as np
from .nmpy import NumpyFeature, StreamingNumpyDecoder, PackedNumpyEncoder
except ImportError:
np = None
from .persistence import PersistenceSettings
from .data import *
from .model import BaseModel
from .lmdbstore import LmdbDatabase
from .extractor import Node
from tempfile import mkdtemp
from shutil import rmtree
class PassThrough(Node):
    """Identity extractor node: forwards every incoming chunk unchanged."""

    def __init__(self, needs=None):
        super(PassThrough, self).__init__(needs=needs)

    def _process(self, data):
        # no transformation -- emit the chunk exactly as received
        yield data
class BaseNumpyTest(object):
    """Shared scaffolding for numpy-feature round-trip tests.

    Subclasses mix this with a TestCase and implement
    ``_register_database(settings_class)`` to bind a concrete database.
    """

    def setUp(self):
        if np is None:
            self.skipTest('numpy is not available')

        class Settings(PersistenceSettings):
            id_provider = UuidProvider()
            key_builder = StringDelimitedKeyBuilder()
            database = InMemoryDatabase(key_builder=key_builder)

        self.Settings = self._register_database(Settings)

    def _register_database(self, settings_class):
        """Return *settings_class* re-bound to a concrete database.

        BUG FIX: this hook was declared without the ``settings_class``
        parameter even though setUp() passes one and every subclass accepts
        one; it also did ``raise NotImplemented()`` -- the ``NotImplemented``
        sentinel is not callable and is not an exception.  Raise the proper
        ``NotImplementedError`` instead.
        """
        raise NotImplementedError(
            'subclasses must implement _register_database')

    def _check_array(self, arr, shape, dtype, orig):
        # the recovered value must be an ndarray equal to the original,
        # with shape and dtype preserved
        self.assertTrue(isinstance(arr, np.ndarray))
        self.assertTrue(np.all(arr == orig))
        self.assertEqual(shape, arr.shape)
        self.assertEqual(dtype, arr.dtype)

    def _build_doc(self):
        """Return a document class with a raw and a bit-packed numpy feature."""
        class Doc(BaseModel, self.Settings):
            feat = NumpyFeature(PassThrough, store=True)
            packed = NumpyFeature(
                PassThrough,
                needs=feat,
                encoder=PackedNumpyEncoder,
                store=True)
        return Doc

    def _restore(self, data):
        # greedy decoders hand back the array directly; streaming subclasses
        # override this to concatenate the generator
        return data

    def _arrange(self, shape=None, dtype=None):
        """Store an array of the given shape/dtype and verify the round trip."""
        cls = self._build_doc()
        arr = np.recarray(shape, dtype=dtype) \
            if isinstance(dtype, list) else np.zeros(shape, dtype=dtype)
        _id = cls.process(feat=arr)
        doc = cls(_id)
        recovered = self._restore(doc.feat)
        self._check_array(recovered, shape, dtype, arr)

    def test_can_store_and_retrieve_packed_array(self):
        # a (10, 9) array packs each row of 9 bits into 2 uint8 bytes
        cls = self._build_doc()
        arr = np.zeros((10, 9))
        _id = cls.process(feat=arr)
        doc = cls(_id)
        recovered = self._restore(doc.packed)
        self.assertEqual(np.uint8, recovered.dtype)
        self.assertEqual((10, 2), recovered.shape)

    def test_can_store_and_retrieve_empty_array(self):
        self._arrange((0,), np.uint8)

    def test_can_store_and_retrieve_1d_float32_array(self):
        self._arrange((33,), np.float32)

    # (sic) "retreive" kept -- renaming would alter test discovery/reporting
    def test_can_store_and_retreive_multidimensional_uint8_array(self):
        self._arrange((12, 13), np.uint8)

    def test_can_store_and_retrieve_multidimensional_float32_array(self):
        self._arrange((5, 10, 11), np.float32)

    def test_can_store_and_retrieve_recarray(self):
        self._arrange(shape=(25,), dtype=[
            ('x', np.uint8, (509,)),
            ('y', 'a32')])
class GreedyNumpyTest(BaseNumpyTest, unittest2.TestCase):
    """Eager decoding against an in-memory database."""

    def _register_database(self, settings_class):
        kb = settings_class.key_builder
        db = InMemoryDatabase(key_builder=kb)
        return settings_class.clone(database=db)
class GreedyNumpyOnDiskTest(BaseNumpyTest, unittest2.TestCase):
    """Eager decoding against a file-system database in a temp directory."""

    def _register_database(self, settings_class):
        self._tempdir = mkdtemp()
        db = FileSystemDatabase(
            path=self._tempdir,
            key_builder=settings_class.key_builder)
        return settings_class.clone(database=db)

    def tearDown(self):
        # discard the temporary on-disk store
        rmtree(self._tempdir)
class GreedyNumpyLmdbTest(BaseNumpyTest, unittest2.TestCase):
    """Eager decoding against an LMDB database in a temp directory."""

    def _register_database(self, settings_class):
        self._tempdir = mkdtemp()
        db = LmdbDatabase(
            path=self._tempdir,
            map_size=10000000,
            key_builder=settings_class.key_builder)
        return settings_class.clone(database=db)

    def tearDown(self):
        # discard the temporary LMDB environment
        rmtree(self._tempdir)
class StreamingNumpyTest(BaseNumpyTest, unittest2.TestCase):
    """Streaming decoding against an in-memory database."""

    def _register_database(self, settings_class):
        db = InMemoryDatabase(key_builder=settings_class.key_builder)
        return settings_class.clone(database=db)

    def _build_doc(self):
        # each feature gets its own decoder instance
        class Doc(BaseModel, self.Settings):
            feat = NumpyFeature(
                PassThrough,
                decoder=StreamingNumpyDecoder(n_examples=3),
                store=True)
            packed = NumpyFeature(
                PassThrough,
                needs=feat,
                encoder=PackedNumpyEncoder,
                decoder=StreamingNumpyDecoder(n_examples=3),
                store=True)
        return Doc

    def _restore(self, data):
        # streaming decoders yield chunks; stitch them back together
        chunks = list(data)
        return np.concatenate(chunks)
class StreamingNumpyOnDiskTest(BaseNumpyTest, unittest2.TestCase):
    """Streaming decoding against a file-system database in a temp directory."""

    def _register_database(self, settings_class):
        self._tempdir = mkdtemp()
        db = FileSystemDatabase(
            path=self._tempdir,
            key_builder=settings_class.key_builder)
        return settings_class.clone(database=db)

    def tearDown(self):
        # discard the temporary on-disk store
        rmtree(self._tempdir)

    def _build_doc(self):
        # each feature gets its own decoder instance
        class Doc(BaseModel, self.Settings):
            feat = NumpyFeature(
                PassThrough,
                decoder=StreamingNumpyDecoder(n_examples=3),
                store=True)
            packed = NumpyFeature(
                PassThrough,
                needs=feat,
                encoder=PackedNumpyEncoder,
                decoder=StreamingNumpyDecoder(n_examples=3),
                store=True)
        return Doc

    def _restore(self, data):
        # streaming decoders yield chunks; stitch them back together
        chunks = list(data)
        return np.concatenate(chunks)
class StreamingNumpyLmdbTest(BaseNumpyTest, unittest2.TestCase):
    """Streaming decoding against an LMDB database in a temp directory."""

    def _register_database(self, settings_class):
        self._tempdir = mkdtemp()
        db = LmdbDatabase(
            path=self._tempdir,
            map_size=10000000,
            key_builder=settings_class.key_builder)
        return settings_class.clone(database=db)

    def tearDown(self):
        # discard the temporary LMDB environment
        rmtree(self._tempdir)

    def _build_doc(self):
        # each feature gets its own decoder instance
        class Doc(BaseModel, self.Settings):
            feat = NumpyFeature(
                PassThrough,
                decoder=StreamingNumpyDecoder(n_examples=3),
                store=True)
            packed = NumpyFeature(
                PassThrough,
                needs=feat,
                encoder=PackedNumpyEncoder,
                decoder=StreamingNumpyDecoder(n_examples=3),
                store=True)
        return Doc

    def _restore(self, data):
        # streaming decoders yield chunks; stitch them back together
        chunks = list(data)
        return np.concatenate(chunks)
| |
"""
Post Class
for lap_joint script
Post:
.id int Post identifier
.brep Brep Brep representing Post
.profile Curve Curve defining end profile of post
.axis Line Line between centers of end faces
.origin Point start of axis Line
.orientation Plane plane with normal along axis and x-axis towards
center of one face
.pockets list list of Pockets on this Post
.isConnected Bool true if this post is part of a joint
.selfToGlobal Transform convert local coordinates to global
.globalToSelf Transform convert global coordinates to local
.millToGlobal Transform convert unrotated mill coordinates to global
"""
import Rhino
import scriptcontext as sc
import rhinoscriptsyntax as rs
import math
import common
from toolpath import *
class Post:
    """A single post in the system."""

    def __init__(self, axis=None, obRef=None, roll=None, group=None, width=None, height=None, id=None):
        """Initialize a Post.
        Gathers all information about this Post
        Offers multiple ways to describe a Post:
        Start with Rhino object:
            obRef: reference to a Rhino object
        Start with lines:
            group: obRef of one object in a group
            OR
            axis: central axis of the post
            roll: (optional), line normal to axis, defines roll of post
            For a rectangular Post:
                width: width along roll axis
                height: other short dimension of Post
        """
        self.isConnected = False
        self.brep = None
        #not sure about this. id is None until assigned by the Structure?
        self.id = id
        if group: #creating Post with axis and roll lines grouped together
            #find group this object belongs to
            groups = group.Object().Attributes.GetGroupList()
            if len(groups) < 1:
                raise NameError("Object does not belong to a group.")
            group_id = groups[0]
            #get all objects in the group
            objects = rs.ObjectsByGroup(sc.doc.Groups.GroupName(group_id))
            if len(objects) != 2:
                raise NameError("Group does not have two objects (axis, roll).")
            #get actual curves
            curves = [sc.doc.Objects.Find(ob).CurveGeometry for ob in objects]
            #convert to lines
            lines = [Rhino.Geometry.Line(c.PointAtStart, c.PointAtEnd) for c in curves]
            #roll is shorter than axis
            roll, axis = sorted(lines, key=lambda l: l.Length)
        if axis: #creating Post based on lines
            if not (width and height): #currently only rectangular solids.
                raise NameError("Height and width required if an object is not given.")
            if type(axis) is Rhino.DocObjects.ObjRef: #axis is objref to a curve
                #find actual curve geometry
                axis = axis.Geometry()
                #NOTE(review): type() returns a class, but the comparison
                #below is against an ObjectType enum member, so this branch
                #looks unreachable and self.axis may stay unset on this
                #path -- confirm against the Rhino API.
                if type(axis) is Rhino.DocObjects.ObjectType.Curve:
                    self.axis = Rhino.Geometry.Line(axis.PointAtStart, axis.PointAtEnd)
            else: #assume for now axis is either curve or internal line object
                self.axis = axis
            if roll:
                #if roll is a curve, convert it to a Line
                #NOTE(review): same type-vs-enum comparison as above.
                if type(roll) == Rhino.DocObjects.ObjectType.Curve:
                    roll = Rhino.Geometry.Line(roll.PointAtStart, roll.PointAtEnd)
                self.orientation = rs.PlaneFromNormal(self.axis.From,
                    self.axis.UnitTangent, roll.UnitTangent)
            else:
                #construct orientation with default roll angle
                self.orientation = rs.PlaneFromNormal(self.axis.From,
                    self.axis.UnitTangent)
            #construct rectangular profile curve
            self.profile = self.makeRectProfile(width, height)
        elif obRef: #no axis, need obRef
            object = obRef.Object()
            if object is None:
                raise NameError("No object found corresponding to reference " + str(obRef))
            #actual object geometry
            self.brep = common.getBrep(object)
            #assume smallest faces are the ends of the Post
            endFaces = sorted(self.brep.Faces, key=rs.SurfaceArea)[0:2]
            #get curve defining post profile
            #NOTE(review): JoinCurves returns an array of curves; downstream
            #findOrientation() expects self.profile to be a PolyCurve --
            #verify this path.
            self.profile = Rhino.Geometry.Curve.JoinCurves(endFaces[0].DuplicateFace(False).DuplicateEdgeCurves())
            #axis is a Line between centers of smallest faces.
            self.axis = Rhino.Geometry.Line(
                *[rs.SurfaceAreaCentroid(face)[0] for face in endFaces])
        else : #no axis and no obRef
            raise NameError('No valid axis or obRef given.')
        #just for convenience and simplicity
        self.origin = self.axis.From
        #get orientation of Post
        #NOTE(review): this overwrites any orientation computed in the axis
        #branch above -- confirm that is intended.
        self.orientation = self.findOrientation()
        #store conversions to and from Post's orientation
        #rotate 90 degrees about y axis to align posts with x instead of z axis
        self.globalToSelf = Rhino.Geometry.Transform.Rotation(1,0,
            Rhino.Geometry.Vector3d.YAxis, Rhino.Geometry.Point3d.Origin)
        #transform global coordinates to post's local coordinates
        self.globalToSelf *= Rhino.Geometry.Transform.ChangeBasis(
            Rhino.Geometry.Plane.WorldXY, self.orientation)
        #go the other way
        self.selfToGlobal = self.globalToSelf.TryGetInverse()[1]
        #initialize list of this Post's Pockets
        self.pockets = []

    ###########
    #Post Class Functions

    def info(self):
        """Displays a text summary of this post."""
        print "Post: " + self.printId() + \
            "\n Length: " + str(round(self.axis.Length, 2)) + \
            "\n Origin: " + common.printPoint3d(self.origin) + \
            "\n----"

    def display(self, objects=None):
        """Create objects in viewport to display information about this post.
        'objects' determines which objects to display
        Creates:
            label          text dot with post id
            orientation    aligned plane with corner on post origin
            profile        profile curve
            object         post object, if not using obrefs
            axis           axis Line
        Returns: list of guids of added objects
        """
        guids = []
        if objects == None:
            objects = ['label', 'orientation']
        if 'label' in objects:
            guids.append(rs.AddTextDot(self.printId(), self.origin))
        if 'orientation' in objects:
            guids.append(common.displayPlane(self.orientation))
        if 'profile' in objects:
            guids.append(sc.doc.Objects.AddCurve(self.profile))
        if 'object' in objects:
            if not self.brep:
                #no source brep: extrude the profile along the axis instead
                vector = Rhino.Geometry.Vector3d(self.axis.To - self.axis.From)
                guids.append(sc.doc.Objects.AddBrep(
                    Rhino.Geometry.Surface.CreateExtrusion(self.profile, vector).ToBrep()))
                rs.CapPlanarHoles(guids[-1])
        if 'axis' in objects:
            guids.append(sc.doc.Objects.AddLine(self.axis))
        if 'xAxis' in objects:
            guids.append(sc.doc.Objects.AddLine(self.origin, self.origin + self.orientation.XAxis))
        return guids

    def printId(self):
        """return id with type letter"""
        return 'p' + str(self.id)

    def findOrientation(self):
        """Find the orientation (direction and roll) of a post.
        Returns: plane with normal along axis and x-axis towards center of one face.
        """
        #grab one edge of profile arbitrarily
        if type(self.profile) is Rhino.Geometry.PolyCurve:
            one_edge = self.profile.Explode()[0]
        else:
            raise NameError("Profile is wrong type of curve: " + str(type(self.profile)))
        middle_of_edge = one_edge.PointAtNormalizedLength(.5)
        #create plane from origin, normal vector, and x-axis vector
        return rs.PlaneFromNormal(self.origin,
            self.axis.UnitTangent,
            rs.VectorCreate(self.origin, middle_of_edge))

    def makeRoll(self):
        """Construct a default horizontal roll angle"""
        #get plane normal to axis at arbitrary rotation
        plane = rs.PlaneFromNormal(self.axis.From, self.axis.UnitTangent)
        #set roll to horizontal component of x axis
        roll = Rhino.Geometry.Vector3d(plane.XAxis.X, plane.XAxis.Y, 0)
        #vertical axis: horizontal component vanishes, fall back to YAxis
        if roll.IsZero:
            roll = plane.YAxis
        #NOTE(review): returns a line along plane.XAxis, ignoring the `roll`
        #vector computed above -- confirm which is intended.
        return Rhino.Geometry.Line(self.axis.From, plane.XAxis)

    def makeRectProfile(self, width, height):
        """create a Post profile using the Post's orientation
        Returns: rectangular PolyCurve boundary
        """
        #get corner uv coordinates
        corners = [[width/2, height/2], [width/2, -height/2],
            [-width/2, -height/2], [-width/2, height/2]]
        #close curve
        corners.append(corners[0])
        #convert local uvs to global points
        points = [self.orientation.PointAt(c[0], c[1]) for c in corners]
        #create polylinecurve
        polyline = Rhino.Geometry.Polyline(points)
        #get list of edge curves
        curves = [Rhino.Geometry.LineCurve(line) for line in polyline.GetSegments()]
        #join as polycurve
        return Rhino.Geometry.Curve.JoinCurves(curves)[0]

    def makeGcode(self, gcode=False):
        """Convert mill paths of each pocket into Gcode for the entire Post
        Returns: gcode string for milling post
        """
        #start a fresh Gcode container unless the caller supplied one
        if not gcode:
            gcode = common.Gcode()
        gcode.text += common.settings.gcode['preamble'] + "\n"
        gcode.text += "(Starting Post {0})\n".format(self.printId())
        for p in self.pockets:
            p.makeGcode(gcode=gcode)
        #get coordinates of home point
        home = str(common.settings.gcode['home']).split(',')
        home = [round(float(x), common.settings.gcode['precision']) for x in home]
        #return to home point when finished
        Rapid(Rhino.Geometry.Point3d(*home[0:3]), A=home[3], clear=True).makeGcode(gcode=gcode)
        return gcode
# End Post Class #
| |
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_equal, assert_)
from pytest import raises as assert_raises
from scipy.interpolate import (
KroghInterpolator, krogh_interpolate,
BarycentricInterpolator, barycentric_interpolate,
approximate_taylor_polynomial, pchip, PchipInterpolator,
pchip_interpolate, Akima1DInterpolator, CubicSpline, make_interp_spline)
from scipy._lib.six import xrange
def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0,
                extra_args=None):
    """Build an interpolator over random data and verify output shapes.

    Parameters: interpolator_cls is called as cls(x, y, axis=axis, **extra).
    BUG FIX: ``extra_args`` used a mutable dict as its default argument;
    replaced with the ``None`` sentinel (backward compatible for all callers).
    """
    if extra_args is None:
        extra_args = {}
    np.random.seed(1234)

    x = [-1, 0, 1, 2, 3, 4]
    s = list(range(1, len(y_shape) + 1))
    s.insert(axis % (len(y_shape) + 1), 0)
    y = np.random.rand(*((6,) + y_shape)).transpose(s)

    # Cython code chokes on y.shape = (0, 3) etc, skip them
    if y.size == 0:
        return

    xi = np.zeros(x_shape)
    yi = interpolator_cls(x, y, axis=axis, **extra_args)(xi)

    target_shape = ((deriv_shape or ()) + y.shape[:axis]
                    + x_shape + y.shape[axis:][1:])
    assert_equal(yi.shape, target_shape)

    # check it works also with lists
    if x_shape and y.size > 0:
        interpolator_cls(list(x), list(y), axis=axis, **extra_args)(list(xi))

    # check also values: xi is all zeros, and x[1] == 0, so the result must
    # broadcast-match y at index 1 along the interpolation axis
    if xi.size > 0 and deriv_shape is None:
        bs_shape = y.shape[:axis] + (1,) * len(x_shape) + y.shape[axis:][1:]
        yv = y[((slice(None),) * (axis % y.ndim)) + (1,)]
        yv = yv.reshape(bs_shape)
        yi, y = np.broadcast_arrays(yi, yv)
        assert_allclose(yi, y)
# Value shapes exercised by the shape tests below: () scalar-valued,
# (0,)/(1,) degenerate, (6, 2, 5) a general multi-axis case.
SHAPES = [(), (0,), (1,), (6, 2, 5)]
def test_shapes():
    """Shape-consistency smoke test across every interpolator constructor."""

    def spl_interp(x, y, axis):
        return make_interp_spline(x, y, axis=axis)

    interpolators = [KroghInterpolator, BarycentricInterpolator, pchip,
                     Akima1DInterpolator, CubicSpline, spl_interp]

    for ip in interpolators:
        for s1 in SHAPES:
            for s2 in SHAPES:
                for axis in range(-len(s2), len(s2)):
                    if ip == CubicSpline:
                        # CubicSpline: also exercise explicit boundary types
                        for bc in ['natural', 'clamped']:
                            extra = {'bc_type': bc}
                            check_shape(ip, s1, s2, None, axis, extra)
                    else:
                        check_shape(ip, s1, s2, None, axis)
def test_derivs_shapes():
    """The stacked-derivatives callable must report shape (n_points, ...)."""

    def krogh_derivs(x, y, axis=0):
        return KroghInterpolator(x, y, axis).derivatives

    for s1 in SHAPES:
        for s2 in SHAPES:
            axes = range(-len(s2), len(s2))
            for ax in axes:
                # six data points -> six stacked derivative values
                check_shape(krogh_derivs, s1, s2, (6,), ax)
def test_deriv_shapes():
    """derivative()/antiderivative() factories must preserve value shapes.

    BUG FIX: ``pchip_antideriv`` and ``pchip_antideriv2`` called
    ``.derivative()``, silently duplicating ``pchip_deriv``/``pchip_deriv2``
    and leaving the antiderivative path untested; they now call
    ``.antiderivative()``.  Also removed a stray ``pass`` in
    ``pchip_deriv_inplace``.
    """

    def krogh_deriv(x, y, axis=0):
        return KroghInterpolator(x, y, axis).derivative

    def pchip_deriv(x, y, axis=0):
        return pchip(x, y, axis).derivative()

    def pchip_deriv2(x, y, axis=0):
        return pchip(x, y, axis).derivative(2)

    def pchip_antideriv(x, y, axis=0):
        return pchip(x, y, axis).antiderivative()

    def pchip_antideriv2(x, y, axis=0):
        return pchip(x, y, axis).antiderivative(2)

    def pchip_deriv_inplace(x, y, axis=0):
        class P(PchipInterpolator):
            def __call__(self, x):
                # evaluate the first derivative in place of the value
                return PchipInterpolator.__call__(self, x, 1)
        return P(x, y, axis)

    def akima_deriv(x, y, axis=0):
        return Akima1DInterpolator(x, y, axis).derivative()

    def akima_antideriv(x, y, axis=0):
        return Akima1DInterpolator(x, y, axis).antiderivative()

    def cspline_deriv(x, y, axis=0):
        return CubicSpline(x, y, axis).derivative()

    def cspline_antideriv(x, y, axis=0):
        return CubicSpline(x, y, axis).antiderivative()

    def bspl_deriv(x, y, axis=0):
        return make_interp_spline(x, y, axis=axis).derivative()

    def bspl_antideriv(x, y, axis=0):
        return make_interp_spline(x, y, axis=axis).antiderivative()

    for ip in [krogh_deriv, pchip_deriv, pchip_deriv2, pchip_deriv_inplace,
               pchip_antideriv, pchip_antideriv2, akima_deriv, akima_antideriv,
               cspline_deriv, cspline_antideriv, bspl_deriv, bspl_antideriv]:
        for s1 in SHAPES:
            for s2 in SHAPES:
                for axis in range(-len(s2), len(s2)):
                    check_shape(ip, s1, s2, (), axis)
def _check_complex(ip):
x = [1, 2, 3, 4]
y = [1, 2, 1j, 3]
p = ip(x, y)
assert_allclose(y, p(x))
def test_complex():
    """Every complex-capable interpolator must round-trip complex data."""
    interpolators = (KroghInterpolator, BarycentricInterpolator, pchip,
                     CubicSpline)
    for interpolator in interpolators:
        _check_complex(interpolator)
class TestKrogh(object):
    """KroghInterpolator must exactly reproduce a degree-4 polynomial.

    BUG FIX: ``test_hermite`` constructed Hermite data (repeated abscissae
    with derivative values) but then interpolated ``self.xs``/``self.ys``,
    so the Hermite path was never exercised; it now uses the local data.
    ``xrange`` (from the removed ``scipy._lib.six`` shim) is replaced with
    the equivalent ``range``.
    """

    def setup_method(self):
        # a degree-4 polynomial is reproduced exactly from 5 points
        self.true_poly = np.poly1d([-2, 3, 1, 5, -4])
        self.test_xs = np.linspace(-1, 1, 100)
        self.xs = np.linspace(-1, 1, 5)
        self.ys = self.true_poly(self.xs)

    def test_lagrange(self):
        P = KroghInterpolator(self.xs, self.ys)
        assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))

    def test_scalar(self):
        P = KroghInterpolator(self.xs, self.ys)
        assert_almost_equal(self.true_poly(7), P(7))
        assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)))

    def test_derivatives(self):
        P = KroghInterpolator(self.xs, self.ys)
        D = P.derivatives(self.test_xs)
        for i in range(D.shape[0]):
            assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
                                D[i])

    def test_low_derivatives(self):
        # requesting more derivatives than data points is allowed
        P = KroghInterpolator(self.xs, self.ys)
        D = P.derivatives(self.test_xs, len(self.xs) + 2)
        for i in range(D.shape[0]):
            assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
                                D[i])

    def test_derivative(self):
        # derivative(x, i) must agree with row i of derivatives(x, m)
        P = KroghInterpolator(self.xs, self.ys)
        m = 10
        r = P.derivatives(self.test_xs, m)
        for i in range(m):
            assert_almost_equal(P.derivative(self.test_xs, i), r[i])

    def test_high_derivative(self):
        # derivatives above the polynomial degree are identically zero
        P = KroghInterpolator(self.xs, self.ys)
        for i in range(len(self.xs), 2 * len(self.xs)):
            assert_almost_equal(P.derivative(self.test_xs, i),
                                np.zeros(len(self.test_xs)))

    def test_hermite(self):
        # repeated abscissae encode derivative values (Hermite data)
        xs = [0, 0, 0, 1, 1, 1, 2]
        ys = [self.true_poly(0),
              self.true_poly.deriv(1)(0),
              self.true_poly.deriv(2)(0),
              self.true_poly(1),
              self.true_poly.deriv(1)(1),
              self.true_poly.deriv(2)(1),
              self.true_poly(2)]
        P = KroghInterpolator(xs, ys)
        assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))

    def test_vector(self):
        xs = [0, 1, 2]
        ys = np.array([[0, 1], [1, 0], [2, 1]])
        P = KroghInterpolator(xs, ys)
        Pi = [KroghInterpolator(xs, ys[:, i]) for i in range(ys.shape[1])]
        test_xs = np.linspace(-1, 3, 100)
        assert_almost_equal(P(test_xs),
                            np.rollaxis(np.asarray([p(test_xs) for p in Pi]), -1))
        assert_almost_equal(P.derivatives(test_xs),
                            np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
                                         (1, 2, 0)))

    def test_empty(self):
        P = KroghInterpolator(self.xs, self.ys)
        assert_array_equal(P([]), [])

    def test_shapes_scalarvalue(self):
        P = KroghInterpolator(self.xs, self.ys)
        assert_array_equal(np.shape(P(0)), ())
        assert_array_equal(np.shape(P(np.array(0))), ())
        assert_array_equal(np.shape(P([0])), (1,))
        assert_array_equal(np.shape(P([0, 1])), (2,))

    def test_shapes_scalarvalue_derivative(self):
        P = KroghInterpolator(self.xs, self.ys)
        n = P.n
        assert_array_equal(np.shape(P.derivatives(0)), (n,))
        assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
        assert_array_equal(np.shape(P.derivatives([0])), (n, 1))
        assert_array_equal(np.shape(P.derivatives([0, 1])), (n, 2))

    def test_shapes_vectorvalue(self):
        P = KroghInterpolator(self.xs, np.outer(self.ys, np.arange(3)))
        assert_array_equal(np.shape(P(0)), (3,))
        assert_array_equal(np.shape(P([0])), (1, 3))
        assert_array_equal(np.shape(P([0, 1])), (2, 3))

    def test_shapes_1d_vectorvalue(self):
        P = KroghInterpolator(self.xs, np.outer(self.ys, [1]))
        assert_array_equal(np.shape(P(0)), (1,))
        assert_array_equal(np.shape(P([0])), (1, 1))
        assert_array_equal(np.shape(P([0, 1])), (2, 1))

    def test_shapes_vectorvalue_derivative(self):
        P = KroghInterpolator(self.xs, np.outer(self.ys, np.arange(3)))
        n = P.n
        assert_array_equal(np.shape(P.derivatives(0)), (n, 3))
        assert_array_equal(np.shape(P.derivatives([0])), (n, 1, 3))
        assert_array_equal(np.shape(P.derivatives([0, 1])), (n, 2, 3))

    def test_wrapper(self):
        # the functional wrapper must match the class API
        P = KroghInterpolator(self.xs, self.ys)
        ki = krogh_interpolate
        assert_almost_equal(P(self.test_xs), ki(self.xs, self.ys, self.test_xs))
        assert_almost_equal(P.derivative(self.test_xs, 2),
                            ki(self.xs, self.ys, self.test_xs, der=2))
        assert_almost_equal(P.derivatives(self.test_xs, 2),
                            ki(self.xs, self.ys, self.test_xs, der=[0, 1]))

    def test_int_inputs(self):
        # Check input args are cast correctly to floats, gh-3669
        x = [0, 234, 468, 702, 936, 1170, 1404, 2340, 3744, 6084, 8424,
             13104, 60000]
        offset_cdf = np.array([-0.95, -0.86114777, -0.8147762, -0.64072425,
                               -0.48002351, -0.34925329, -0.26503107,
                               -0.13148093, -0.12988833, -0.12979296,
                               -0.12973574, -0.08582937, 0.05])
        f = KroghInterpolator(x, offset_cdf)
        assert_allclose(abs((f(x) - offset_cdf) / f.derivative(x, 1)),
                        0, atol=1e-10)

    def test_derivatives_complex(self):
        # regression test for gh-7381: krogh.derivatives(0) fails complex y
        x, y = np.array([-1, -1, 0, 1, 1]), np.array([1, 1.0j, 0, -1, 1.0j])
        func = KroghInterpolator(x, y)
        cmplx = func.derivatives(0)
        cmplx2 = (KroghInterpolator(x, y.real).derivatives(0) +
                  1j * KroghInterpolator(x, y.imag).derivatives(0))
        assert_allclose(cmplx, cmplx2, atol=1e-15)
class TestTaylor(object):
    """Smoke test for approximate_taylor_polynomial."""

    def test_exponential(self):
        degree = 5
        poly = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
        for _ in range(degree + 1):
            # every derivative of exp at 0 equals 1
            assert_almost_equal(poly(0), 1)
            poly = poly.deriv()
        # one differentiation past the degree leaves the zero polynomial
        assert_almost_equal(poly(0), 0)
class TestBarycentric(object):
    """Barycentric interpolation must exactly reproduce a degree-4 polynomial."""

    def setup_method(self):
        self.true_poly = np.poly1d([-2, 3, 1, 5, -4])
        self.test_xs = np.linspace(-1, 1, 100)
        self.xs = np.linspace(-1, 1, 5)
        self.ys = self.true_poly(self.xs)

    def test_lagrange(self):
        interp = BarycentricInterpolator(self.xs, self.ys)
        assert_almost_equal(self.true_poly(self.test_xs), interp(self.test_xs))

    def test_scalar(self):
        interp = BarycentricInterpolator(self.xs, self.ys)
        assert_almost_equal(self.true_poly(7), interp(7))
        assert_almost_equal(self.true_poly(np.array(7)), interp(np.array(7)))

    def test_delayed(self):
        # y values may be supplied after construction
        interp = BarycentricInterpolator(self.xs)
        interp.set_yi(self.ys)
        assert_almost_equal(self.true_poly(self.test_xs), interp(self.test_xs))

    def test_append(self):
        # data points may be added incrementally
        interp = BarycentricInterpolator(self.xs[:3], self.ys[:3])
        interp.add_xi(self.xs[3:], self.ys[3:])
        assert_almost_equal(self.true_poly(self.test_xs), interp(self.test_xs))

    def test_vector(self):
        nodes = [0, 1, 2]
        values = np.array([[0, 1], [1, 0], [2, 1]])
        interp = BarycentricInterpolator(nodes, values)
        per_column = [BarycentricInterpolator(nodes, values[:, k])
                      for k in range(values.shape[1])]
        probe = np.linspace(-1, 3, 100)
        expected = np.rollaxis(np.asarray([q(probe) for q in per_column]), -1)
        assert_almost_equal(interp(probe), expected)

    def test_shapes_scalarvalue(self):
        interp = BarycentricInterpolator(self.xs, self.ys)
        assert_array_equal(np.shape(interp(0)), ())
        assert_array_equal(np.shape(interp(np.array(0))), ())
        assert_array_equal(np.shape(interp([0])), (1,))
        assert_array_equal(np.shape(interp([0, 1])), (2,))

    def test_shapes_vectorvalue(self):
        interp = BarycentricInterpolator(self.xs, np.outer(self.ys, np.arange(3)))
        assert_array_equal(np.shape(interp(0)), (3,))
        assert_array_equal(np.shape(interp([0])), (1, 3))
        assert_array_equal(np.shape(interp([0, 1])), (2, 3))

    def test_shapes_1d_vectorvalue(self):
        interp = BarycentricInterpolator(self.xs, np.outer(self.ys, [1]))
        assert_array_equal(np.shape(interp(0)), (1,))
        assert_array_equal(np.shape(interp([0])), (1, 1))
        assert_array_equal(np.shape(interp([0, 1])), (2, 1))

    def test_wrapper(self):
        # the functional wrapper must match the class API
        interp = BarycentricInterpolator(self.xs, self.ys)
        assert_almost_equal(
            interp(self.test_xs),
            barycentric_interpolate(self.xs, self.ys, self.test_xs))
class TestPCHIP(object):
def _make_random(self, npts=20):
np.random.seed(1234)
xi = np.sort(np.random.random(npts))
yi = np.random.random(npts)
return pchip(xi, yi), xi, yi
def test_overshoot(self):
# PCHIP should not overshoot
p, xi, yi = self._make_random()
for i in range(len(xi)-1):
x1, x2 = xi[i], xi[i+1]
y1, y2 = yi[i], yi[i+1]
if y1 > y2:
y1, y2 = y2, y1
xp = np.linspace(x1, x2, 10)
yp = p(xp)
assert_(((y1 <= yp) & (yp <= y2)).all())
def test_monotone(self):
# PCHIP should preserve monotonicty
p, xi, yi = self._make_random()
for i in range(len(xi)-1):
x1, x2 = xi[i], xi[i+1]
y1, y2 = yi[i], yi[i+1]
xp = np.linspace(x1, x2, 10)
yp = p(xp)
assert_(((y2-y1) * (yp[1:] - yp[:1]) > 0).all())
def test_cast(self):
# regression test for integer input data, see gh-3453
data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100],
[-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]])
xx = np.arange(100)
curve = pchip(data[0], data[1])(xx)
data1 = data * 1.0
curve1 = pchip(data1[0], data1[1])(xx)
assert_allclose(curve, curve1, atol=1e-14, rtol=1e-14)
def test_nag(self):
# Example from NAG C implementation,
# http://nag.com/numeric/cl/nagdoc_cl25/html/e01/e01bec.html
# suggested in gh-5326 as a smoke test for the way the derivatives
# are computed (see also gh-3453)
from scipy._lib.six import StringIO
dataStr = '''
7.99 0.00000E+0
8.09 0.27643E-4
8.19 0.43750E-1
8.70 0.16918E+0
9.20 0.46943E+0
10.00 0.94374E+0
12.00 0.99864E+0
15.00 0.99992E+0
20.00 0.99999E+0
'''
data = np.loadtxt(StringIO(dataStr))
pch = pchip(data[:,0], data[:,1])
resultStr = '''
7.9900 0.0000
9.1910 0.4640
10.3920 0.9645
11.5930 0.9965
12.7940 0.9992
13.9950 0.9998
15.1960 0.9999
16.3970 1.0000
17.5980 1.0000
18.7990 1.0000
20.0000 1.0000
'''
result = np.loadtxt(StringIO(resultStr))
assert_allclose(result[:,1], pch(result[:,0]), rtol=0., atol=5e-5)
def test_endslopes(self):
# this is a smoke test for gh-3453: PCHIP interpolator should not
# set edge slopes to zero if the data do not suggest zero edge derivatives
x = np.array([0.0, 0.1, 0.25, 0.35])
y1 = np.array([279.35, 0.5e3, 1.0e3, 2.5e3])
y2 = np.array([279.35, 2.5e3, 1.50e3, 1.0e3])
for pp in (pchip(x, y1), pchip(x, y2)):
for t in (x[0], x[-1]):
assert_(pp(t, 1) != 0)
def test_all_zeros(self):
x = np.arange(10)
y = np.zeros_like(x)
# this should work and not generate any warnings
with warnings.catch_warnings():
warnings.filterwarnings('error')
pch = pchip(x, y)
xx = np.linspace(0, 9, 101)
assert_equal(pch(xx), 0.)
def test_two_points(self):
# regression test for gh-6222: pchip([0, 1], [0, 1]) fails because
# it tries to use a three-point scheme to estimate edge derivatives,
# while there are only two points available.
# Instead, it should construct a linear interpolator.
x = np.linspace(0, 1, 11)
p = pchip([0, 1], [0, 2])
assert_allclose(p(x), 2*x, atol=1e-15)
def test_pchip_interpolate(self):
assert_array_almost_equal(
pchip_interpolate([1,2,3], [4,5,6], [0.5], der=1),
[1.])
assert_array_almost_equal(
pchip_interpolate([1,2,3], [4,5,6], [0.5], der=0),
[3.5])
assert_array_almost_equal(
pchip_interpolate([1,2,3], [4,5,6], [0.5], der=[0, 1]),
[[3.5], [1]])
def test_roots(self):
# regression test for gh-6357: .roots method should work
p = pchip([0, 1], [-1, 1])
r = p.roots()
assert_allclose(r, 0.5)
class TestCubicSpline(object):
    @staticmethod
    def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',
                          tol=1e-14):
        """Check that spline coefficients satisfy the continuity and boundary
        conditions.

        S        -- a fitted CubicSpline (attributes .x breakpoints, .c
                    coefficient array of shape (4, n-1, ...))
        bc_start -- boundary condition at x[0]: 'not-a-knot', 'periodic',
                    'clamped', 'natural', or an (order, value) pair
        bc_end   -- boundary condition at x[-1], same options (except
                    'periodic', which is taken from bc_start)
        tol      -- relative and absolute tolerance for all checks
        """
        x = S.x
        c = S.c
        dx = np.diff(x)
        # align dx for broadcasting against trailing value dimensions of c
        dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))
        dxi = dx[:-1]

        # Check C2 continuity.
        # value, first and second derivative of each piece evaluated at its
        # right end must equal the next piece's leading coefficients
        assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +
                        c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)
        assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +
                        2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)
        assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],
                        rtol=tol, atol=tol)

        # Check that we found a parabola, the third derivative is 0.
        if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':
            assert_allclose(c[0], 0, rtol=tol, atol=tol)
            return

        # Check periodic boundary conditions.
        if bc_start == 'periodic':
            assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)
            assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)
            assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)
            return

        # Check other boundary conditions.
        if bc_start == 'not-a-knot':
            if x.size == 2:
                # two points: the spline degenerates to the secant line
                slope = (S(x[1]) - S(x[0])) / dx[0]
                assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)
            else:
                # not-a-knot: third derivative continuous across x[1]
                assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)
        elif bc_start == 'clamped':
            assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)
        elif bc_start == 'natural':
            assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)
        else:
            order, value = bc_start
            assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)

        if bc_end == 'not-a-knot':
            if x.size == 2:
                slope = (S(x[1]) - S(x[0])) / dx[0]
                assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)
            else:
                # third derivative continuous across x[-2]
                assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)
        elif bc_end == 'clamped':
            assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)
        elif bc_end == 'natural':
            assert_allclose(S(x[-1], 2), 0, rtol=tol, atol=tol)
        else:
            order, value = bc_end
            assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)
def check_all_bc(self, x, y, axis):
deriv_shape = list(y.shape)
del deriv_shape[axis]
first_deriv = np.empty(deriv_shape)
first_deriv.fill(2)
second_deriv = np.empty(deriv_shape)
second_deriv.fill(-1)
bc_all = [
'not-a-knot',
'natural',
'clamped',
(1, first_deriv),
(2, second_deriv)
]
for bc in bc_all[:3]:
S = CubicSpline(x, y, axis=axis, bc_type=bc)
self.check_correctness(S, bc, bc)
for bc_start in bc_all:
for bc_end in bc_all:
S = CubicSpline(x, y, axis=axis, bc_type=(bc_start, bc_end))
self.check_correctness(S, bc_start, bc_end, tol=2e-14)
def test_general(self):
x = np.array([-1, 0, 0.5, 2, 4, 4.5, 5.5, 9])
y = np.array([0, -0.5, 2, 3, 2.5, 1, 1, 0.5])
for n in [2, 3, x.size]:
self.check_all_bc(x[:n], y[:n], 0)
Y = np.empty((2, n, 2))
Y[0, :, 0] = y[:n]
Y[0, :, 1] = y[:n] - 1
Y[1, :, 0] = y[:n] + 2
Y[1, :, 1] = y[:n] + 3
self.check_all_bc(x[:n], Y, 1)
def test_periodic(self):
for n in [2, 3, 5]:
x = np.linspace(0, 2 * np.pi, n)
y = np.cos(x)
S = CubicSpline(x, y, bc_type='periodic')
self.check_correctness(S, 'periodic', 'periodic')
Y = np.empty((2, n, 2))
Y[0, :, 0] = y
Y[0, :, 1] = y + 2
Y[1, :, 0] = y - 1
Y[1, :, 1] = y + 5
S = CubicSpline(x, Y, axis=1, bc_type='periodic')
self.check_correctness(S, 'periodic', 'periodic')
def test_periodic_eval(self):
x = np.linspace(0, 2 * np.pi, 10)
y = np.cos(x)
S = CubicSpline(x, y, bc_type='periodic')
assert_almost_equal(S(1), S(1 + 2 * np.pi), decimal=15)
def test_dtypes(self):
x = np.array([0, 1, 2, 3], dtype=int)
y = np.array([-5, 2, 3, 1], dtype=int)
S = CubicSpline(x, y)
self.check_correctness(S)
y = np.array([-1+1j, 0.0, 1-1j, 0.5-1.5j])
S = CubicSpline(x, y)
self.check_correctness(S)
S = CubicSpline(x, x ** 3, bc_type=("natural", (1, 2j)))
self.check_correctness(S, "natural", (1, 2j))
y = np.array([-5, 2, 3, 1])
S = CubicSpline(x, y, bc_type=[(1, 2 + 0.5j), (2, 0.5 - 1j)])
self.check_correctness(S, (1, 2 + 0.5j), (2, 0.5 - 1j))
def test_small_dx(self):
rng = np.random.RandomState(0)
x = np.sort(rng.uniform(size=100))
y = 1e4 + rng.uniform(size=100)
S = CubicSpline(x, y)
self.check_correctness(S, tol=1e-13)
def test_incorrect_inputs(self):
x = np.array([1, 2, 3, 4])
y = np.array([1, 2, 3, 4])
xc = np.array([1 + 1j, 2, 3, 4])
xn = np.array([np.nan, 2, 3, 4])
xo = np.array([2, 1, 3, 4])
yn = np.array([np.nan, 2, 3, 4])
y3 = [1, 2, 3]
x1 = [1]
y1 = [1]
assert_raises(ValueError, CubicSpline, xc, y)
assert_raises(ValueError, CubicSpline, xn, y)
assert_raises(ValueError, CubicSpline, x, yn)
assert_raises(ValueError, CubicSpline, xo, y)
assert_raises(ValueError, CubicSpline, x, y3)
assert_raises(ValueError, CubicSpline, x[:, np.newaxis], y)
assert_raises(ValueError, CubicSpline, x1, y1)
wrong_bc = [('periodic', 'clamped'),
((2, 0), (3, 10)),
((1, 0), ),
(0., 0.),
'not-a-typo']
for bc_type in wrong_bc:
assert_raises(ValueError, CubicSpline, x, y, 0, bc_type, True)
# Shapes mismatch when giving arbitrary derivative values:
Y = np.c_[y, y]
bc1 = ('clamped', (1, 0))
bc2 = ('clamped', (1, [0, 0, 0]))
bc3 = ('clamped', (1, [[0, 0]]))
assert_raises(ValueError, CubicSpline, x, Y, 0, bc1, True)
assert_raises(ValueError, CubicSpline, x, Y, 0, bc2, True)
assert_raises(ValueError, CubicSpline, x, Y, 0, bc3, True)
# periodic condition, y[-1] must be equal to y[0]:
assert_raises(ValueError, CubicSpline, x, y, 0, 'periodic', True)
| |
"""Functions used for generating CSS sprites.
These are ported from the Compass sprite library:
http://compass-style.org/reference/compass/utilities/sprites/
"""
from __future__ import absolute_import
import six
import base64
import glob
import hashlib
import logging
import os.path
import tempfile
import time
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from PIL import Image
except ImportError:
try:
import Image
except:
Image = None
from six.moves import xrange
from scss import config
from scss.functions.compass import _image_size_cache
from scss.functions.compass.layouts import PackedSpritesLayout, HorizontalSpritesLayout, VerticalSpritesLayout, DiagonalSpritesLayout
from scss.functions.library import FunctionLibrary
from scss.types import Color, List, Number, String, Boolean
from scss.util import escape
log = logging.getLogger(__name__)

# Cap on the number of sprite maps cached in memory; once exceeded, the
# oldest entries are evicted down to KEEP_SPRITE_MAPS (80% of the cap).
MAX_SPRITE_MAPS = 4096
KEEP_SPRITE_MAPS = int(MAX_SPRITE_MAPS * 0.8)

# Registry of Sass-callable functions defined in this module.
COMPASS_SPRITES_LIBRARY = FunctionLibrary()
register = COMPASS_SPRITES_LIBRARY.register

# ------------------------------------------------------------------------------
# Compass-like functionality for sprites and images

# In-memory cache: rendered asset (or glob pattern) -> sprite map dict.
sprite_maps = {}
def alpha_composite(im1, im2, offset=None, box=None, opacity=1):
    """Composite `im2` over `im1` in place and return `im1`.

    Implements the alpha "over" operator pixel by pixel on RGBA data.

    im1     -- destination image; mutated in place and returned.
    im2     -- source image composited on top of `im1`.
    offset  -- (x, y) position in `im1` where the composited box starts
               (defaults to (0, 0)).
    box     -- (left, top, right, bottom) region of `im2` to composite;
               defaults to the whole of `im2`.
    opacity -- extra multiplier applied to `im2`'s alpha channel.

    Pixels falling outside either image are silently skipped.
    """
    im1size = im1.size
    im2size = im2.size
    if offset is None:
        offset = (0, 0)
    if box is None:
        box = (0, 0) + im2size
    o1x, o1y = offset
    o2x, o2y, o2w, o2h = box
    width = o2w - o2x
    height = o2h - o2y
    im1_data = im1.load()
    im2_data = im2.load()
    # `range` works identically on Python 2 and 3 here (replaces six xrange).
    for y in range(height):
        for x in range(width):
            pos1 = o1x + x, o1y + y
            if pos1[0] >= im1size[0] or pos1[1] >= im1size[1]:
                continue
            pos2 = o2x + x, o2y + y
            if pos2[0] >= im2size[0] or pos2[1] >= im2size[1]:
                continue
            dr, dg, db, da = im1_data[pos1]
            sr, sg, sb, sa = im2_data[pos2]
            # Normalise alphas to [0, 1] and apply the extra opacity factor.
            da /= 255.0
            sa /= 255.0
            sa *= opacity
            ida = da * (1 - sa)  # destination alpha surviving the source
            oa = (sa + ida)      # resulting (output) alpha
            if oa:
                # BUG FIX: divide by the output alpha *inside* round().
                # Previously the value was rounded first, then divided and
                # truncated by int(), losing precision on semi-transparent
                # pixels.
                pixel = (
                    int(round((sr * sa + dr * ida) / oa)),
                    int(round((sg * sa + dg * ida) / oa)),
                    int(round((sb * sa + db * ida) / oa)),
                    int(round(255 * oa))
                )
            else:
                # Both contributions fully transparent: clear the pixel.
                pixel = (0, 0, 0, 0)
            im1_data[pos1] = pixel
    return im1
@register('sprite-map')
def sprite_map(g, **kwargs):
    """
    Generates a sprite map from the files matching the glob pattern.
    Uses the keyword-style arguments passed in to control the placement.

    $direction - Sprite map layout. Can be `vertical` (default), `horizontal`, `diagonal` or `smart`.

    $position - For `horizontal` and `vertical` directions, the position of the sprite. (defaults to `0`)
    $<sprite>-position - Position of a given sprite.

    $padding, $spacing - Adds paddings to sprites (top, right, bottom, left). (defaults to `0, 0, 0, 0`)
    $<sprite>-padding, $<sprite>-spacing - Padding for a given sprite.

    $dst-color - Together with `$src-color`, forms a map of source colors to be converted to destination colors (same index of `$src-color` changed to `$dst-color`).
    $<sprite>-dst-color - Destination colors for a given sprite. (defaults to `$dst-color`)

    $src-color - Selects source colors to be converted to the corresponding destination colors. (defaults to `black`)
    $<sprite>-src-color - Source colors for a given sprite. (defaults to `$src-color`)

    $collapse - Collapses every image in the sprite map to a fixed size (`x` and `y`).
    $collapse-x - Collapses a size for `x`.
    $collapse-y - Collapses a size for `y`.
    """
    if not Image:
        raise Exception("Images manipulation require PIL")

    now_time = time.time()

    g = String(g, quotes=None).value

    if g in sprite_maps:
        # BUG FIX: previously this read `sprite_maps[glob]`, indexing the
        # cache with the `glob` *module* instead of the pattern string `g`,
        # which raised KeyError whenever the same map was requested twice.
        sprite_maps[g]['*'] = now_time
    elif '..' not in g:  # Protect against going to prohibited places...
        if callable(config.STATIC_ROOT):
            glob_path = g
            rfiles = files = sorted(config.STATIC_ROOT(g))
        else:
            glob_path = os.path.join(config.STATIC_ROOT, g)
            files = glob.glob(glob_path)
            files = sorted((f, None) for f in files)
            rfiles = [(rf[len(config.STATIC_ROOT):], s) for rf, s in files]

        if not files:
            log.error("Nothing found at '%s'", glob_path)
            return String.unquoted('')

        map_name = os.path.normpath(os.path.dirname(g)).replace('\\', '_').replace('/', '_')
        # Cache key derived from the file list, the arguments and the assets
        # URL. (Was `list(zip(*files)[0])`, which breaks on Python 3 where
        # zip() is not subscriptable; this comprehension is equivalent.)
        key = [f for f, s in files] + [repr(kwargs), config.ASSETS_URL]
        key = map_name + '-' + base64.urlsafe_b64encode(hashlib.md5(repr(key)).digest()).rstrip('=').replace('-', '_')
        asset_file = key + '.png'
        ASSETS_ROOT = config.ASSETS_ROOT or os.path.join(config.STATIC_ROOT, 'assets')
        asset_path = os.path.join(ASSETS_ROOT, asset_file)
        cache_path = os.path.join(config.CACHE_ROOT or ASSETS_ROOT, asset_file + '.cache')

        inline = Boolean(kwargs.get('inline', False))

        sprite_map = None
        asset = None
        file_asset = None
        inline_asset = None
        if os.path.exists(asset_path) or inline:
            try:
                # NOTE(review): opened in text mode; pickle on Python 3
                # requires 'rb' -- confirm the targeted Python version.
                save_time, file_asset, inline_asset, sprite_map, sizes = pickle.load(open(cache_path))
                if file_asset:
                    sprite_maps[file_asset.render()] = sprite_map
                if inline_asset:
                    sprite_maps[inline_asset.render()] = sprite_map
                if inline:
                    asset = inline_asset
                else:
                    asset = file_asset
            except:
                # Corrupt or missing cache file: fall through and rebuild.
                pass

            if sprite_map:
                # Invalidate the cache when any source file is newer than it.
                for file_, storage in files:
                    if storage is not None:
                        d_obj = storage.modified_time(file_)
                        _time = time.mktime(d_obj.timetuple())
                    else:
                        _time = os.path.getmtime(file_)
                    if save_time < _time:
                        if _time > now_time:
                            log.warning("File '%s' has a date in the future (cache ignored)" % file_)
                        sprite_map = None  # Invalidate cached sprite map
                        break

        if sprite_map is None or asset is None:
            cache_buster = Boolean(kwargs.get('cache_buster', True))
            # NOTE: `SPRTE_MAP_DIRECTION` is the actual (misspelled) name of
            # the option in scss.config; do not "fix" it here.
            direction = String.unquoted(kwargs.get('direction', config.SPRTE_MAP_DIRECTION)).value
            repeat = String.unquoted(kwargs.get('repeat', 'no-repeat')).value
            collapse = kwargs.get('collapse', Number(0))
            if isinstance(collapse, List):
                collapse_x = int(Number(collapse[0]).value)
                collapse_y = int(Number(collapse[-1]).value)
            else:
                collapse_x = collapse_y = int(Number(collapse).value)
            if 'collapse_x' in kwargs:
                collapse_x = int(Number(kwargs['collapse_x']).value)
            if 'collapse_y' in kwargs:
                collapse_y = int(Number(kwargs['collapse_y']).value)

            # Normalise $position to a fraction in [0, 1].
            position = Number(kwargs.get('position', 0))
            if not position.is_simple_unit('%') and position.value > 1:
                position = position.value / 100.0
            else:
                position = position.value
            if position < 0:
                position = 0.0
            elif position > 1:
                position = 1.0

            # Normalise $padding/$spacing to a 4-tuple (top, right, bottom, left).
            padding = kwargs.get('padding', kwargs.get('spacing', Number(0)))
            padding = [int(Number(v).value) for v in List.from_maybe(padding)]
            padding = (padding * 4)[:4]

            # Global source -> destination color replacement lists, padded to
            # the same length by cycling.
            dst_colors = kwargs.get('dst_color')
            dst_colors = [list(Color(v).value[:3]) for v in List.from_maybe(dst_colors) if v]
            src_colors = kwargs.get('src_color', Color.from_name('black'))
            src_colors = [tuple(Color(v).value[:3]) for v in List.from_maybe(src_colors)]
            len_colors = max(len(dst_colors), len(src_colors))
            dst_colors = (dst_colors * len_colors)[:len_colors]
            src_colors = (src_colors * len_colors)[:len_colors]

            def images(f=lambda x: x):
                """Yield an opened PIL image for every matched file."""
                for file_, storage in f(files):
                    if storage is not None:
                        _file = storage.open(file_)
                    else:
                        _file = file_
                    _image = Image.open(_file)
                    yield _image

            names = tuple(os.path.splitext(os.path.basename(file_))[0] for file_, storage in files)

            # Collect per-sprite overrides of position/padding/colors.
            has_dst_colors = False
            all_dst_colors = []
            all_src_colors = []
            all_positions = []
            all_paddings = []
            for name in names:
                name = name.replace('-', '_')

                _position = kwargs.get(name + '_position')
                if _position is None:
                    _position = position
                else:
                    _position = Number(_position)
                    if not _position.is_simple_unit('%') and _position.value > 1:
                        _position = _position.value / 100.0
                    else:
                        _position = _position.value
                    if _position < 0:
                        _position = 0.0
                    elif _position > 1:
                        _position = 1.0
                all_positions.append(_position)

                _padding = kwargs.get(name + '_padding', kwargs.get(name + '_spacing'))
                if _padding is None:
                    _padding = padding
                else:
                    _padding = [int(Number(v).value) for v in List.from_maybe(_padding)]
                    _padding = (_padding * 4)[:4]
                all_paddings.append(_padding)

                _dst_colors = kwargs.get(name + '_dst_color')
                if _dst_colors is None:
                    _dst_colors = dst_colors
                    if dst_colors:
                        has_dst_colors = True
                else:
                    has_dst_colors = True
                    _dst_colors = [list(Color(v).value[:3]) for v in List.from_maybe(_dst_colors) if v]
                # BUG FIX: per-sprite src colors used to default to `black`,
                # silently ignoring a global $src-color; they now default to
                # the global list (which itself defaults to black).
                _src_colors = kwargs.get(name + '_src_color')
                if _src_colors is None:
                    _src_colors = src_colors
                else:
                    _src_colors = [tuple(Color(v).value[:3]) for v in List.from_maybe(_src_colors)]
                _len_colors = max(len(_dst_colors), len(_src_colors))
                _dst_colors = (_dst_colors * _len_colors)[:_len_colors]
                _src_colors = (_src_colors * _len_colors)[:_len_colors]
                all_dst_colors.append(_dst_colors)
                all_src_colors.append(_src_colors)

            # Collapsed axes use the fixed size; otherwise the image's own size.
            sizes = tuple((collapse_x or i.size[0], collapse_y or i.size[1]) for i in images())

            if direction == 'horizontal':
                layout = HorizontalSpritesLayout(sizes, all_paddings, position=all_positions)
            elif direction == 'vertical':
                layout = VerticalSpritesLayout(sizes, all_paddings, position=all_positions)
            elif direction == 'diagonal':
                layout = DiagonalSpritesLayout(sizes, all_paddings)
            elif direction == 'smart':
                layout = PackedSpritesLayout(sizes, all_paddings)
            else:
                raise Exception("Invalid direction %r" % (direction,))
            layout_positions = list(layout)

            new_image = Image.new(
                mode='RGBA',
                size=(layout.width, layout.height),
                color=(0, 0, 0, 0)
            )

            useless_dst_color = has_dst_colors

            offsets_x = []
            offsets_y = []
            for i, image in enumerate(images()):
                x, y, width, height, cssx, cssy, cssw, cssh = layout_positions[i]
                iwidth, iheight = image.size

                if has_dst_colors:
                    # Replace matching source colors pixel by pixel,
                    # preserving each pixel's alpha.
                    pixdata = image.load()
                    for _y in xrange(iheight):
                        for _x in xrange(iwidth):
                            pixel = pixdata[_x, _y]
                            a = pixel[3] if len(pixel) == 4 else 255
                            if a:
                                rgb = pixel[:3]
                                for j, dst_color in enumerate(all_dst_colors[i]):
                                    if rgb == all_src_colors[i][j]:
                                        new_color = tuple([int(c) for c in dst_color] + [a])
                                        if pixel != new_color:
                                            pixdata[_x, _y] = new_color
                                            useless_dst_color = False
                                        break

                if iwidth != width or iheight != height:
                    # Collapsed sprite: tile the source over the target box.
                    cy = 0
                    while cy < iheight:
                        cx = 0
                        while cx < iwidth:
                            new_image = alpha_composite(new_image, image, (x, y), (cx, cy, cx + width, cy + height))
                            cx += width
                        cy += height
                else:
                    new_image.paste(image, (x, y))
                offsets_x.append(cssx)
                offsets_y.append(cssy)

            if useless_dst_color:
                log.warning("Useless use of $dst-color in sprite map for files at '%s' (never used for)" % glob_path)

            filetime = int(now_time)

            if not inline:
                try:
                    new_image.save(asset_path)
                    url = '%s%s' % (config.ASSETS_URL, asset_file)
                    if cache_buster:
                        url += '?_=%s' % filetime
                except IOError:
                    # Could not write the asset; fall back to inlining it.
                    log.exception("Error while saving image")
                    inline = True
            if inline:
                output = six.BytesIO()
                new_image.save(output, format='PNG')
                contents = output.getvalue()
                output.close()
                mime_type = 'image/png'
                url = 'data:' + mime_type + ';base64,' + base64.b64encode(contents)

            url = 'url(%s)' % escape(url)
            if inline:
                asset = inline_asset = List([String.unquoted(url), String.unquoted(repeat)])
            else:
                asset = file_asset = List([String.unquoted(url), String.unquoted(repeat)])

            # Add the new object:
            sprite_map = dict(zip(names, zip(sizes, rfiles, offsets_x, offsets_y)))
            sprite_map['*'] = now_time
            sprite_map['*f*'] = asset_file
            sprite_map['*k*'] = key
            sprite_map['*n*'] = map_name
            sprite_map['*t*'] = filetime

            # Write the cache atomically: dump to a temp file, then rename.
            cache_tmp = tempfile.NamedTemporaryFile(delete=False, dir=ASSETS_ROOT)
            pickle.dump((now_time, file_asset, inline_asset, sprite_map, zip(files, sizes)), cache_tmp)
            cache_tmp.close()
            os.rename(cache_tmp.name, cache_path)

            # Use the sorted list to remove older elements (keep only 500 objects):
            if len(sprite_maps) > MAX_SPRITE_MAPS:
                for a in sorted(sprite_maps, key=lambda a: sprite_maps[a]['*'], reverse=True)[KEEP_SPRITE_MAPS:]:
                    del sprite_maps[a]
                log.warning("Exceeded maximum number of sprite maps (%s)" % MAX_SPRITE_MAPS)
            sprite_maps[asset.render()] = sprite_map

        for file_, size in sizes:
            # NOTE(review): in the rebuild path `sizes` holds (width, height)
            # pairs, while the cache-load path stores (file, size) pairs --
            # the shapes disagree; confirm which one this cache expects.
            _image_size_cache[file_] = size
    # TODO this sometimes returns an empty list, or is never assigned to
    return asset
@register('sprite-map-name', 1)
def sprite_map_name(map):
    """
    Returns the name of a sprite map.  The name is derived from the folder
    that contains the sprites.
    """
    rendered = map.render()
    smap = sprite_maps.get(rendered)
    if smap:
        return String.unquoted(smap['*n*'])
    log.error("No sprite map found: %s", rendered, extra={'stack': True})
    return String.unquoted('')
@register('sprite-file', 2)
def sprite_file(map, sprite):
    """
    Returns the relative path (from the images directory) to the original file
    used when constructing the sprite.  This is suitable for passing to the
    image_width and image_height helpers.
    """
    key = map.render()
    smap = sprite_maps.get(key)
    name = String.unquoted(sprite).value
    entry = smap.get(name) if smap else None
    if not smap:
        log.error("No sprite map found: %s", key, extra={'stack': True})
    elif not entry:
        log.error("No sprite found: %s in %s", name, smap['*n*'], extra={'stack': True})
    if entry:
        # entry layout: (size, (relative_file, storage), offset_x, offset_y)
        return String(entry[1][0])
    return String.unquoted('')
@register('sprites', 1)
@register('sprite-names', 1)
def sprites(map):
    """Return the sorted list of sprite names contained in a sprite map."""
    smap = sprite_maps.get(map.render(), {})
    # Keys starting with '*' are internal metadata, not sprite names.
    names = sorted(k for k in smap if not k.startswith('*'))
    return List([String.unquoted(n) for n in names])
@register('sprite', 2)
@register('sprite', 3)
@register('sprite', 4)
@register('sprite', 5)
def sprite(map, sprite, offset_x=None, offset_y=None, cache_buster=True):
    """
    Returns the image and background position for use in a single shorthand
    property

    map          -- rendered sprite-map value (as produced by sprite-map()).
    sprite       -- name of the sprite within the map.
    offset_x/y   -- optional extra offsets applied to the sprite position.
    cache_buster -- append the build timestamp as a query string when true.

    Returns a List of (url, x, y), or List([0, 0]) when the sprite or the
    map cannot be found (an error is logged in that case).
    """
    map = map.render()
    sprite_map = sprite_maps.get(map)
    sprite_name = String.unquoted(sprite).value
    sprite = sprite_map and sprite_map.get(sprite_name)
    if not sprite_map:
        log.error("No sprite map found: %s", map, extra={'stack': True})
    elif not sprite:
        log.error("No sprite found: %s in %s", sprite_name, sprite_map['*n*'], extra={'stack': True})
    if sprite:
        url = '%s%s' % (config.ASSETS_URL, sprite_map['*f*'])
        if cache_buster:
            url += '?_=%s' % sprite_map['*t*']
        x = Number(offset_x or 0, 'px')
        y = Number(offset_y or 0, 'px')
        # Note: `and` binds tighter than `or`, so this shifts by the sprite
        # offset when the value is zero, OR when its magnitude is >= 1 and
        # it is not a percentage.
        if not x.value or (x.value <= -1 or x.value >= 1) and not x.is_simple_unit('%'):
            # sprite layout: (size, file, offset_x, offset_y)
            x -= Number(sprite[2], 'px')
        if not y.value or (y.value <= -1 or y.value >= 1) and not y.is_simple_unit('%'):
            y -= Number(sprite[3], 'px')
        url = "url(%s)" % escape(url)
        return List([String.unquoted(url), x, y])
    return List([Number(0), Number(0)])
@register('sprite-url', 1)
@register('sprite-url', 2)
def sprite_url(map, cache_buster=True):
    """
    Returns a url to the sprite image.
    """
    rendered = map.render()
    smap = sprite_maps.get(rendered)
    if not smap:
        log.error("No sprite map found: %s", rendered, extra={'stack': True})
        return String.unquoted('')
    url = '%s%s' % (config.ASSETS_URL, smap['*f*'])
    if cache_buster:
        # Append the build timestamp so clients refetch after a rebuild.
        url += '?_=%s' % smap['*t*']
    return String.unquoted("url(%s)" % escape(url))
@register('sprite-position', 2)
@register('sprite-position', 3)
@register('sprite-position', 4)
def sprite_position(map, sprite, offset_x=None, offset_y=None):
    """
    Returns the position for the original image in the sprite.
    This is suitable for use as a value to background-position.

    offset_x/offset_y may be Numbers, or keyword values such as
    `left`/`right`/`center` (x) and `top`/`bottom`/`center` (y), which are
    passed through unchanged.
    """
    map = map.render()
    sprite_map = sprite_maps.get(map)
    sprite_name = String.unquoted(sprite).value
    sprite = sprite_map and sprite_map.get(sprite_name)
    if not sprite_map:
        log.error("No sprite map found: %s", map, extra={'stack': True})
    elif not sprite:
        log.error("No sprite found: %s in %s", sprite_name, sprite_map['*n*'], extra={'stack': True})
    if sprite:
        x = None
        if offset_x is not None and not isinstance(offset_x, Number):
            # Non-Number values are candidate positional keywords.
            x = offset_x
        if not x or x.value not in ('left', 'right', 'center'):
            if x:
                # Not a recognised keyword: fall back to a zero offset.
                offset_x = None
            x = Number(offset_x or 0, 'px')
            # Note: `and` binds tighter than `or`; shift by the sprite offset
            # when the value is zero, OR when its magnitude is >= 1 and it is
            # not a percentage.
            if not x.value or (x.value <= -1 or x.value >= 1) and not x.is_simple_unit('%'):
                # sprite layout: (size, file, offset_x, offset_y)
                x -= Number(sprite[2], 'px')
        y = None
        if offset_y is not None and not isinstance(offset_y, Number):
            y = offset_y
        if not y or y.value not in ('top', 'bottom', 'center'):
            if y:
                offset_y = None
            y = Number(offset_y or 0, 'px')
            if not y.value or (y.value <= -1 or y.value >= 1) and not y.is_simple_unit('%'):
                y -= Number(sprite[3], 'px')
        return List([x, y])
    return List([Number(0), Number(0)])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.