gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import sys
import warnings
import IECore
import GafferUI
import Gaffer
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
class Window( GafferUI.ContainerWidget ) :

	## How the window responds to size changes :
	#
	# Fixed     : sized to fit the child; the user cannot resize it.
	# Manual    : the user may resize the window freely.
	# Automatic : the window resizes itself to fit the child whenever the
	#             child's requirements change (see _WindowEventFilter).
	SizeMode = IECore.Enum.create( "Fixed", "Manual", "Automatic" )

	## \todo Remove the deprecated resizable argument
	def __init__( self, title="GafferUI.Window", borderWidth=0, resizeable=None, child=None, sizeMode=SizeMode.Manual, icon="GafferLogoMini.png", **kw ) :

		GafferUI.ContainerWidget.__init__(
			self, QtGui.QWidget( None, QtCore.Qt.WindowFlags( QtCore.Qt.Window ), **kw )
		)

		# the single central child widget, and the set of child windows
		# kept alive (and on top) by addChildWindow()
		self.__child = None
		self.__childWindows = set()

		self.__qtLayout = QtGui.QGridLayout()
		self.__qtLayout.setContentsMargins( borderWidth, borderWidth, borderWidth, borderWidth )
		self.__qtLayout.setSizeConstraint( QtGui.QLayout.SetMinAndMaxSize )

		# The initial size of a widget in qt "depends on the user's platform and screen geometry".
		# In other words, it is useless. We use this flag to determine whether or not our size is
		# this meaningless initial size, or whether it has been set appropriately. This is needed in
		# resizeToFitChild(). The flag is flipped to True by _WindowEventFilter
		# on the first Resize event.
		self.__sizeValid = False

		if len( self.__caughtKeys() ):
			# set up a key press handler, so we can catch various key presses and stop them being handled by the
			# host application
			self.__keyPressConnection = self.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ) )
			# \todo Does this hurt performance? Maybe keyPressSignal() should set this up when it's called?
			self._qtWidget().setFocusPolicy( QtCore.Qt.ClickFocus )

		self._qtWidget().setLayout( self.__qtLayout )
		self._qtWidget().installEventFilter( _windowEventFilter )
		self._qtWidget().setObjectName("gafferWindow")
		self._setStyleSheet()

		self.setTitle( title )
		self.setIcon( icon )

		# the deprecated resizeable argument, when supplied, wins over
		# sizeMode for backwards compatibility
		if resizeable is not None :
			self.setResizeable( resizeable )
		else :
			self.setSizeMode( sizeMode )

		self.__closedSignal = GafferUI.WidgetSignal()

		self.setChild( child )

	## Sets the text shown in the window's title bar.
	def setTitle( self, title ) :

		self._qtWidget().setWindowTitle( title )

	## Returns the current title bar text.
	def getTitle( self ) :

		return self._qtWidget().windowTitle()

	## Overridden from the base class to ensure that
	# window.setVisible( True ) also raises and unminimizes
	# the window.
	def setVisible( self, visible ) :

		GafferUI.Widget.setVisible( self, visible )

		if self.visible() :
			if self._qtWidget().isMinimized() :
				self._qtWidget().showNormal()
			self._qtWidget().raise_()

	## Removes either the central child widget or one of the child
	# windows added with addChildWindow().
	def removeChild( self, child ) :

		assert( child is self.__child or child in self.__childWindows )
		child._qtWidget().setParent( None )
		child._applyVisibility()
		if child is self.__child :
			self.__child = None
		else :
			self.__childWindows.remove( child )

	## Adds either a child window (any Window instance) or the single
	# central child widget. Raises if a central child is already set.
	def addChild( self, child ) :

		if isinstance( child, Window ) :
			self.addChildWindow( child )
		else :
			if self.getChild() is not None :
				raise Exception( "Window can only hold one child" )
			self.setChild( child )

	## Sets the central child widget, replacing (and unparenting) any
	# existing one. Passing None simply removes the current child.
	def setChild( self, child ) :

		oldChild = self.getChild()
		if oldChild is not None :
			self.removeChild( oldChild )

		if child is not None :

			# reparent the child away from any previous owner first
			oldParent = child.parent()
			if oldParent is not None :
				oldParent.removeChild( child )

			self.__child = child
			self.__qtLayout.addWidget( child._qtWidget(), 0, 0 )
			child._applyVisibility()

	## Returns the central child widget, or None.
	def getChild( self ) :

		return self.__child

	## Adding a child window causes the child to stay
	# on top of the parent at all times. This is useful for
	# preventing dialogues and the like from disappearing behind
	# the main window. Note that the parent will keep the child
	# window alive until it is removed using removeChild().
	def addChildWindow( self, childWindow ) :

		assert( isinstance( childWindow, Window ) )

		oldParent = childWindow.parent()
		if oldParent is not None :
			oldParent.removeChild( childWindow )

		self.__childWindows.add( childWindow )

		# We have the following criteria for child windows :
		#
		#	- they must always stay on top of their parent
		#		- even when the parent is fullscreen
		#	- they must open somewhere sensible by default
		#		- ideally centered on the parent
		#	- they must take focus nicely when asked (by PathChooserDialogue for instance)
		#
		# On OS X, the Tool window type does an excellent job
		# of all of that, as well as looking pretty. But if we use
		# the Dialog window type, they disappear behind full screen
		# windows.
		#
		# On Linux, the Tool window type does a poor job, opening
		# in arbitrary places, and displaying various focus problems.
		# The Dialog type on the other hand does a much better job. Of
		# course, this being X11, different window managers will do different
		# things, but on the whole the Dialog type seems best for X11.
		childWindowType = QtCore.Qt.Tool if sys.platform == "darwin" else QtCore.Qt.Dialog
		childWindowFlags = ( childWindow._qtWidget().windowFlags() & ~QtCore.Qt.WindowType_Mask ) | childWindowType
		childWindow._qtWidget().setParent( self._qtWidget(), childWindowFlags )

		childWindow._applyVisibility()

	## Returns a list of all the windows parented to this one.
	def childWindows( self ) :

		return list( self.__childWindows )

	## \deprecated
	def setResizeable( self, resizeable ) :

		warnings.warn( "Window.setResizeable() is deprecated, use Window.setSizeMode() instead.", DeprecationWarning, 2 )
		if resizeable :
			self.setSizeMode( self.SizeMode.Manual )
		else :
			self.setSizeMode( self.SizeMode.Fixed )

	## \deprecated
	def getResizeable( self ) :

		warnings.warn( "Window.getResizeable() is deprecated, use Window.getSizeMode() instead.", DeprecationWarning, 2 )
		return self.getSizeMode() == self.SizeMode.Manual

	## Sets the SizeMode, mapping it to the appropriate qt layout
	# size constraint.
	def setSizeMode( self, sizeMode ) :

		self.__sizeMode = sizeMode
		if sizeMode == self.SizeMode.Manual :
			self.__qtLayout.setSizeConstraint( QtGui.QLayout.SetDefaultConstraint )
		else :
			# Fixed and Automatic both pin the layout to the child's size
			self.__qtLayout.setSizeConstraint( QtGui.QLayout.SetFixedSize )

	def getSizeMode( self ) :

		return self.__sizeMode

	## Resizes the window to fit the requirements of the current child.
	# The shrink or expand arguments may be set to False to prevent the
	# window becoming smaller or larger than its current size if that is
	# not desired.
	def resizeToFitChild( self, shrink=True, expand=True ) :

		s = self._qtWidget().size()
		sizeHint = self._qtWidget().sizeHint()

		# while the size is still qt's meaningless initial size (see
		# __init__), ignore the shrink/expand flags and fit fully
		if expand or not self.__sizeValid :
			s = s.expandedTo( sizeHint )
		if shrink or not self.__sizeValid :
			s = s.boundedTo( sizeHint )

		self._qtWidget().resize( s )

	## Moves the window to position, an object with x and y fields
	# (e.g. an IECore.V2i).
	def setPosition( self, position ) :

		self._qtWidget().move( position.x, position.y )

	## Returns the current window position as an IECore.V2i.
	def getPosition( self ) :

		return IECore.V2i( self._qtWidget().x(), self._qtWidget().y() )

	def setFullScreen( self, fullScreen ) :

		if fullScreen :
			self._qtWidget().showFullScreen()
		else :
			self._qtWidget().showNormal()

	def getFullScreen( self ) :

		return self._qtWidget().isFullScreen()

	## Sets the window icon - accepts either a GafferUI.Image or the
	# name of an image file to load.
	def setIcon( self, imageOrImageFileName ) :

		# NOTE(review): basestring is Python 2 only - confirm this module is
		# not expected to run under Python 3.
		if isinstance( imageOrImageFileName, basestring ) :
			self.__image = GafferUI.Image( imageOrImageFileName )
		else :
			self.__image = imageOrImageFileName

		self._qtWidget().setWindowIcon( QtGui.QIcon( self.__image._qtPixmap() ) )

	## Returns the GafferUI.Image currently used as the window icon.
	def getIcon( self ) :

		return self.__image

	## Requests that this window be closed - this function may either be called
	# directly or in response to the user attempting to close the window.
	# If successful, setVisible( False ) will be called on the window and True will
	# be returned. However, the window may choose to deny the request in which case
	# the window will remain visible and False will be returned. The latter possibility
	# is to allow windows to take appropriate action when closing a window would mean a
	# user losing work. If a window is not visible on entry to this function then no
	# action is taken and False is returned.
	def close( self ) :

		if not self.getVisible() :
			return False

		if self._acceptsClose() :
			self.setVisible( False )
			self.closedSignal()( self )
			return True
		else :
			return False

	# lazily-computed, per-class cache used by __caughtKeys()
	__caughtKeysSet = None
	@classmethod
	def __caughtKeys( cls ):

		# Returns the set of key names that __keyPress should swallow
		# before the host application sees them.
		if cls.__caughtKeysSet is None:
			try:
				# are we in maya? If so, we need to catch the ctrl and shift key presses to prevent
				# maya from handling them and doing crazy focus stealing stuff
				import maya
				cls.__caughtKeysSet = set( ["Control", "Shift"] )
			except ImportError:
				cls.__caughtKeysSet = set()

		return cls.__caughtKeysSet

	def __keyPress( self, widget, event ):

		# returning True stops the event from propagating to the host
		return event.key in self.__caughtKeys()

	## Subclasses may override this to deny the closing of a window triggered
	# either by user action or by a call to close(). Simply return False to
	# prevent the closing.
	def _acceptsClose( self ) :

		return True

	## A signal emitted when the window has been closed successfully, either through
	# user action or a call to close()
	def closedSignal( self ) :

		return self.__closedSignal
# Translates raw qt events on a window into the corresponding
# GafferUI.Window behaviour (close protocol, automatic resizing,
# size-validity tracking).
class _WindowEventFilter( QtCore.QObject ) :

	def __init__( self ) :

		QtCore.QObject.__init__( self )

	def eventFilter( self, qObject, qEvent ) :

		type = qEvent.type()
		if type==QtCore.QEvent.Close :
			# route qt close requests through Window.close() so that
			# _acceptsClose() may veto them and closedSignal() is emitted
			widget = GafferUI.Widget._owner( qObject )
			closed = widget.close()
			if closed :
				qEvent.accept()
			else :
				qEvent.ignore()
			return True
		elif type==QtCore.QEvent.LayoutRequest :
			widget = GafferUI.Widget._owner( qObject )
			if widget.getSizeMode() == widget.SizeMode.Automatic :
				# Automatic windows track their child's size requirements
				widget.resizeToFitChild()
				return True
		elif type==QtCore.QEvent.Resize :
			widget = GafferUI.Widget._owner( qObject )
			# the window now has a meaningful size - see the __sizeValid
			# flag set up in Window.__init__
			widget._Window__sizeValid = True
			return False

# this single instance is used by all window widgets
_windowEventFilter = _WindowEventFilter()
| |
import collections
### a pure python implementation of the dict data structure
# follows the interface laid down by Benjamin mcduder
MINSIZE = 8 ### initial number of preallocated Entry slots in a new dict
PERTURB_SHIFT = 5 ### decay rate of the perturb term in the probe loop, see below
dummy = "<dummy key>" ### sentinel key marking deleted slots that may be reused, see below
### a discussion of the open addressing scheme from the python dict
'''
Major subtleties ahead: Most hash schemes depend on having a "good" hash
function, in the sense of simulating randomness. Python doesn't: its most
important hash functions (for strings and ints) are very regular in common
cases:
>>> map(hash, (0, 1, 2, 3))
[0, 1, 2, 3]
>>> map(hash, ("namea", "nameb", "namec", "named"))
[-1658398457, -1658398460, -1658398459, -1658398462]
>>>
This isn't necessarily bad! To the contrary, in a table of size 2**i, taking
the low-order i bits as the initial table index is extremely fast, and there
are no collisions at all for dicts indexed by a contiguous range of ints.
The same is approximately true when keys are "consecutive" strings. So this
gives better-than-random behavior in common cases, and that's very desirable.
OTOH, when collisions occur, the tendency to fill contiguous slices of the
hash table makes a good collision resolution strategy crucial. Taking only
the last i bits of the hash code is also vulnerable: for example, consider
the list [i << 16 for i in range(20000)] as a set of keys. Since ints are
their own hash codes, and this fits in a dict of size 2**15, the last 15 bits
of every hash code are all 0: they *all* map to the same table index.
But catering to unusual cases should not slow the usual ones, so we just take
the last i bits anyway. It's up to collision resolution to do the rest. If
we *usually* find the key we're looking for on the first try (and, it turns
out, we usually do -- the table load factor is kept under 2/3, so the odds
are solidly in our favor), then it makes best sense to keep the initial index
computation dirt cheap.
The first half of collision resolution is to visit table indices via this
recurrence:
j = ((5*j) + 1) mod 2**i
For any initial j in range(2**i), repeating that 2**i times generates each
int in range(2**i) exactly once (see any text on random-number generation for
proof). By itself, this doesn't help much: like linear probing (setting
j += 1, or j -= 1, on each loop trip), it scans the table entries in a fixed
order. This would be bad, except that's not the only thing we do, and it's
actually *good* in the common cases where hash keys are consecutive. In an
example that's really too small to make this entirely clear, for a table of
size 2**3 the order of indices is:
0 -> 1 -> 6 -> 7 -> 4 -> 5 -> 2 -> 3 -> 0 [and here it's repeating]
If two things come in at index 5, the first place we look after is index 2,
not 6, so if another comes in at index 6 the collision at 5 didn't hurt it.
Linear probing is deadly in this case because there the fixed probe order
is the *same* as the order consecutive keys are likely to arrive. But it's
extremely unlikely hash codes will follow a 5*j+1 recurrence by accident,
and certain that consecutive hash codes do not.
The other half of the strategy is to get the other bits of the hash code
into play. This is done by initializing a (unsigned) vrbl "perturb" to the
full hash code, and changing the recurrence to:
j = (5*j) + 1 + perturb;
perturb >>= PERTURB_SHIFT;
use j % 2**i as the next table index;
Now the probe sequence depends (eventually) on every bit in the hash code,
and the pseudo-scrambling property of recurring on 5*j+1 is more valuable,
because it quickly magnifies small differences in the bits that didn't affect
the initial index. Note that because perturb is unsigned, if the recurrence
is executed often enough perturb eventually becomes and remains 0. At that
point (very rarely reached) the recurrence is on (just) 5*j+1 again, and
that's certain to find an empty slot eventually (since it generates every int
in range(2**i), and we make sure there's always at least one empty slot).
Selecting a good value for PERTURB_SHIFT is a balancing act. You want it
small so that the high bits of the hash code continue to affect the probe
sequence across iterations; but you want it large so that in really bad cases
the high-order hash bits have an effect on early iterations. 5 was "the
best" in minimizing total collisions across experiments Tim Peters ran (on
both normal and pathological cases), but 4 and 6 weren't significantly worse.
Historical: Reimer Behrends contributed the idea of using a polynomial-based
approach, using repeated multiplication by x in GF(2**n) where an irreducible
polynomial for each table size was chosen such that x was a primitive root.
Christian Tismer later extended that to use division by x instead, as an
efficient way to get the high bits of the hash code into play. This scheme
also gave excellent collision statistics, but was more expensive: two
if-tests were required inside the loop; computing "the next" index took about
the same number of operations but without as much potential parallelism
(e.g., computing 5*j can go on at the same time as computing 1+perturb in the
above, and then shifting perturb can be done while the table index is being
masked); and the PyDictObject struct required a member to hold the table's
polynomial. In Tim's experiments the current scheme ran faster, produced
equally good collision statistics, needed less code & used less memory.
'''
class Entry(object):
    """
    One slot of the hash table.

    Attributes:
    * key - The key stored in this slot (None while the slot is empty).
    * value - The value associated with the key.
    * hash - The cached hash of the key.
    """
    __slots__ = ("key", "value", "hash")

    def __init__(self):
        # a freshly created slot is empty: no key, no value, zero hash
        self.key = None
        self.value = None
        self.hash = 0

    def __repr__(self):
        # render as "<key> : <value>", exactly as str() would show each part
        return "{0} : {1}".format(self.key, self.value)
class Dict(object):
    """
    A mapping interface implemented as an open-addressing hash table,
    closely following CPython's dictobject.c.

    Attributes:
    * used - The number of entries used in the table.
    * filled - used + number of entries with a dummy key.
    * table - List of entries; contains the actual dict data.
    * mask - Length of table - 1. Used to fetch values.
    """
    __slots__ = ("filled", "used", "mask", "table")

    def __init__(self, args=None, **kwargs):
        self.init()
        if args or kwargs:
            self._update(args, kwargs)

    def init(self):
        ''' Initialize an empty dict '''
        self.filled = 0
        self.used = 0
        self.table = []
        self.mask = MINSIZE - 1
        for i in range(MINSIZE):
            self.table.append(Entry())

    def pop(self, *args):
        '''
        Remove and return the value for a key; with a second argument,
        return that default instead of raising for a missing key.
        '''
        default = len(args) == 2
        try:
            my_val = self[args[0]]
        except KeyError:
            if default:
                return args[1]
            # re-raise the original KeyError untouched; the old code raised
            # a brand new, message-less KeyError chained to it
            raise
        del self[args[0]]
        return my_val

    def popitem(self):
        '''
        Remove and return an arbitrary (key, value) pair from the dictionary.

        popitem() is useful to destructively iterate over a dictionary, as
        often used in set algorithms. Raises KeyError when the dictionary is
        empty. As in CPython, the hash field of table[0] doubles as a hint
        of where the previous pop stopped, so repeated pops don't rescan the
        table from the start each time.
        '''
        if self.used == 0:
            raise KeyError("cannot pop from an empty dict")
        i = 0
        first_entry = self.table[0]
        if first_entry.value is None:
            # slot 0 is free, so its hash field holds the resume hint
            i = first_entry.hash
            if i > self.mask or i < 1:  # was "i < i", which is never true
                i = 1
        entry = self.table[i]
        # linear-search for a *used* slot; the original looped while the
        # slot was used, walking straight past every live entry
        while entry.value is None:
            i += 1
            if i > self.mask:
                i = 1
            entry = self.table[i]
        arb = entry.key, entry.value
        self._del(entry)
        # slot i (or slot 0 itself) is free now, so storing the hint here
        # cannot clobber a live entry's hash
        self.table[0].hash = i + 1
        return arb

    def setdefault(self, key, default=0):
        """
        If key is in the dictionary, return its value. Otherwise insert it
        with the default value and return the default.
        """
        entry = self._lookup(key)
        if entry.value is None:
            # the original assigned entry.value directly, leaving key/hash
            # unset and used/filled stale; route through _insert instead
            self._insert(key, default)
            return default
        else:
            return entry.value

    def _lookup(self, key):
        '''
        Return the entry for key: the live entry when the key is present,
        otherwise the slot where it should be inserted (the first dummy
        seen on the probe chain, or the terminating empty slot).
        '''
        key_hash = hash(key)
        index = key_hash & self.mask
        entry = self.table[index]
        if entry.key is None or entry.key is key:
            return entry
        # 'free' remembers a deleted slot we may recycle for an insert
        free = None
        if entry.key is dummy:
            free = entry
        elif entry.hash == key_hash and key == entry.key:
            return entry
        # probe with j = 5*j + 1 + perturb; perturb must decay to zero so
        # the recurrence degenerates to the full-cycle 5*j + 1. hash() fits
        # in a machine word, so mask it non-negative the way C treats it as
        # unsigned - a negative perturb would never right-shift to zero.
        perturb = key_hash & 0x7FFFFFFFFFFFFFFF
        while True:
            index = (index << 2) + index + perturb + 1
            perturb >>= PERTURB_SHIFT  # was missing entirely
            entry = self.table[index & self.mask]
            if entry.key is None:
                # end of the probe chain: recycle an earlier dummy if any.
                # The original returned 'free' before even examining the
                # current slot, so lookups after a deletion could miss
                # keys that were still in the table.
                return entry if free is None else free
            if entry.key is key or (entry.hash == key_hash and key == entry.key):
                return entry  # already in the dict
            if entry.key is dummy and free is None:
                free = entry

    def _resize(self, minused):
        """
        Resize the table to the smallest power of two greater than minused.
        """
        newsize = MINSIZE
        while newsize <= minused:
            newsize <<= 1
        # exactly newsize slots; the original "<=" allocated one extra,
        # unreachable slot
        new_table = [Entry() for _ in range(newsize)]
        old_table = self.table
        self.table = new_table
        self.used = 0
        self.filled = 0
        # the mask must describe the NEW table before re-inserting;
        # updating it last made _insert_into_clean probe only the old,
        # smaller index range
        self.mask = newsize - 1
        for entry in old_table:
            if entry.value is not None:
                self._insert_into_clean(entry)
            elif entry.key is dummy:
                entry.key = None

    def _insert_into_clean(self, entry):
        """
        Insert an item in a clean dict (no dummies, no duplicate of the
        key). This is a helper for resizing.
        """
        i = entry.hash & self.mask
        temp_entry = self.table[i]
        perturb = entry.hash & 0x7FFFFFFFFFFFFFFF  # was misspelt "perbturb"
        while temp_entry.key is not None:  # probe for an empty entry
            i = (i << 2) + i + perturb + 1
            temp_entry = self.table[i & self.mask]
            perturb = perturb >> PERTURB_SHIFT
        temp_entry.key = entry.key
        temp_entry.value = entry.value
        temp_entry.hash = entry.hash
        self.used += 1
        self.filled += 1

    def _insert(self, key, value):
        """
        Add a new value to the dictionary or replace an old one.
        """
        entry = self._lookup(key)
        if entry.value is None:
            self.used += 1
            # recycling a dummy slot does not change the fill count
            if entry.key is not dummy:
                self.filled += 1
        entry.key = key
        entry.value = value
        entry.hash = hash(key)

    def _del(self, entry):
        """
        Mark an entry as free with the dummy key.
        """
        entry.key = dummy
        entry.value = None
        self.used -= 1

    def __getitem__(self, key):
        ''' Get the value associated with the given key, or raise a KeyError '''
        entry = self._lookup(key)
        if entry.value is None:
            raise KeyError("not present in keys: {0!r}".format(key))
        else:
            return entry.value

    def __setitem__(self, key, what):
        ''' Create or update an entry '''
        # None is used as a marker for empty entries, so it can't be in a
        # dictionary.
        assert what is not None and key is not None, \
            "key and value must not be None"
        old_used = self.used
        self._insert(key, what)
        # Maybe resize: only when a slot was consumed and the table is more
        # than two-thirds full. (Multiplication avoids division.)
        if not (self.used > old_used and
                self.filled * 3 >= (self.mask + 1) * 2):
            return
        # Large dictionaries (> 5000 entries) are only doubled in size;
        # smaller ones are quadrupled. (The old comment said "< 5000".)
        factor = 2 if self.used > 5000 else 4
        self._resize(factor * self.used)

    def __delitem__(self, key):
        ''' Remove the entry for key, raising KeyError when absent. '''
        entry = self._lookup(key)
        if entry.value is None:
            raise KeyError("no such key: {0!r}".format(key))
        self._del(entry)

    def __contains__(self, key):
        """
        Check if a key is in the dictionary.
        """
        # _lookup always returns an Entry object (possibly a free slot), so
        # test its value; the old "entry is not None" made every membership
        # test succeed.
        return self._lookup(key).value is not None

    def __eq__(self, other):
        """
        True when other maps exactly the same keys to equal values.
        (Previously unimplemented - it always returned None.)
        """
        try:
            if len(self) != len(other):
                return False
        except TypeError:
            return NotImplemented
        for key, value in self.items():
            try:
                if other[key] != value:
                    return False
            except (KeyError, TypeError):
                return False
        return True

    def __ne__(self, other):
        eq = self == other
        if eq is NotImplemented:
            return eq
        return not eq

    def keys(self):
        """
        Return a list of keys in the dictionary.
        """
        return [entry.key for entry in self.table if entry.value is not None]

    def values(self):
        """
        Return a list of values in the dictionary.
        """
        return [entry.value for entry in self.table if entry.value is not None]

    def items(self):
        """
        Return a list of key-value pairs.
        """
        return [(entry.key, entry.value) for entry in self.table if entry.value is not None]

    def __iter__(self):
        """ Return an iterator over keys (previously unimplemented). """
        return iter(self.keys())

    def itervalues(self):
        """
        Return an iterator over the values in the dictionary.
        """
        return iter(self.values())

    def iterkeys(self):
        """
        Return an iterator over the keys in the dictionary.
        """
        return iter(self.keys())

    def iteritems(self):
        """
        Return an iterator over key-value pairs. __repr__ relies on this,
        so the old 'pass' body made repr() fail.
        """
        return iter(self.items())

    def _merge(self, mapping):
        """
        Update the dictionary from a mapping.
        """
        for key in mapping.keys():
            self[key] = mapping[key]

    def _from_sequence(self, seq):
        ''' Update values in the dictionary from a sequence of 2-ary key, value pairs. '''
        for double in seq:
            if len(double) != 2:
                raise ValueError("{0!r} doesn't have a length of 2".format(
                    double))
            self[double[0]] = double[1]

    def _update(self, arg, kwargs):
        ''' Internal method to update a dict from a mapping or sequence object '''
        if arg:
            # duck-type on .keys(), as dict() itself does; the old
            # isinstance(arg, collections.Mapping) check breaks on
            # Python 3.10+, where the ABC lives only in collections.abc
            if hasattr(arg, "keys"):
                self._merge(arg)
            else:
                self._from_sequence(arg)
        if kwargs:
            self._merge(kwargs)

    def update(self, arg=None, **kwargs):
        """
        Update the dictionary from a mapping or sequence containing key-value
        pairs. Any existing values are overwritten.
        """
        self._update(arg, kwargs)

    def get(self, key, default=0):
        """
        Return the value for key if it exists otherwise the default.
        """
        try:
            return self[key]
        except KeyError:
            return default

    def __len__(self):
        return self.used

    def __repr__(self):
        r = ["{0!r} : {1!r}".format(k, v) for k, v in self.iteritems()]
        return "Dict({" + ", ".join(r) + "})"
if __name__ == "__main__":
    # ad-hoc smoke test: build a Dict mirroring a literal dict
    regular_dict = {'a': 1, 'b': 2, 'c': 3}
    #my_args = [('a',1),('b',2),('c',3)]
    #my_dict = Dict(my_args)
    my_dict = Dict()
    my_dict['a'] = 1
    my_dict['b'] = 2
    my_dict['c'] = 3
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
# Standard Ansible metadata declaring the maturity and support level of
# this module.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}
# This is a modification of @bcoca's `svc` module
# NOTE(review): the docs name the module "runit" but the description and this
# file's code refer to s6 - confirm which supervisor suite is intended.
DOCUMENTATION = '''
---
module: runit
author: "James Sumners (@jsumners)"
version_added: "2.3"
short_description: Manage runit services.
description:
    - Controls s6 services on remote hosts using the sv utility.
options:
    name:
        required: true
        description:
            - Name of the service to manage.
    state:
        required: false
        choices: [ started, stopped, restarted, killed, reloaded, once ]
        description:
            - C(started)/C(stopped) are idempotent actions that will not run
              commands unless necessary. C(restarted) will always bounce the
              service (sv restart) and C(killed) will always bounce the service (sv force-stop).
              C(reloaded) will send a HUP (sv reload).
              C(once) will run a normally downed sv once (sv once), not really
              an idempotent operation.
    enabled:
        required: false
        choices: [ "yes", "no" ]
        description:
            - Whether the service is enabled or not, if disabled it also implies stopped.
    service_dir:
        required: false
        default: /var/service
        description:
            - directory runsv watches for services
    service_src:
        required: false
        default: /etc/sv
        description:
            - directory where services are defined, the source of symlinks to service_dir.
'''
EXAMPLES = '''
# Example action to start sv dnscache, if not running
- sv:
name: dnscache
state: started
# Example action to stop sv dnscache, if running
- sv:
name: dnscache
state: stopped
# Example action to kill sv dnscache, in all cases
- sv:
name: dnscache
state: killed
# Example action to restart sv dnscache, in all cases
- sv:
name: dnscache
state: restarted
# Example action to reload sv dnscache, in all cases
- sv:
name: dnscache
state: reloaded
# Example using alt sv directory location
- sv:
name: dnscache
state: reloaded
service_dir: /run/service
'''
import platform
import shlex
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Sv(object):
    """
    Main class that handles daemontools, can be subclassed and overridden in case
    we want to use a 'derivative' like encore, s6, etc
    """

    #def __new__(cls, *args, **kwargs):
    #    return _load_dist_subclass(cls, args, kwargs)

    def __init__(self, module):
        self.extra_paths = []
        # attribute names exposed by report()
        self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']

        self.module = module

        self.name = module.params['name']
        self.service_dir = module.params['service_dir']
        self.service_src = module.params['service_src']
        self.enabled = None
        self.full_state = None
        self.state = None
        self.pid = None
        self.duration = None
        # True when svstat reports the service up but flagged "want down"
        self.wants_down = False

        # NOTE(review): the module documents itself as runit's `sv`, but the
        # binaries resolved here are s6's - confirm which supervisor suite
        # this module is meant to drive.
        self.svc_cmd = module.get_bin_path('s6-svc', opt_dirs=self.extra_paths)
        self.svstat_cmd = module.get_bin_path('s6-svstat', opt_dirs=self.extra_paths)
        self.svc_full = '/'.join([ self.service_dir, self.name ])
        self.src_full = '/'.join([ self.service_src, self.name ])

        # the service is "enabled" when its symlink in service_dir exists
        self.enabled = os.path.lexists(self.svc_full)
        if self.enabled:
            self.get_status()
        else:
            self.state = 'stopped'

    def enable(self):
        """Enable the service by symlinking its source dir into service_dir."""
        if os.path.exists(self.src_full):
            try:
                os.symlink(self.src_full, self.svc_full)
            except OSError:
                e = get_exception()
                self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e))
        else:
            self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)

    def disable(self):
        """Stop the service and remove its symlink from service_dir."""
        # NOTE(review): 'force-stop' is runit `sv` syntax; s6-svc takes
        # single-letter flags - confirm this invocation actually works.
        self.execute_command([self.svc_cmd, 'force-stop', self.src_full])
        try:
            os.unlink(self.svc_full)
        except OSError:
            e = get_exception()
            self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e))

    def check_return(self, action, result):
        """
        Fail the module when a command result carries a non-zero return
        code; otherwise pass the (rc, out, err) tuple through unchanged.

        The original declaration used Python 2 tuple parameter unpacking
        ("def check_return(self, action, (rc, out, err))"), which is a
        syntax error on Python 3; unpack inside the body instead. Callers
        still pass the result tuple positionally, so the call sites are
        unchanged.
        """
        (rc, out, err) = result
        if rc != 0:
            self.module.fail_json(msg="s6 '{}' failed.".format(action), error=err)
        return (rc, out, err)

    def get_status(self):
        """Parse s6-svstat output into state / pid / duration / wants_down."""
        (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])

        if err is not None and err:
            # svstat itself complained; surface its message as the state
            self.full_state = self.state = err
        else:
            self.full_state = out

            m = re.search(r'\(pid (\d+)\)', out)
            if m:
                self.pid = m.group(1)

            m = re.search(r' (\d+)s', out)
            if m:
                self.duration = m.group(1)

            # "want down" means the service is up but has been asked to
            # stop; record that on wants_down. The original assigned True
            # to self.state here, which the up/down checks below
            # immediately overwrote, so wants_down was never set.
            if re.search(r'want down', out):
                self.wants_down = True

            if re.search(r'^up', out):
                self.state = 'started'
            elif re.search(r'^down', out):
                self.state = 'stopped'
            else:
                self.state = 'unknown'
        return

    def started(self):
        """Start the service, failing the module on error."""
        return self.check_return("started", self.start())

    def start(self):
        return self.execute_command([self.svc_cmd, '-u', self.svc_full])

    def stopped(self):
        """Stop the service, failing the module on error."""
        return self.check_return("stopped", self.stop())

    def stop(self):
        return self.execute_command([self.svc_cmd, '-d', self.svc_full])

    def once(self):
        """Run a normally-down service exactly once."""
        return self.check_return("started once", self.execute_command([self.svc_cmd, '-O', self.svc_full]))

    # def reloaded(self):
    #     return self.reload()

    # def reload(self):
    #     return self.execute_command([self.svc_cmd, 'reload', self.svc_full])

    def restarted(self):
        """Bounce the service: kill it if running, then bring it back up."""
        if self.state == "started":
            self.killed()
        elif self.state == 'unknown':
            self.module.fail_json(msg="Service is in unknown state. duno what to do")
        # Lets start (dep)
        if self.wants_down:
            return self.once()
        else:
            return self.start()

    def restart(self):
        # NOTE(review): 'restart' is runit `sv` syntax, not an s6-svc flag -
        # confirm; this method is currently unused (restarted() kills+starts).
        return self.execute_command([self.svc_cmd, 'restart', self.svc_full])

    def killed(self):
        """Force-stop the service, failing the module on error."""
        return self.check_return("killed", self.kill())

    def kill(self):
        return self.execute_command([self.svc_cmd, '-k', self.svc_full])

    def execute_command(self, cmd):
        """Run cmd, returning (rc, out, err) and failing the module on exceptions."""
        try:
            # pass the argv list straight through so arguments containing
            # whitespace are not re-split (the original joined with spaces)
            (rc, out, err) = self.module.run_command(cmd)
        except Exception:
            e = get_exception()
            self.module.fail_json(msg="failed to execute: %s" % str(e))
        return (rc, out, err)

    def report(self):
        """Refresh status and return a dict of the report_vars."""
        self.get_status()
        states = {}
        for k in self.report_vars:
            states[k] = self.__dict__[k]
        return states
# ===========================================
# Main control flow
def main():
    """Module entry point: parse parameters, apply enable/state changes, report."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(choices=['started', 'stopped', 'restarted', 'killed', 'once']),
            enabled=dict(required=False, type='bool'),
            dist=dict(required=False, default='runit'),
            service_dir=dict(required=False, default='/var/service'),
            service_src=dict(required=False, default='/etc/sv'),
        ),
        supports_check_mode=True,
    )
    # Force a C locale so parsing of command output is stable.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    state = module.params['state']
    enabled = module.params['enabled']

    sv = Sv(module)
    changed = False
    orig_state = sv.report()  # initial snapshot; also primes sv.state

    # Enable/disable the service link when requested and different.
    if enabled is not None and enabled != sv.enabled:
        changed = True
        if not module.check_mode:
            try:
                action = sv.enable if enabled else sv.disable
                action()
            except (OSError, IOError):
                e = get_exception()
                module.fail_json(msg="Could not change service link: %s" % str(e))

    # Drive the service to the requested state when it differs.
    if state is not None and state != sv.state:
        changed = True
        if not module.check_mode:
            getattr(sv, state)()

    module.exit_json(changed=changed, sv=sv.report())
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
| |
#
# Copyright (C) 2011-2018 Red Hat, Inc. (https://github.com/Commonjava/indy)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import requests
import json
from threading import Thread
from urlparse import urlparse
import time
import mb.util
PROXIES = """
<proxies>
<proxy>
<id>indy-httprox</id>
<active>true</active>
<protocol>http</protocol>
<host>%(host)s</host>
<port>%(proxy_port)s</port>
<username>%(id)s+tracking</username>
<password>foo</password>
<nonProxyHosts>%(host)s</nonProxyHosts>
</proxy>
</proxies>
"""
SETTINGS = """
<?xml version="1.0"?>
<settings>
<localRepository>%(dir)s/local-repo</localRepository>
<mirrors>
<mirror>
<id>indy</id>
<mirrorOf>*</mirrorOf>
<url>%(url)s/api/folo/track/%(id)s/group/%(id)s</url>
</mirror>
</mirrors>
%(proxies)s
<profiles>
<profile>
<id>resolve-settings</id>
<repositories>
<repository>
<id>central</id>
<url>%(url)s/api/folo/track/%(id)s/group/%(id)s</url>
<releases>
<enabled>true</enabled>
</releases>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
</repositories>
<pluginRepositories>
<pluginRepository>
<id>central</id>
<url>%(url)s/api/folo/track/%(id)s/group/%(id)s</url>
<releases>
<enabled>true</enabled>
</releases>
<snapshots>
<enabled>false</enabled>
</snapshots>
</pluginRepository>
</pluginRepositories>
</profile>
<profile>
<id>deploy-settings</id>
<properties>
<altDeploymentRepository>%(id)s::default::%(url)s/api/folo/track/%(id)s/hosted/%(id)s</altDeploymentRepository>
</properties>
</profile>
</profiles>
<activeProfiles>
<activeProfile>resolve-settings</activeProfile>
<activeProfile>deploy-settings</activeProfile>
</activeProfiles>
</settings>
"""
POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'}
class Builder(Thread):
    """Worker thread that runs one Maven build per queue entry against Indy.

    Each queue entry is a (builddir, indy_url, proxy_port, delay) tuple. The
    worker provisions the per-build repos/groups, runs the build through a
    Folo-tracked group, seals and promotes the tracked content, then removes
    the temporary build group.
    """

    def __init__(self, queue, reports):
        Thread.__init__(self)
        self.queue = queue      # input queue of build tuples
        self.reports = reports  # output queue of (builddir, url, id) tuples

    def run(self):
        while True:
            try:
                (builddir, indy_url, proxy_port, delay) = self.queue.get()
                parsed = urlparse(indy_url)
                params = {
                    'dir': builddir,
                    'url': indy_url,
                    'id': os.path.basename(builddir),
                    'host': parsed.hostname,
                    'port': parsed.port,
                    'proxy_port': proxy_port,
                }
                self.setup(builddir, params)
                if delay > 0:
                    print("Delay: %s seconds" % delay)
                    time.sleep(delay)
                self.build(builddir)
                self.seal_folo_report(params)
                self.reports.put((builddir, params['url'], params['id']))
                folo_report = self._pull_folo_report(params)
                self.promote_by_path(folo_report, params)
                self.cleanup_build_group(params)
                self.promote_by_group(params)
            except (KeyboardInterrupt, SystemExit, Exception) as e:
                # Any failure (or interrupt) stops this worker thread.
                print(e)
                break
            finally:
                self.queue.task_done()

    def promote_by_path(self, folo_report, params):
        """Run by-path promotion of downloaded content into shared-imports."""
        to_promote = {}
        downloads = folo_report.get('downloads')
        if downloads is not None:
            # Group downloaded paths by their source remote repository.
            for download in downloads:
                key = download['storeKey']
                mode = download['accessChannel']
                if mode == 'MAVEN_REPO' and key.startswith('remote:'):
                    path = download['path']
                    paths = to_promote.get(key)
                    if paths is None:
                        paths = []
                        to_promote[key] = paths
                    paths.append(path)
        print("Promoting dependencies from %s sources into hosted:shared-imports" % len(to_promote.keys()))
        for key in to_promote:
            req = {'source': key, 'target': 'hosted:shared-imports', 'paths': to_promote[key]}
            resp = requests.post("%(url)s/api/promotion/paths/promote" % params, json=req, headers=POST_HEADERS)
            resp.raise_for_status()

    def promote_by_group(self, params):
        """Run by-group promotion of uploaded content into group:builds."""
        print("Promoting build output in hosted:%(id)s to membership of group:builds" % params)
        req = {'source': 'hosted:%(id)s' % params, 'targetGroup': 'builds'}
        resp = requests.post("%(url)s/api/promotion/groups/promote" % params, json=req, headers=POST_HEADERS)
        resp.raise_for_status()

    def _pull_folo_report(self, params):
        """Pull the Folo tracking report associated with the current build."""
        print("Retrieving folo tracking report for: %(id)s" % params)
        resp = requests.get("%(url)s/api/folo/admin/%(id)s/record" % params)
        resp.raise_for_status()
        return resp.json()

    def seal_folo_report(self, params):
        """Seal the Folo tracking report after the build completes."""
        print("Sealing folo tracking report for: %(id)s" % params)
        resp = requests.post("%(url)s/api/folo/admin/%(id)s/record" % params, data={})
        resp.raise_for_status()

    def cleanup_build_group(self, params):
        """Remove the group created specifically to channel content into this
        build, since we're done with it now.
        """
        print("Deleting temporary group:%(id)s used for build time only" % params)
        resp = requests.delete("%(url)s/api/admin/group/%(id)s" % params)
        resp.raise_for_status()

    def build(self, builddir):
        """Run the Maven build with the generated settings.xml, teeing output
        to build.log. fail=False: build failures are tolerated here."""
        mb.util.run_cmd("mvn -DskipTests -f %(d)s/pom.xml -s %(d)s/settings.xml clean deploy 2>&1 | tee %(d)s/build.log" % {'d': builddir}, fail=False)

    def setup(self, builddir, params):
        """Create the hosted repo and group, then pull the Indy-generated Maven
        settings.xml file tuned to that group."""
        params['shared_name'] = 'shared-imports'
        params['builds_name'] = 'builds'
        params['brew_proxies'] = 'brew_proxies'

        # Create the shared-imports hosted repo if necessary
        resp = requests.head('%(url)s/api/admin/hosted/%(shared_name)s' % params)
        if resp.status_code == 404:
            shared = {
                'type': 'hosted',
                'key': "hosted:%(shared_name)s" % params,
                'disabled': False,
                'doctype': 'hosted',
                'name': params['shared_name'],
                'allow_releases': True
            }
            print("POSTing: %s" % json.dumps(shared, indent=2))
            resp = requests.post("%(url)s/api/admin/hosted" % params, json=shared, headers=POST_HEADERS)
            resp.raise_for_status()

        # Create the builds group if necessary
        resp = requests.head('%(url)s/api/admin/group/%(builds_name)s' % params)
        if resp.status_code == 404:
            builds_group = {
                'type': 'group',
                'key': "group:%(builds_name)s" % params,
                'disabled': False,
                'doctype': 'group',
                'name': params['builds_name'],
            }
            # BUGFIX: the builds group dict was previously constructed but
            # never POSTed (compare the brew_proxies branch below), so the
            # group was never actually created.
            print("POSTing: %s" % json.dumps(builds_group, indent=2))
            resp = requests.post("%(url)s/api/admin/group" % params, json=builds_group, headers=POST_HEADERS)
            resp.raise_for_status()

        # Create the brew_proxies group if necessary
        resp = requests.head('%(url)s/api/admin/group/%(brew_proxies)s' % params)
        if resp.status_code == 404:
            brew_proxies = {
                'type': 'group',
                'key': "group:%(brew_proxies)s" % params,
                'disabled': False,
                'doctype': 'group',
                'name': params['brew_proxies'],
            }
            print("POSTing: %s" % json.dumps(brew_proxies, indent=2))
            resp = requests.post("%(url)s/api/admin/group" % params, json=brew_proxies, headers=POST_HEADERS)
            resp.raise_for_status()

        # Create the hosted repo for this build
        hosted = {
            'type': 'hosted',
            'key': "hosted:%(id)s" % params,
            'disabled': False,
            'doctype': 'hosted',
            'name': params['id'],
            'allow_releases': True,
            'allow_snapshots': True
        }
        print("POSTing: %s" % json.dumps(hosted, indent=2))
        resp = requests.post("%(url)s/api/admin/hosted" % params, json=hosted, headers=POST_HEADERS)
        resp.raise_for_status()

        # Create the group for this build
        group = {
            'type': 'group',
            'key': "group:%(id)s" % params,
            'disabled': False,
            'doctype': 'group',
            'name': params['id'],
            'constituents': [
                "hosted:%(id)s" % params,
                'group:builds',
                'group:brew_proxies',
                'hosted:shared-imports',
                'group:public'
            ]
        }
        print("POSTing: %s" % json.dumps(group, indent=2))
        resp = requests.post("%(url)s/api/admin/group" % params, json=group, headers=POST_HEADERS)
        resp.raise_for_status()

        # Only inject the proxy stanza when a proxy port was configured.
        if params.get('proxy_port') is not None:
            params['proxies'] = PROXIES % params
        else:
            params['proxies'] = ''

        # Write the settings.xml we need for this build
        with open("%s/settings.xml" % builddir, 'w') as f:
            f.write(SETTINGS % params)
| |
import time
import textmagic
from textmagic.test import ONE_TEST_NUMBER
from textmagic.test import THREE_TEST_NUMBERS
from textmagic.test import MAX_GSM0338_SMS_LENGTH
from textmagic.test import MAX_GSM0338_MULTI_SMS_LENGTH
from textmagic.test import A_GSM0338_CHARACTER
from textmagic.test import MAX_UNICODE_SMS_LENGTH
from textmagic.test import MAX_UNICODE_MULTI_SMS_LENGTH
from textmagic.test import A_UNICODE_CHARACTER
from textmagic.test import TextMagicTestsBase
from textmagic.test import LiveUnsafeTests
from textmagic.client import TextMagicError
class SendTestsBase(TextMagicTestsBase):
    """
    Abstract base providing generic succeeding/failing "send" test helpers.
    """

    expected_keys = ['sent_text', 'message_id', 'parts_count']

    def succeedingSendCase(self, message, numbers, expected_parts,
                           max_length=None, send_time=None, unicode=None, sender=None):
        """Send a message and verify the structure of the success response."""
        response = self.client._send(message, numbers, max_length, send_time, unicode, sender)
        if not isinstance(numbers, list):
            numbers = [numbers]
        expected_keys = list(self.expected_keys)
        if sender is not None:
            expected_keys.extend(['from'])
        self.assertKeysEqualExpectedKeys(response, expected_keys)
        self.assertEquals(response['sent_text'], message)
        self.assertEquals(len(response['message_id']), len(numbers))
        self.assertEquals(set(response['message_id'].values()), set(numbers))
        for message_id in response['message_id']:
            self.assertTrue(message_id.isdigit())
        self.assertEquals(response['parts_count'], expected_parts)

    def failingSendCase(self, message, numbers, error_code, error_message,
                        max_length=None, send_time=None, unicode=None, sender=None):
        """Send a message and verify the expected TextMagicError is raised."""
        try:
            self.client._send(message, numbers, max_length, send_time, unicode, sender)
        except TextMagicError as e:
            self.assertEquals(e.error_code, error_code)
            self.assertEquals(e.error_message, error_message)
        else:
            self.fail('An error is expected to skip this line')
class BasicSendTests(SendTestsBase):
    """
    Test the very basics.
    """

    def testOneShortMessageSucceeds(self):
        self.succeedingSendCase('Test Message', ONE_TEST_NUMBER, 1)

    def testThreeShortMessagesSucceed(self):
        self.succeedingSendCase('Test Message', THREE_TEST_NUMBERS, 1)

    def testOneShortUnicodeMessageSucceeds(self):
        self.succeedingSendCase(u'\u2800\u2801\u2802\u2803 \u27F0', ONE_TEST_NUMBER, 1)

    def _assertPlainSendWorks(self, message):
        """Shared check: send() without optional parameters returns a sane response."""
        response = self.client.send(message, ONE_TEST_NUMBER)
        self.assertKeysEqualExpectedKeys(response, self.expected_keys)
        self.assertEquals(response['sent_text'], message)
        self.assertEquals(len(response['message_id']), 1)

    def testSendCanBeCalledWithoutOptionalParametersGsm0338(self):
        self._assertPlainSendWorks('Test Message')

    def testSendCanBeCalledWithoutOptionalParametersUnicode(self):
        self._assertPlainSendWorks(u'\u2800\u2801\u2802\u2803 \u27F0')
class LiveUnsafeBasicSendTests(BasicSendTests, LiveUnsafeTests):
    """
    Test the very basics - but needing server-setup to work
    """

    def testOneShortMessageWithSenderSucceeds(self):
        self.succeedingSendCase('Test Message', ONE_TEST_NUMBER, 1, sender='xyz')
class MultipartSendTests(SendTestsBase):
    """
    Abstract class to test message lengths.
    Subclasses supply char, max_sms_length and max_multi_sms_length for a
    particular character set.
    """

    def succeedingSendLengthCase(self, length, expected_parts):
        self.succeedingSendCase(self.char * length, ONE_TEST_NUMBER, expected_parts)

    def testLongestOnePartMessageSucceeds(self):
        self.succeedingSendLengthCase(self.max_sms_length, 1)

    def testShortestTwoPartMessageSucceeds(self):
        self.succeedingSendLengthCase(self.max_sms_length + 1, 2)

    def testLongestTwoPartMessageSucceeds(self):
        self.succeedingSendLengthCase(2 * self.max_multi_sms_length, 2)

    def testShortestThreePartMessageSucceeds(self):
        self.succeedingSendLengthCase(2 * self.max_multi_sms_length + 1, 3)

    def testLongestThreePartMessageSucceeds(self):
        self.succeedingSendLengthCase(3 * self.max_multi_sms_length, 3)
class MultipartGsm0338SendTests(MultipartSendTests):
    # Length limits and a representative character for the GSM 03.38 charset.
    max_sms_length = MAX_GSM0338_SMS_LENGTH
    max_multi_sms_length = MAX_GSM0338_MULTI_SMS_LENGTH
    char = A_GSM0338_CHARACTER
class MultipartUnicodeSendTests(MultipartSendTests):
    # Length limits and a representative character for Unicode (UCS-2) messages.
    max_sms_length = MAX_UNICODE_SMS_LENGTH
    max_multi_sms_length = MAX_UNICODE_MULTI_SMS_LENGTH
    char = A_UNICODE_CHARACTER
class MaxLengthSendTests(SendTestsBase):
    """Tests for the max_length parameter that caps the number of parts."""

    def testTooLongMessageErrorWhenMaxLengthIsOne(self):
        self.failingSendCase(
            message=A_GSM0338_CHARACTER * (2 * MAX_GSM0338_MULTI_SMS_LENGTH),
            numbers=ONE_TEST_NUMBER,
            max_length=1,
            error_code=7,
            error_message='Too long message')

    def testTooLongMessageErrorWhenMaxLengthIsTwo(self):
        self.failingSendCase(
            message=A_GSM0338_CHARACTER * (3 * MAX_GSM0338_MULTI_SMS_LENGTH),
            numbers=ONE_TEST_NUMBER,
            max_length=2,
            error_code=7,
            error_message='Too long message')

    def testOnePartMessageFailsWhenMaxLengthIsZero(self):
        self.failingSendCase(
            message=A_GSM0338_CHARACTER * MAX_GSM0338_SMS_LENGTH,
            numbers=ONE_TEST_NUMBER,
            max_length=0,
            error_code=10,
            error_message='Wrong parameter value 0 for parameter max_length')

    def testTwoPartMessageFailsWhenMaxLengthIsZero(self):
        self.failingSendCase(
            message=A_GSM0338_CHARACTER * (2 * MAX_GSM0338_MULTI_SMS_LENGTH),
            numbers=ONE_TEST_NUMBER,
            max_length=0,
            error_code=10,
            error_message='Wrong parameter value 0 for parameter max_length')

    def testThreePartMessageSucceedsWhenMaxLengthIsUnspecified(self):
        self.succeedingSendCase(
            message=A_GSM0338_CHARACTER * (3 * MAX_GSM0338_MULTI_SMS_LENGTH),
            numbers=ONE_TEST_NUMBER,
            expected_parts=3)
class SendCharacterSetsTests(SendTestsBase):
    """Character-set edge cases around escape-prefixed GSM 03.38 characters."""

    def testEscapedCharactersLengthenMessage(self):
        # Appending one escape-prefixed character to an otherwise maximal
        # message should push it into a second part.
        for escaped_char in '{}\~[]|':
            message = 'a' * (MAX_GSM0338_SMS_LENGTH - 1) + escaped_char
            self.assertEquals(len(message), MAX_GSM0338_SMS_LENGTH)
            self.succeedingSendCase(message, ONE_TEST_NUMBER, 2)
class SendTimeTests(SendTestsBase):
    """Tests for the send_time scheduling parameter."""

    def _time_now(self):
        # Use a fixed timestamp when not running live so recorded test
        # interactions stay reproducible.
        if textmagic.test.running_live:
            return time.time()
        return 1245879223

    def _sendTimeInFutureSucceeds(self, send_time):
        self.succeedingSendCase('Message from the future', ONE_TEST_NUMBER, 1,
                                send_time=send_time)

    def testSendTimeAsStructTimeInFutureSucceeds(self):
        self._sendTimeInFutureSucceeds(time.localtime(self._time_now() + 120))

    def testSendTimeAsStructTimeInPastSucceeds(self):
        self._sendTimeInFutureSucceeds(time.localtime(self._time_now() - 300))

    def testSendTimeAsUnixTimeInFutureSucceeds(self):
        self._sendTimeInFutureSucceeds(self._time_now() + 120)

    def testSendTimeAsUnixTimeInPastSucceeds(self):
        self._sendTimeInFutureSucceeds(self._time_now() - 300)
class SendErrorsTests(SendTestsBase):
    """
    Test error messages on sending.
    """

    def testEmptyMessageFails(self):
        self.failingSendCase('', ONE_TEST_NUMBER, 1, 'Messages text is empty')

    def testWrongPhoneNumberFormatFails(self):
        self.failingSendCase('Error testing message', ['1234'], 9,
                             'Wrong phone number format')

    def testWrongPasswordFails(self):
        # Replace the client with one using a bad password.
        self.client = textmagic.test.client_class(self.client.username, 'koos')
        self.failingSendCase('Wrong password testing message', ONE_TEST_NUMBER, 5,
                             'Invalid username & password combination')

    def testWrongValueForUnicodeParameterFails(self):
        self.failingSendCase('Error testing message', ONE_TEST_NUMBER, 10,
                             'Wrong parameter value 10 for parameter unicode',
                             unicode=10)

    def testUnicodeMessageThatSaysNotUnicodeReportsTooLongUnicodeMessageReturnsError(self):
        self.failingSendCase(u'\uABCD' * MAX_GSM0338_MULTI_SMS_LENGTH,
                             ONE_TEST_NUMBER, 15, 'Unicode symbols detected',
                             unicode=0)

    def testGsm0338MessageThatSaysUnicodeSentAsGsm0338(self):
        self.succeedingSendCase('x' * (3 * MAX_UNICODE_SMS_LENGTH),
                                ONE_TEST_NUMBER, 2, unicode=1)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
import Axon
import zlib
import os
import pygame
from datetime import datetime
from zipfile import ZipFile
from Tkinter import *
from tkFileDialog import askopenfilename
from tkSimpleDialog import askstring
from tkMessageBox import *
from Axon.Ipc import WaitComplete, producerFinished, shutdownMicroprocess
from Kamaelia.UI.PygameDisplay import PygameDisplay
try:
import Image
except ImportError:
print "WARNING: Python Imaging Library Not available, defaulting to bmp only mode"
class Canvas(Axon.Component.component):
    """\
    Canvas component - pygame surface that accepts drawing instructions
    """

    Inboxes = { "inbox" : "Receives drawing instructions",
                "control" : "",
                "fromDisplay" : "For receiving replies from PygameDisplay service",
                "eventsIn" : "For receiving PygameDisplay events",
              }
    Outboxes = { "outbox" : "Issues drawing instructions",
                 "signal" : "",
                 "toDisplay" : "For sending requests to PygameDisplay service",
                 "toApp" : "Send requests to app - for calibration", # MODIFICATION
                 "eventsOut" : "Events forwarded out of here",
                 "surfacechanged" : "If the surface gets changed from last load/save a 'dirty' message is emitted here",
                 "toTicker" : "Send data to text ticker",
                 "toHistory" : "Move to first slide",
               }

    def __init__(self, position=(0,0), size=(1024,768), bgcolour=(255,255,255), notepad="Scribbles"):
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        super(Canvas,self).__init__()
        self.position = position      # top-left of the surface on the display
        self.size = size              # surface size in pixels
        self.antialias = False
        self.bgcolour = bgcolour      # RGB fill colour used by clear()
        self.notepad = notepad        # directory holding the slide .png files
        # Pick the pygame line primitive once, based on the antialias flag.
        if self.antialias == True:
            self.pygame_draw_line = pygame.draw.aaline
        else:
            self.pygame_draw_line = pygame.draw.line
        # Whether a 'dirty' notice has already been emitted since last save/load.
        self.dirty_sent = False
        # NOTE(review): self.clean is first assigned inside handleCommand()/
        # line(); a LINE command with negative y coordinates as the very first
        # command would leave it unset when main() reads it -- confirm callers
        # always issue a state-setting command first.

    def waitBox(self,boxname):
        # Generator: yield until a message arrives on the named inbox.
        waiting = True
        while waiting:
            if self.dataReady(boxname):
                return
            else:
                yield 1

    def requestDisplay(self, **argd):
        # Ask the PygameDisplay service for a surface, then wait for the reply.
        displayservice = PygameDisplay.getDisplayService()
        self.link((self,"toDisplay"), displayservice)
        #argd["transparency"] = self.bgcolour
        self.send(argd, "toDisplay")
        self.send(argd, "toApp") # MODIFICATION
        for _ in self.waitBox("fromDisplay"):
            yield 1
        self.surface = self.recv("fromDisplay")

    def finished(self):
        # Drain "control"; report True (and forward the message) on shutdown.
        while self.dataReady("control"):
            msg = self.recv("control")
            if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
                self.send(msg, "signal")
                return True
        return False

    def main(self):
        """Main loop"""
        # Initial yields pace startup before the display request is issued.
        yield 1
        yield 1
        yield 1
        yield 1
        yield WaitComplete(
            self.requestDisplay( DISPLAYREQUEST=True,
                                 callback = (self,"fromDisplay"),
                                 events = (self, "eventsIn"),
                                 size = self.size,
                                 position = self.position,
                               )
        )

        # Start with a blank canvas and register for mouse events.
        self.surface.fill( (self.bgcolour) )
        self.send({"REDRAW":True, "surface":self.surface}, "toDisplay")
        self.send({"REDRAW":True, "surface":self.surface}, "toApp") # MODIFICATION
        self.send( {"ADDLISTENEVENT" : pygame.MOUSEBUTTONDOWN, "surface" : self.surface},
                   "toDisplay" )
        self.send( {"ADDLISTENEVENT" : pygame.MOUSEMOTION, "surface" : self.surface},
                   "toDisplay" )
        self.send( {"ADDLISTENEVENT" : pygame.MOUSEBUTTONUP, "surface" : self.surface},
                   "toDisplay" )

        while not self.finished():
            self.redrawNeeded = False
            # Each inbox message is a batch of drawing instruction tuples.
            while self.dataReady("inbox"):
                msgs = self.recv("inbox")
                # \
                # print repr(msgs)
                for msg in msgs:
                    cmd = msg[0]
                    args = msg[1:]
                    # parse commands here
                    self.handleCommand(cmd, *args)
                yield 1
            if self.redrawNeeded:
                self.send({"REDRAW":True, "surface":self.surface}, "toDisplay")
                self.send({"REDRAW":True, "surface":self.surface}, "toApp") #MODIFICATION
                # Emit a single 'dirty' notice per modification burst.
                if not self.clean:
                    if not self.dirty_sent:
                        self.send("dirty", "surfacechanged")
                        self.dirty_sent = True
            # pass on events received from pygame display
            while self.dataReady("eventsIn"):
                self.send( self.recv("eventsIn"), "eventsOut" )
            self.pause()
            yield 1

    def handleCommand(self, cmd, *args):
        """Dispatch one drawing/persistence command (case-insensitive)."""
        #
        # Could really take a dispatch pattern
        # Would then be pluggable.
        #
        cmd = cmd.upper()
        if cmd=="CLEAR":
            self.clear(args)
            self.clean = True
            self.dirty_sent = False
        elif cmd=="LINE":
            self.line(args)
        elif cmd=="CIRCLE":
            self.circle(args)
            self.clean = False
        elif cmd=="LOAD":
            self.load(args)
            self.clean = True
            self.dirty_sent = False
        elif cmd=="SAVE":
            self.save(args)
            self.clean = True
            self.dirty_sent = False
        elif cmd=="LOADDECK":
            self.loaddeck(args)
            self.clean = True
            self.dirty_sent = False
        elif cmd=="SAVEDECK":
            self.savedeck(args)
            self.clean = True
            self.dirty_sent = False
        elif cmd=="CLEARSCRIBBLES":
            self.clearscribbles(args)
            self.clean = True
            self.dirty_sent = False
        elif cmd=="DELETESLIDE":
            self.deleteslide(args)
            self.clean = True
            self.dirty_sent = False
        elif cmd=="GETIMG":
            self.getimg(args)
            self.clean = False
        elif cmd=="SETIMG":
            self.setimg(args)
            self.clean = False
        elif cmd=="WRITE":
            self.write(args)
            self.clean = False
        elif cmd=="CAM":
            self.webcam(args)
            self.clean = True
            # NOTE(review): CAM marks dirty_sent True without sending 'dirty'
            # -- presumably to suppress the notice for webcam frames; confirm.
            self.dirty_sent = True
        elif cmd== "QUIT":
            self.quit(args)

    def line(self, args):
        # args: r,g,b, startx,starty, endx,endy (all parse as ints).
        (r,g,b,sx,sy,ex,ey) = [int(v) for v in args[0:7]]
        self.pygame_draw_line(self.surface, (r,g,b), (sx,sy), (ex,ey))
        # pygame.draw.aaline(self.surface, (r,g,b), (sx,sy), (ex,ey))
        self.redrawNeeded = True
        # Lines drawn entirely above the canvas (negative y) don't dirty it.
        if not((sy <0) or (ey <0)):
            self.clean = False

    def clear(self, args):
        # Fill with the given RGB triple, or the default background colour.
        if len(args) == 3:
            self.surface.fill( [int(a) for a in args[0:3]] )
        else:
            self.surface.fill( (self.bgcolour) )
        self.redrawNeeded = True
        self.send("dirty", "surfacechanged")
        self.dirty_sent = True
        self.clean = True

    def circle(self, args):
        # args: r,g,b, centre x,y, radius; filled circle (width 0).
        (r,g,b,x,y,radius) = [int(v) for v in args[0:6]]
        pygame.draw.circle(self.surface, (r,g,b), (x,y), radius, 0)
        self.redrawNeeded = True

    def load(self, args):
        """Load an image file onto the canvas; optionally suppress rebroadcast."""
        filename = args[0]
        # print "ARGS", args
        try:
            loadedimage = pygame.image.load(filename)
        except:
            # Best-effort: a bad/missing file leaves the canvas untouched.
            pass
        else:
            self.surface.blit(loadedimage, (0,0))
        self.redrawNeeded = True
        # Unless told not to, rebroadcast the new canvas contents ("SETIMG").
        if not ( (len(args) >1) and args[1] == "nopropogate" ):
            self.getimg(())
        self.clean = True

    def save(self, args):
        """Save the canvas to a file; PIL if available, else pygame (bmp)."""
        filename = args[0]
        try:
            imagestring = pygame.image.tostring(self.surface,"RGB")
            pilImage = Image.fromstring("RGB", self.surface.get_size(), imagestring)
            pilImage.save(filename)
        except NameError:
            # PIL's Image module didn't import; fall back to pygame's saver.
            pygame.image.save(self.surface, filename)
        self.clean = True

    def loaddeck(self, args):
        """Prompt for a zip file and password, and load it as the slide deck."""
        root = Tk()
        root.withdraw()
        filename = askopenfilename(filetypes=[("Zip Archives",".zip")],initialdir="Decks",title="Load Slide Deck",parent=root)
        root.destroy()
        root = Tk()
        root.withdraw()
        password = askstring("Deck Password","Please enter the password for this zip file, or leave blank if there is no password:", parent=root)
        root.destroy()
        if (filename):
            try:
                unzipped = ZipFile(filename)
                # Replace the current scribbles with the deck contents.
                self.clearscribbles("")
                unzipped.extractall(path=self.notepad,pwd=password)
                files = os.listdir(self.notepad)
                files.sort()
                loadstring = self.notepad + "/" + files[0]
                self.send("first", "toHistory")
                self.send(chr(0) + "CLRTKR", "toTicker")
                self.send("Deck loaded successfully","toTicker")
            except Exception, e:
                self.send(chr(0) + "CLRTKR", "toTicker")
                self.send("Failed to open the deck specified. You may have entered the password incorrectly","toTicker")
        self.clean = True

    def savedeck(self, args):
        """Zip the current slides into Decks/<timestamp>.zip (optional password)."""
        dt = datetime.now()
        filename = dt.strftime("%Y%m%d-%H%M%S")
        filename = filename + ".zip"
        num_pages = len(os.listdir(self.notepad))
        root = Tk()
        root.withdraw()
        password = askstring("Deck Password","Please enter a password for the zip file, or leave blank for no password:", parent=root)
        root.destroy()
        try:
            # Shells out to 'zip' because ZipFile can't create encrypted archives.
            if (password != ""):
                #os.system("zip", "-j", "-q", "-P " + password, "Decks/" + filename, self.notepad + "/*.png")
                os.system("zip -j -q -P " + password + " Decks/" + filename + " " + self.notepad + "/*.png")
                self.send(chr(0) + "CLRTKR", "toTicker")
                self.send("Zip file 'Decks/" + filename + "' created successfully with password","toTicker")
            else:
                os.system("zip -j -q Decks/" + filename + " " + self.notepad + "/*.png")
                """zipped = ZipFile('Decks/' + filename,'w') # This seems to have broken
                for x in range(num_pages + 1):
                    if (x > 0):
                        zipped.write(self.notepad + "/" + "slide." + str(x) + ".png", "slide." + str(x) + ".png")
                zipped.close()"""
                self.send(chr(0) + "CLRTKR", "toTicker")
                self.send("Zip file 'Decks/" + filename + "' created successfully without password","toTicker")
        except Exception, e:
            self.send(chr(0) + "CLRTKR", "toTicker")
            self.send("Failed to write to zip file 'Decks/" + filename + "'","toTicker")
        self.clean = True

    def clearscribbles(self, args):
        """Delete all slide .png files, clear the canvas, rewind history."""
        try:
            #for x in os.listdir(self.notepad):
            for x in os.listdir(self.notepad):
                if (os.path.splitext(x)[1] == ".png"):
                    os.remove(self.notepad + "/" + x)
            self.clear("")
            self.send("first", "toHistory")
        except Exception, e:
            # Best-effort cleanup: failures are ignored.
            pass
        self.clean = True

    def deleteslide(self, args):
        # Blank the canvas and tell the history component to drop the slide.
        self.clear("")
        self.send("delete", "toHistory")
        self.clean = True

    def getimg(self, args):
        """Broadcast the canvas contents as a zlib-compressed SETIMG message."""
        imagestring = pygame.image.tostring(self.surface,"RGB")
        imagestring = zlib.compress(imagestring)
        w,h = self.surface.get_size()
        self.send( [["SETIMG",imagestring,str(w),str(h),"RGB"]], "outbox" )
        # print "GETIMG"

    def setimg(self, args):
        # args: compressed image string, width, height, pixel format.
        w,h = int(args[1]), int(args[2])
        imagestring = zlib.decompress(args[0])
        recvsurface = pygame.image.frombuffer(imagestring, (w,h), args[3])
        self.surface.blit(recvsurface, (0,0))
        self.redrawNeeded = True

    def write(self, args):
        # args: x, y, font size, r, g, b, text.
        x,y,size,r,g,b = [int(a) for a in args[0:6]]
        text = args[6]
        font = pygame.font.Font(None,size)
        textimg = font.render(text, self.antialias, (r,g,b))
        self.surface.blit(textimg, (x,y))
        self.redrawNeeded = True

    def webcam(self, args):
        # args: snapshot surface, blit origin, 'local' or remote marker.
        snapshot = args[0]
        imageorigin = args[1]
        location = args[2]
        self.surface.blit(snapshot, imageorigin) # temp
        if (location == "local"):
            # Local snapshots are drawn a second time, offset 141px lower.
            imageorigin = (imageorigin[0], imageorigin[1] + 141)
            self.surface.blit(snapshot, imageorigin)
        self.redrawNeeded = True
        self.send({"REDRAW":True, "surface":self.surface}, "toDisplay")
        #self.send("dirty", "surfacechanged")

    def quit(self, args):
        """Confirm with the user via a Tk dialog, then stop the scheduler."""
        root = Tk()
        root.withdraw()
        kill = False
        if (askyesno("Confirm","Unsaved changes will be lost. Are you sure you want to quit?",parent=root)):
            # perform quit
            kill = True
            #pygame.quit() # This isn't the right way to do it!
            # Also, saving won't work as the program exits before it's happened
        root.destroy()
        if (kill):
            print("Exiting")
            self.scheduler.stop()
| |
from __future__ import with_statement
import datetime
try:
import urlparse
except ImportError:
from urllib import parse as urlparse
from flask import get_flashed_messages
from flask import request
from flask import session
from flask import url_for
from flask_peewee.auth import Auth
from flask_peewee.auth import LoginForm
from flask_peewee.tests.base import FlaskPeeweeTestCase
from flask_peewee.tests.test_app import User
from flask_peewee.tests.test_app import app
from flask_peewee.tests.test_app import auth
from flask_peewee.tests.test_app import db
class TestAuth(Auth):
    # Override setup() as a no-op so the class can be instantiated repeatedly
    # against the same app in tests -- presumably Auth.setup registers URL
    # routes; verify against flask_peewee.auth.Auth.
    def setup(self):
        pass
class AuthTestCase(FlaskPeeweeTestCase):
    def setUp(self):
        """Create a fresh TestAuth bound to the shared app/db for each test."""
        super(AuthTestCase, self).setUp()
        self.test_auth = TestAuth(app, db)
def login(self, username='admin', password='admin', context=None):
context = context or self.app
return context.post('/accounts/login/', data={
'username': username,
'password': password,
})
def logout(self, context=None):
context = context or self.app
return context.post('/accounts/logout/')
def test_table(self):
self.assertEqual(self.test_auth.User._meta.table_name, 'user')
fake_auth = TestAuth(app, db, db_table='peewee_users')
self.assertEqual(fake_auth.User._meta.table_name, 'peewee_users')
    def test_login_view(self):
        """Exercise the login view: GET form, missing field, bad credentials,
        inactive user, and finally a successful login."""
        self.create_users()
        with self.flask_app.test_client() as c:
            resp = c.get('/accounts/login/')
            self.assertEqual(resp.status_code, 200)

            # check that we have no logged-in user
            self.assertContext('user', None)

            # the view should render an empty LoginForm
            frm = self.get_context('form')
            self.assertTrue(isinstance(frm, LoginForm))
            self.assertEqual(frm.data, {'username': None, 'password': None})

            # make a post missing the username
            resp = c.post('/accounts/login/', data={
                'username': '',
                'password': 'xxx',
            })
            self.assertEqual(resp.status_code, 200)

            # check form for errors
            frm = self.get_context('form')
            self.assertEqual(frm.errors, {'username': [u'This field is required.']})

            # check that no messages were generated
            self.assertFalse('_flashes' in session)

            # check that the auth API does not indicate a logged-in user
            self.assertEqual(auth.get_logged_in_user(), None)

            # make a post with a bad username/password combo
            resp = c.post('/accounts/login/', data={
                'username': 'normal',
                'password': 'baz',
            })
            self.assertEqual(resp.status_code, 200)

            # both fields were present so no form errors, but flash the user
            # indicating bad username/password combo
            self.assertTrue('_flashes' in session)
            messages = get_flashed_messages()
            self.assertEqual(messages, [
                'Incorrect username or password',
            ])

            # check that the auth API does not indicate a logged-in user
            self.assertEqual(auth.get_logged_in_user(), None)

            # make a post with an inactive user
            resp = c.post('/accounts/login/', data={
                'username': 'inactive',
                'password': 'inactive',
            })
            self.assertEqual(resp.status_code, 200)

            # still no logged-in user
            self.assertContext('user', None)

            # check that the auth API does not indicate a logged-in user
            self.assertEqual(auth.get_logged_in_user(), None)

            # finally post as a known good user
            resp = c.post('/accounts/login/', data={
                'username': 'normal',
                'password': 'normal',
            })
            self.assertEqual(resp.status_code, 302)

            # check that we now have a logged-in user
            self.assertEqual(auth.get_logged_in_user(), self.normal)
    def test_login_redirect_in_depth(self):
        """The 'next' parameter survives the redirect chain: it is set when an
        anonymous user hits a protected page, persists through a failed login,
        and is honoured by a successful one."""
        self.create_users()
        with self.flask_app.test_client() as c:
            resp = c.get('/admin/')
            location = resp.location
            parsed = urlparse.urlparse(location)
            querystring = urlparse.parse_qs(parsed.query)
            self.assertEqual(querystring, {'next': ['/admin/']})

            # Following the redirect, the next url is passed to context.
            location = location.replace('http://localhost', '')
            resp = c.get(location)
            self.assertEqual(self.get_context('next'), '/admin/')

            # Simulate incorrect password.
            resp = c.post('/accounts/login/', data={
                'username': 'normal',
                'password': 'incorrect-password',
                'next': '/admin/',
            })
            self.assertEqual(resp.status_code, 200)
            # 'next' must be preserved so a retry still redirects correctly.
            self.assertEqual(self.get_context('next'), '/admin/')

            resp = c.post('/accounts/login/', data={
                'username': 'normal',
                'password': 'normal',
                'next': '/admin/',
            })
            self.assertEqual(resp.status_code, 302)
            self.assertTrue(resp.headers['location'].endswith('/admin/'))
def test_login_default_redirect(self):
    """A login without an explicit ``next`` falls back to the root url."""
    self.create_users()
    with self.flask_app.test_client() as c:
        resp = c.post('/accounts/login/', data={
            'username': 'normal',
            'password': 'normal',
        })
        self.assertEqual(resp.status_code, 302)
        location = resp.location.replace('http://localhost', '')
        # Was ``assertTrue(location, '/')`` — assertTrue's second argument
        # is the failure *message*, so it only checked truthiness.
        # Compare the redirect target for real.
        self.assertEqual(location, '/')
def test_login_redirect(self):
    """A successful login with ``next`` set redirects there."""
    self.create_users()
    with self.flask_app.test_client() as client:
        response = client.post('/accounts/login/', data={
            'username': 'normal',
            'password': 'normal',
            'next': '/foo-baz/',
        })
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response.headers['location'].endswith('/foo-baz/'))
def test_login_logout(self):
    """Login and logout keep the auth API's current-user in sync."""
    self.create_users()
    with self.flask_app.test_client() as client:
        client.post('/accounts/login/', data={
            'username': 'normal',
            'password': 'normal',
        })
        self.assertEqual(auth.get_logged_in_user(), self.normal)
        # Logging out clears the current user.
        client.post('/accounts/logout/')
        self.assertEqual(auth.get_logged_in_user(), None)
        client.post('/accounts/login/', data={
            'username': 'admin',
            'password': 'admin',
        })
        self.assertEqual(auth.get_logged_in_user(), self.admin)
        # Logging in again without an intervening logout replaces the user.
        client.post('/accounts/login/', data={
            'username': 'normal',
            'password': 'normal',
        })
        self.assertEqual(auth.get_logged_in_user(), self.normal)
def test_login_required(self):
    """Anonymous users are redirected off /private/; any login gets in."""
    self.create_users()
    with self.flask_app.test_client() as client:
        response = client.get('/private/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response.headers['location'].endswith(
            '/accounts/login/?next=%2Fprivate%2F'))
        # Both a regular user and an admin may view the page.
        for username in ('normal', 'admin'):
            self.login(username, username, client)
            response = client.get('/private/')
            self.assertEqual(response.status_code, 200)
            self.assertEqual(auth.get_logged_in_user(),
                             getattr(self, username))
def test_admin_required(self):
    """Only admins may view /secret/; everyone else is sent to login."""
    self.create_users()
    with self.flask_app.test_client() as client:
        # Anonymous: redirected to login with ``next`` preserved.
        response = client.get('/secret/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response.headers['location'].endswith(
            '/accounts/login/?next=%2Fsecret%2F'))
        # Ordinary user: logged in, but still redirected away.
        self.login('normal', 'normal', client)
        response = client.get('/secret/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response.headers['location'].endswith(
            '/accounts/login/?next=%2Fsecret%2F'))
        self.assertEqual(auth.get_logged_in_user(), self.normal)
        # Admin: allowed through.
        self.login('admin', 'admin', client)
        response = client.get('/secret/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(auth.get_logged_in_user(), self.admin)
| |
"""Provides device automations for MQTT."""
from __future__ import annotations
import logging
from typing import Callable
import attr
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.const import (
CONF_DEVICE,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_PLATFORM,
CONF_TYPE,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import CONF_PAYLOAD, CONF_QOS, DOMAIN, debug_info, trigger as mqtt_trigger
from .. import mqtt
from .const import ATTR_DISCOVERY_HASH, ATTR_DISCOVERY_TOPIC
from .discovery import MQTT_DISCOVERY_DONE, MQTT_DISCOVERY_UPDATED, clear_discovery_hash
from .mixins import (
CONF_CONNECTIONS,
CONF_IDENTIFIERS,
MQTT_ENTITY_DEVICE_INFO_SCHEMA,
cleanup_device_registry,
device_info_from_config,
validate_device_has_at_least_one_identifier,
)
_LOGGER = logging.getLogger(__name__)

# Keys used in the device-trigger discovery payload / trigger config.
CONF_AUTOMATION_TYPE = "automation_type"
CONF_DISCOVERY_ID = "discovery_id"
CONF_SUBTYPE = "subtype"
CONF_TOPIC = "topic"

DEFAULT_ENCODING = "utf-8"
DEVICE = "device"

# Static part of every trigger dict returned by async_get_triggers().
MQTT_TRIGGER_BASE = {
    # Trigger when MQTT message is received
    CONF_PLATFORM: DEVICE,
    CONF_DOMAIN: DOMAIN,
}

# Validates the trigger config referenced from an automation.
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
    {
        vol.Required(CONF_PLATFORM): DEVICE,
        vol.Required(CONF_DOMAIN): DOMAIN,
        vol.Required(CONF_DEVICE_ID): str,
        vol.Required(CONF_DISCOVERY_ID): str,
        vol.Required(CONF_TYPE): cv.string,
        vol.Required(CONF_SUBTYPE): cv.string,
    }
)

# Validates the MQTT discovery payload that describes a device trigger.
TRIGGER_DISCOVERY_SCHEMA = mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_AUTOMATION_TYPE): str,
        vol.Required(CONF_DEVICE): MQTT_ENTITY_DEVICE_INFO_SCHEMA,
        vol.Optional(CONF_PAYLOAD, default=None): vol.Any(None, cv.string),
        vol.Required(CONF_SUBTYPE): cv.string,
        vol.Required(CONF_TOPIC): cv.string,
        vol.Required(CONF_TYPE): cv.string,
        vol.Optional(CONF_VALUE_TEMPLATE, default=None): vol.Any(None, cv.string),
    },
    validate_device_has_at_least_one_identifier,
)

# hass.data key for the dict of known Trigger objects, keyed by discovery id.
DEVICE_TRIGGERS = "mqtt_device_triggers"
@attr.s(slots=True)
class TriggerInstance:
    """Attached trigger settings.

    One instance per automation attached to a :class:`Trigger`.
    """

    # Callback to run when the trigger fires.
    action: AutomationActionType = attr.ib()
    automation_info: dict = attr.ib()
    # The device trigger this instance belongs to.
    trigger: Trigger = attr.ib()
    # Detach callback for the underlying MQTT trigger; None when not attached.
    remove: CALLBACK_TYPE | None = attr.ib(default=None)

    async def async_attach_trigger(self):
        """Attach MQTT trigger."""
        # Translate the device-trigger settings into a plain mqtt trigger
        # config and validate it.
        mqtt_config = {
            mqtt_trigger.CONF_PLATFORM: mqtt.DOMAIN,
            mqtt_trigger.CONF_TOPIC: self.trigger.topic,
            mqtt_trigger.CONF_ENCODING: DEFAULT_ENCODING,
            mqtt_trigger.CONF_QOS: self.trigger.qos,
        }
        # Payload and value template are optional in the discovery payload.
        if self.trigger.payload:
            mqtt_config[CONF_PAYLOAD] = self.trigger.payload
        if self.trigger.value_template:
            mqtt_config[CONF_VALUE_TEMPLATE] = self.trigger.value_template
        mqtt_config = mqtt_trigger.TRIGGER_SCHEMA(mqtt_config)

        # Detach any previous subscription before (re-)attaching.
        if self.remove:
            self.remove()
        self.remove = await mqtt_trigger.async_attach_trigger(
            self.trigger.hass,
            mqtt_config,
            self.action,
            self.automation_info,
        )
@attr.s(slots=True)
class Trigger:
    """Device trigger settings.

    Holds the discovered MQTT settings for one device trigger plus all
    automations currently attached to it.
    """

    device_id: str = attr.ib()
    discovery_data: dict = attr.ib()
    hass: HomeAssistantType = attr.ib()
    payload: str = attr.ib()
    qos: int = attr.ib()
    # Callback that disconnects the discovery-update dispatcher listener.
    remove_signal: Callable[[], None] = attr.ib()
    subtype: str = attr.ib()
    # None marks a trigger that is known from an automation but not yet
    # (or no longer) discovered.
    topic: str = attr.ib()
    type: str = attr.ib()
    value_template: str = attr.ib()
    # Automations attached to this trigger.
    trigger_instances: list[TriggerInstance] = attr.ib(factory=list)

    async def add_trigger(self, action, automation_info):
        """Add MQTT trigger."""
        instance = TriggerInstance(action, automation_info, self)
        self.trigger_instances.append(instance)

        if self.topic is not None:
            # If we know about the trigger, subscribe to MQTT topic
            await instance.async_attach_trigger()

        @callback
        def async_remove() -> None:
            """Remove trigger."""
            if instance not in self.trigger_instances:
                raise HomeAssistantError("Can't remove trigger twice")

            if instance.remove:
                instance.remove()
            self.trigger_instances.remove(instance)

        return async_remove

    async def update_trigger(self, config, discovery_hash, remove_signal):
        """Update MQTT device trigger."""
        self.remove_signal = remove_signal
        self.type = config[CONF_TYPE]
        self.subtype = config[CONF_SUBTYPE]
        self.payload = config[CONF_PAYLOAD]
        self.qos = config[CONF_QOS]
        topic_changed = self.topic != config[CONF_TOPIC]
        self.topic = config[CONF_TOPIC]
        self.value_template = config[CONF_VALUE_TEMPLATE]

        # Unsubscribe+subscribe if this trigger is in use and topic has changed
        # If topic is same unsubscribe+subscribe will execute in the wrong order
        # because unsubscribe is done with help of async_create_task
        if topic_changed:
            for trig in self.trigger_instances:
                await trig.async_attach_trigger()

    def detach_trigger(self):
        """Remove MQTT device trigger."""
        # Mark trigger as unknown
        self.topic = None

        # Unsubscribe if this trigger is in use
        for trig in self.trigger_instances:
            if trig.remove:
                trig.remove()
                trig.remove = None
async def _update_device(hass, config_entry, config):
    """Update device registry."""
    device_registry = await hass.helpers.device_registry.async_get_registry()
    device_info = device_info_from_config(config[CONF_DEVICE])
    # Register only when both an entry id and device info are available.
    if config_entry.entry_id is None or device_info is None:
        return
    device_info["config_entry_id"] = config_entry.entry_id
    device_registry.async_get_or_create(**device_info)
async def async_setup_trigger(hass, config, config_entry, discovery_data):
    """Set up the MQTT device trigger from a discovery payload.

    Validates the payload, keeps the device registry in sync, stores (or
    updates) the Trigger in ``hass.data[DEVICE_TRIGGERS]`` and wires up a
    dispatcher listener for subsequent discovery updates/removals.
    """
    config = TRIGGER_DISCOVERY_SCHEMA(config)
    discovery_hash = discovery_data[ATTR_DISCOVERY_HASH]
    discovery_id = discovery_hash[1]
    remove_signal = None

    async def discovery_update(payload):
        """Handle discovery update."""
        _LOGGER.info(
            "Got update for trigger with hash: %s '%s'", discovery_hash, payload
        )
        if not payload:
            # Empty payload: Remove trigger
            _LOGGER.info("Removing trigger: %s", discovery_hash)
            debug_info.remove_trigger_discovery_data(hass, discovery_hash)
            if discovery_id in hass.data[DEVICE_TRIGGERS]:
                device_trigger = hass.data[DEVICE_TRIGGERS][discovery_id]
                device_trigger.detach_trigger()
                clear_discovery_hash(hass, discovery_hash)
                remove_signal()
                # NOTE(review): relies on ``device`` being bound in the
                # enclosing scope (set further below) before a removal
                # update can arrive — confirm no update races the lookup.
                await cleanup_device_registry(hass, device.id)
        else:
            # Non-empty payload: Update trigger
            _LOGGER.info("Updating trigger: %s", discovery_hash)
            debug_info.update_trigger_discovery_data(hass, discovery_hash, payload)
            config = TRIGGER_DISCOVERY_SCHEMA(payload)
            await _update_device(hass, config_entry, config)
            device_trigger = hass.data[DEVICE_TRIGGERS][discovery_id]
            await device_trigger.update_trigger(config, discovery_hash, remove_signal)
        # Always signal that this discovery round is finished.
        async_dispatcher_send(hass, MQTT_DISCOVERY_DONE.format(discovery_hash), None)

    # Listen for later updates/removals of this discovery topic.
    remove_signal = async_dispatcher_connect(
        hass, MQTT_DISCOVERY_UPDATED.format(discovery_hash), discovery_update
    )

    await _update_device(hass, config_entry, config)

    # Resolve the registry device this trigger belongs to.
    device_registry = await hass.helpers.device_registry.async_get_registry()
    device = device_registry.async_get_device(
        {(DOMAIN, id_) for id_ in config[CONF_DEVICE][CONF_IDENTIFIERS]},
        {tuple(x) for x in config[CONF_DEVICE][CONF_CONNECTIONS]},
    )

    if device is None:
        # No matching device: still signal discovery completion and bail.
        async_dispatcher_send(hass, MQTT_DISCOVERY_DONE.format(discovery_hash), None)
        return

    if DEVICE_TRIGGERS not in hass.data:
        hass.data[DEVICE_TRIGGERS] = {}
    if discovery_id not in hass.data[DEVICE_TRIGGERS]:
        # First sight of this trigger: create and store it.
        hass.data[DEVICE_TRIGGERS][discovery_id] = Trigger(
            hass=hass,
            device_id=device.id,
            discovery_data=discovery_data,
            type=config[CONF_TYPE],
            subtype=config[CONF_SUBTYPE],
            topic=config[CONF_TOPIC],
            payload=config[CONF_PAYLOAD],
            qos=config[CONF_QOS],
            remove_signal=remove_signal,
            value_template=config[CONF_VALUE_TEMPLATE],
        )
    else:
        # A placeholder may already exist (see async_attach_trigger);
        # fill in the discovered settings.
        await hass.data[DEVICE_TRIGGERS][discovery_id].update_trigger(
            config, discovery_hash, remove_signal
        )
    debug_info.add_trigger_discovery_data(
        hass, discovery_hash, discovery_data, device.id
    )

    async_dispatcher_send(hass, MQTT_DISCOVERY_DONE.format(discovery_hash), None)
async def async_device_removed(hass: HomeAssistant, device_id: str):
    """Handle the removal of a device.

    Drops every trigger belonging to the device, tears down its MQTT
    subscription, and publishes an empty retained message to the discovery
    topic.
    """
    triggers = await async_get_triggers(hass, device_id)
    for trig in triggers:
        device_trigger = hass.data[DEVICE_TRIGGERS].pop(trig[CONF_DISCOVERY_ID])
        if device_trigger:
            discovery_hash = device_trigger.discovery_data[ATTR_DISCOVERY_HASH]
            discovery_topic = device_trigger.discovery_data[ATTR_DISCOVERY_TOPIC]

            debug_info.remove_trigger_discovery_data(hass, discovery_hash)
            device_trigger.detach_trigger()
            clear_discovery_hash(hass, discovery_hash)
            # Stop listening for further discovery updates.
            device_trigger.remove_signal()
            # Clear the retained discovery payload from the broker.
            mqtt.publish(
                hass,
                discovery_topic,
                "",
                retain=True,
            )
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> list[dict]:
    """List device triggers for MQTT devices."""
    if DEVICE_TRIGGERS not in hass.data:
        return []
    # Report only triggers that belong to this device and have actually
    # been discovered (a known topic).
    return [
        {
            **MQTT_TRIGGER_BASE,
            "device_id": device_id,
            "type": trig.type,
            "subtype": trig.subtype,
            "discovery_id": discovery_id,
        }
        for discovery_id, trig in hass.data[DEVICE_TRIGGERS].items()
        if trig.device_id == device_id and trig.topic is not None
    ]
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: AutomationActionType,
    automation_info: dict,
) -> CALLBACK_TYPE:
    """Attach a trigger.

    Returns the detach callback produced by Trigger.add_trigger().
    """
    if DEVICE_TRIGGERS not in hass.data:
        hass.data[DEVICE_TRIGGERS] = {}
    device_id = config[CONF_DEVICE_ID]
    discovery_id = config[CONF_DISCOVERY_ID]

    if discovery_id not in hass.data[DEVICE_TRIGGERS]:
        # The trigger has not been seen via MQTT discovery yet: store a
        # placeholder with an unknown (None) topic; it is completed later
        # when async_setup_trigger() calls update_trigger().
        hass.data[DEVICE_TRIGGERS][discovery_id] = Trigger(
            hass=hass,
            device_id=device_id,
            discovery_data=None,
            remove_signal=None,
            type=config[CONF_TYPE],
            subtype=config[CONF_SUBTYPE],
            topic=None,
            payload=None,
            qos=None,
            value_template=None,
        )
    return await hass.data[DEVICE_TRIGGERS][discovery_id].add_trigger(
        action, automation_info
    )
| |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import string
from mock import ANY
from mock import call
from mock import DEFAULT
from mock import MagicMock
from mock import Mock
from mock import NonCallableMagicMock
from mock import patch
from oslo_utils import netutils
from testtools import ExpectedException
from trove.common import exception
from trove.common.instance import ServiceStatuses
from trove.guestagent import backup
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.cassandra import (
manager as cass_manager)
from trove.guestagent.datastore.experimental.cassandra import (
service as cass_service)
from trove.guestagent.db import models
from trove.guestagent import pkg as pkg
from trove.guestagent import volume
from trove.tests.unittests import trove_testtools
class GuestAgentCassandraDBManagerTest(trove_testtools.TestCase):
    """Unit tests for the Cassandra guest-agent manager and admin."""

    # Data-volume mount point used by the prepare() tests.
    __MOUNT_POINT = '/var/lib/cassandra'

    # Private helper names (and one dotted path) used with mock.patch in
    # the tests below.
    __N_GAK = '_get_available_keyspaces'
    __N_GLU = '_get_listed_users'
    __N_BU = '_build_user'
    __N_RU = '_rename_user'
    __N_AUP = '_alter_user_password'
    __N_CAU = 'trove.guestagent.db.models.CassandraUser'
    __N_CU = '_create_user'
    __N_GFA = '_grant_full_access_on_keyspace'
    __N_DU = '_drop_user'

    # Permission set expected to be granted per keyspace by grant_access().
    __ACCESS_MODIFIERS = ('ALTER', 'CREATE', 'DROP', 'MODIFY', 'SELECT')

    # Expected CQL statement templates.
    __CREATE_DB_FORMAT = (
        "CREATE KEYSPACE \"{}\" WITH REPLICATION = "
        "{{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }};"
    )
    __DROP_DB_FORMAT = "DROP KEYSPACE \"{}\";"
    __CREATE_USR_FORMAT = "CREATE USER '{}' WITH PASSWORD %s NOSUPERUSER;"
    __ALTER_USR_FORMAT = "ALTER USER '{}' WITH PASSWORD %s;"
    __DROP_USR_FORMAT = "DROP USER '{}';"
    __GRANT_FORMAT = "GRANT {} ON KEYSPACE \"{}\" TO '{}';"
    __REVOKE_FORMAT = "REVOKE ALL PERMISSIONS ON KEYSPACE \"{}\" FROM '{}';"
    __LIST_PERMISSIONS_FORMAT = "LIST ALL PERMISSIONS NORECURSIVE;"
    __LIST_PERMISSIONS_OF_FORMAT = "LIST ALL PERMISSIONS OF '{}' NORECURSIVE;"
    __LIST_DB_FORMAT = "SELECT * FROM system.schema_keyspaces;"
    __LIST_USR_FORMAT = "LIST USERS;"
@patch.object(ImportOverrideStrategy, '_initialize_import_directory')
@patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
def setUp(self, *args, **kwargs):
    """Stub the service status and snapshot everything the tests patch.

    ``*args`` swallows the mocks injected by the patch decorators.  The
    ``origin_*`` attributes are restored one-for-one in tearDown().
    """
    super(GuestAgentCassandraDBManagerTest, self).setUp()
    self.real_status = cass_service.CassandraAppStatus.set_status

    class FakeInstanceServiceStatus(object):
        # Minimal stand-in returned by the mocked set_status().
        status = ServiceStatuses.NEW

        def save(self):
            pass

    cass_service.CassandraAppStatus.set_status = MagicMock(
        return_value=FakeInstanceServiceStatus())
    self.context = trove_testtools.TroveTestContext(self)
    self.manager = cass_manager.Manager()
    # Install a known admin via the name-mangled private attribute.
    self.manager._Manager__admin = cass_service.CassandraAdmin(
        models.CassandraUser('Test'))
    self.admin = self.manager._Manager__admin
    self.pkg = cass_service.packager
    # Snapshot originals of everything the tests monkey-patch.
    self.origin_os_path_exists = os.path.exists
    self.origin_format = volume.VolumeDevice.format
    self.origin_migrate_data = volume.VolumeDevice.migrate_data
    self.origin_mount = volume.VolumeDevice.mount
    self.origin_mount_points = volume.VolumeDevice.mount_points
    self.origin_stop_db = cass_service.CassandraApp.stop_db
    self.origin_start_db = cass_service.CassandraApp.start_db
    self.origin_install_db = cass_service.CassandraApp._install_db
    self.original_get_ip = netutils.get_my_ipv4
    self.orig_make_host_reachable = (
        cass_service.CassandraApp.apply_initial_guestagent_configuration)
def tearDown(self):
    """Restore every attribute monkey-patched in setUp() and the tests.

    Mirrors the ``origin_*`` snapshots taken in setUp() one-for-one.
    """
    super(GuestAgentCassandraDBManagerTest, self).tearDown()
    cass_service.packager = self.pkg
    os.path.exists = self.origin_os_path_exists
    volume.VolumeDevice.format = self.origin_format
    volume.VolumeDevice.migrate_data = self.origin_migrate_data
    volume.VolumeDevice.mount = self.origin_mount
    volume.VolumeDevice.mount_points = self.origin_mount_points
    cass_service.CassandraApp.stop_db = self.origin_stop_db
    cass_service.CassandraApp.start_db = self.origin_start_db
    cass_service.CassandraApp._install_db = self.origin_install_db
    netutils.get_my_ipv4 = self.original_get_ip
    cass_service.CassandraApp.apply_initial_guestagent_configuration = (
        self.orig_make_host_reachable)
    cass_service.CassandraAppStatus.set_status = self.real_status
def test_update_status(self):
    """update_status() delegates to the app's status object."""
    status_mock = MagicMock()
    self.manager.app.status = status_mock
    self.manager.update_status(self.context)
    status_mock.update.assert_any_call()
def test_prepare_pkg(self):
    # prepare() with the Cassandra package listed for installation.
    self._prepare_dynamic(['cassandra'])
def test_prepare_no_pkg(self):
    # prepare() with no packages; db already installed (the default).
    self._prepare_dynamic([])
def test_prepare_db_not_installed(self):
    # prepare() with no packages and no database installed yet.
    self._prepare_dynamic([], is_db_installed=False)
def test_prepare_db_not_installed_no_package(self):
    """prepare() with no packages while the db is not installed."""
    # Was passing is_db_installed=True, which contradicts the test name
    # and made this an exact duplicate of test_prepare_no_pkg.
    self._prepare_dynamic([], is_db_installed=False)
@patch.object(backup, 'restore')
def test_prepare_db_restore(self, restore):
    """prepare() with backup_info restores the backup onto the mount."""
    backup_info = {'id': 'backup_id',
                   'instance_id': 'fake-instance-id',
                   'location': 'fake-location',
                   'type': 'InnoBackupEx',
                   'checksum': 'fake-checksum'}
    self._prepare_dynamic(['cassandra'], is_db_installed=False,
                          backup_info=backup_info)
    # The restore must target the standard data mount point.
    restore.assert_called_once_with(
        self.context, backup_info, self.__MOUNT_POINT)
@patch.multiple(operating_system, enable_service_on_boot=DEFAULT,
                disable_service_on_boot=DEFAULT)
@patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
def test_superuser_password_reset(
        self, _, enable_service_on_boot, disable_service_on_boot):
    """_reset_admin_password() disables auth, resets, then re-secures.

    Asserted sequence: boot-start disabled; remote access and
    authentication disabled; db started without boot enablement;
    password reset to default; node secured with the updated user;
    restart; stop; remote access and boot-start re-enabled.
    """
    fake_status = MagicMock()
    fake_status.is_running = False

    test_app = cass_service.CassandraApp()
    test_app.status = fake_status
    with patch.multiple(
            test_app,
            start_db=DEFAULT,
            stop_db=DEFAULT,
            restart=DEFAULT,
            _CassandraApp__disable_remote_access=DEFAULT,
            _CassandraApp__enable_remote_access=DEFAULT,
            _CassandraApp__disable_authentication=DEFAULT,
            _CassandraApp__enable_authentication=DEFAULT,
            _CassandraApp__reset_user_password_to_default=DEFAULT,
            secure=DEFAULT) as calls:

        test_app._reset_admin_password()

        disable_service_on_boot.assert_called_once_with(
            test_app.service_candidates)
        calls[
            '_CassandraApp__disable_remote_access'
        ].assert_called_once_with()
        calls[
            '_CassandraApp__disable_authentication'
        ].assert_called_once_with()
        # Fixed: the original statement ended with a stray trailing comma,
        # silently building (and discarding) a 1-tuple.
        calls['start_db'].assert_called_once_with(update_db=False,
                                                  enable_on_boot=False)
        calls[
            '_CassandraApp__enable_authentication'
        ].assert_called_once_with()
        pw_reset_mock = calls[
            '_CassandraApp__reset_user_password_to_default'
        ]
        pw_reset_mock.assert_called_once_with(test_app._ADMIN_USER)
        calls['secure'].assert_called_once_with(
            update_user=pw_reset_mock.return_value)
        calls['restart'].assert_called_once_with()
        calls['stop_db'].assert_called_once_with()
        calls[
            '_CassandraApp__enable_remote_access'
        ].assert_called_once_with()
        enable_service_on_boot.assert_called_once_with(
            test_app.service_candidates)
@patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
def test_change_cluster_name(self, _):
    """change_cluster_name() resets the name, updates config, restarts."""
    fake_status = MagicMock()
    fake_status.is_running = True

    test_app = cass_service.CassandraApp()
    test_app.status = fake_status
    with patch.multiple(
            test_app,
            start_db=DEFAULT,
            stop_db=DEFAULT,
            restart=DEFAULT,
            _update_cluster_name_property=DEFAULT,
            _CassandraApp__reset_cluster_name=DEFAULT) as calls:
        sample_name = NonCallableMagicMock()
        test_app.change_cluster_name(sample_name)
        # Both the live reset and the config property update must see
        # the same new name, followed by a restart.
        calls['_CassandraApp__reset_cluster_name'].assert_called_once_with(
            sample_name)
        calls['_update_cluster_name_property'].assert_called_once_with(
            sample_name)
        calls['restart'].assert_called_once_with()
@patch.object(cass_service, 'CONF', DEFAULT)
@patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
def test_apply_post_restore_updates(self, _, conf_mock):
    """Verify the post-restore call sequence.

    Expected: cluster-name property set to the old instance id, admin
    password reset, start, rename to the new guest id, stop.
    """
    fake_status = MagicMock()
    fake_status.is_running = False

    test_app = cass_service.CassandraApp()
    test_app.status = fake_status
    with patch.multiple(
            test_app,
            start_db=DEFAULT,
            stop_db=DEFAULT,
            _update_cluster_name_property=DEFAULT,
            _reset_admin_password=DEFAULT,
            change_cluster_name=DEFAULT) as calls:
        backup_info = {'instance_id': 'old_id'}
        conf_mock.guest_id = 'new_id'
        test_app._apply_post_restore_updates(backup_info)
        calls['_update_cluster_name_property'].assert_called_once_with(
            'old_id')
        calls['_reset_admin_password'].assert_called_once_with()
        calls['start_db'].assert_called_once_with(update_db=False)
        calls['change_cluster_name'].assert_called_once_with('new_id')
        calls['stop_db'].assert_called_once_with()
def _prepare_dynamic(self, packages,
                     config_content='MockContent', device_path='/dev/vdb',
                     is_db_installed=True, backup_info=None,
                     is_root_enabled=False,
                     overrides=None):
    """Drive Manager.prepare() with mocked app/volume collaborators.

    :param packages:        packages handed to prepare().
    :param config_content:  configuration contents passed through.
    :param device_path:     block device handed to prepare().
    :param is_db_installed: reported by the fake package check.
    :param backup_info:     when set, the restore path is asserted.
    :param is_root_enabled: accepted for signature parity; unused here.
    :param overrides:       forwarded to prepare() (was hard-coded None).
    """
    # mocks
    mock_status = MagicMock()
    mock_app = MagicMock()
    mock_app.status = mock_status
    self.manager._app = mock_app

    mock_status.begin_install = MagicMock(return_value=None)
    mock_app.install_if_needed = MagicMock(return_value=None)
    mock_app.init_storage_structure = MagicMock(return_value=None)
    mock_app.write_config = MagicMock(return_value=None)
    mock_app.apply_initial_guestagent_configuration = MagicMock(
        return_value=None)
    mock_app.restart = MagicMock(return_value=None)
    mock_app.start_db = MagicMock(return_value=None)
    mock_app.stop_db = MagicMock(return_value=None)
    mock_app._remove_system_tables = MagicMock(return_value=None)
    os.path.exists = MagicMock(return_value=True)
    volume.VolumeDevice.format = MagicMock(return_value=None)
    volume.VolumeDevice.migrate_data = MagicMock(return_value=None)
    volume.VolumeDevice.mount = MagicMock(return_value=None)
    volume.VolumeDevice.mount_points = MagicMock(return_value=[])

    with patch.object(pkg.Package, 'pkg_is_installed',
                      return_value=is_db_installed):
        # invocation
        self.manager.prepare(context=self.context, packages=packages,
                             config_contents=config_content,
                             databases=None,
                             memory_mb='2048', users=None,
                             device_path=device_path,
                             mount_point=self.__MOUNT_POINT,
                             backup_info=backup_info,
                             # Forward the caller's overrides instead of
                             # hard-coding None.
                             overrides=overrides,
                             cluster_config=None)

    # verification/assertion
    mock_status.begin_install.assert_any_call()
    mock_app.install_if_needed.assert_any_call(packages)
    mock_app._remove_system_tables.assert_any_call()
    # Use the shared constant rather than repeating the literal path.
    mock_app.init_storage_structure.assert_any_call(self.__MOUNT_POINT)
    mock_app.apply_initial_guestagent_configuration.assert_any_call(
        cluster_name=None)
    mock_app.start_db.assert_any_call(update_db=False)
    mock_app.stop_db.assert_any_call()
    if backup_info:
        mock_app._apply_post_restore_updates.assert_called_once_with(
            backup_info)
def test_keyspace_validation(self):
    """Keyspace names of 32 characters pass; 33 characters raise."""
    boundary_name = self._get_random_name(32)
    self.assertEqual(boundary_name,
                     models.CassandraSchema(boundary_name).name)
    with ExpectedException(ValueError):
        models.CassandraSchema(self._get_random_name(33))
def test_user_validation(self):
    """User names of 65535 characters pass; 65536 characters raise."""
    boundary_name = self._get_random_name(65535)
    user = models.CassandraUser(boundary_name, 'password')
    self.assertEqual(boundary_name, user.name)
    self.assertEqual('password', user.password)
    with ExpectedException(ValueError):
        models.CassandraUser(self._get_random_name(65536))
@classmethod
def _serialize_collection(cls, *collection):
    """Serialize every model in *collection* into a list of dicts."""
    # The first parameter of a classmethod is conventionally named
    # ``cls``; it was misleadingly named ``self``.
    return [item.serialize() for item in collection]
@classmethod
def _get_random_name(cls, size, chars=string.ascii_letters + string.digits):
    """Return a random alphanumeric name of ``size`` characters."""
    # ``string.letters`` is Python-2-only (and locale dependent);
    # ``string.ascii_letters`` is the portable spelling.  Also renamed
    # the classmethod's first parameter from ``self`` to ``cls``.
    return ''.join(random.choice(chars) for _ in range(size))
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_create_database(self, conn):
    """create_database() issues one CREATE KEYSPACE per schema."""
    schemas = [models.CassandraSchema('db1'),
               models.CassandraSchema('db2'),
               models.CassandraSchema(self._get_random_name(32))]
    self.manager.create_database(
        self.context, self._serialize_collection(*schemas))
    conn.return_value.execute.assert_has_calls(
        [call(self.__CREATE_DB_FORMAT, (schema.name,))
         for schema in schemas])
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_delete_database(self, conn):
    """delete_database() issues a single DROP KEYSPACE."""
    schema = models.CassandraSchema(self._get_random_name(32))
    self.manager.delete_database(self.context, schema.serialize())
    conn.return_value.execute.assert_called_once_with(
        self.__DROP_DB_FORMAT, (schema.name,))
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_create_user(self, conn):
    """create_user() issues one CREATE USER per serialized user."""
    users = [models.CassandraUser('usr1'),
             models.CassandraUser('usr2', ''),
             models.CassandraUser(self._get_random_name(1025), 'password')]
    self.manager.create_user(
        self.context, self._serialize_collection(*users))
    conn.return_value.execute.assert_has_calls(
        [call(self.__CREATE_USR_FORMAT, (user.name,), (user.password,))
         for user in users])
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_delete_user(self, conn):
    """delete_user() issues a single DROP USER."""
    user = models.CassandraUser(self._get_random_name(1025), 'password')
    self.manager.delete_user(self.context, user.serialize())
    conn.return_value.execute.assert_called_once_with(
        self.__DROP_USR_FORMAT, (user.name,))
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_change_passwords(self, conn):
    """change_passwords() issues one ALTER USER per user."""
    users = [models.CassandraUser('usr1'),
             models.CassandraUser('usr2', ''),
             models.CassandraUser(self._get_random_name(1025), 'password')]
    self.manager.change_passwords(
        self.context, self._serialize_collection(*users))
    conn.return_value.execute.assert_has_calls(
        [call(self.__ALTER_USR_FORMAT, (user.name,), (user.password,))
         for user in users])
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_alter_user_password(self, conn):
    """alter_user_password() issues one ALTER USER per call."""
    users = [models.CassandraUser('usr1'),
             models.CassandraUser('usr2', ''),
             models.CassandraUser(self._get_random_name(1025), 'password')]
    for user in users:
        self.admin.alter_user_password(user)
    conn.return_value.execute.assert_has_calls(
        [call(self.__ALTER_USR_FORMAT, (user.name,), (user.password,))
         for user in users])
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_grant_access(self, conn):
    """grant_access() grants every access modifier on each keyspace."""
    usr1 = models.CassandraUser('usr1')
    usr2 = models.CassandraUser('usr1', 'password')
    db1 = models.CassandraSchema('db1')
    db2 = models.CassandraSchema('db2')
    db3 = models.CassandraSchema('db3')
    # db2 is also granted to usr1, though its calls are not asserted here.
    self.manager.grant_access(
        self.context, usr1.name, None, [db1.name, db2.name])
    self.manager.grant_access(self.context, usr2.name, None, [db3.name])
    expected = [
        call(self.__GRANT_FORMAT, (modifier, db.name, user.name))
        for modifier in self.__ACCESS_MODIFIERS
        for db, user in ((db1, usr1), (db3, usr2))
    ]
    conn.return_value.execute.assert_has_calls(expected, any_order=True)
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_revoke_access(self, conn):
    """revoke_access() revokes all permissions on the given keyspace."""
    user_a = models.CassandraUser('usr1')
    user_b = models.CassandraUser('usr1', 'password')
    ks_a = models.CassandraSchema('db1')
    ks_b = models.CassandraSchema('db2')
    self.manager.revoke_access(self.context, user_a.name, None, ks_a.name)
    self.manager.revoke_access(self.context, user_b.name, None, ks_b.name)
    conn.return_value.execute.assert_has_calls([
        call(self.__REVOKE_FORMAT, (ks_a.name, user_a.name)),
        call(self.__REVOKE_FORMAT, (ks_b.name, user_b.name)),
    ])
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_get_available_keyspaces(self, conn):
    """list_databases() queries the system keyspaces table."""
    self.manager.list_databases(self.context)
    conn.return_value.execute.assert_called_once_with(self.__LIST_DB_FORMAT)
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_list_databases(self, conn):
    """list_databases() returns (serialized keyspaces, paging marker)."""
    db1 = models.CassandraSchema('db1')
    db2 = models.CassandraSchema('db2')
    db3 = models.CassandraSchema(self._get_random_name(32))
    with patch.object(self.admin, self.__N_GAK, return_value={db1, db2,
                                                              db3}):
        found = self.manager.list_databases(self.context)
        self.assertEqual(2, len(found))
        self.assertEqual(3, len(found[0]))
        # No paging marker is expected.
        self.assertEqual(None, found[1])
        self.assertIn(db1.serialize(), found[0])
        self.assertIn(db2.serialize(), found[0])
        self.assertIn(db3.serialize(), found[0])

    # No available keyspaces yields an empty result and no marker.
    with patch.object(self.admin, self.__N_GAK, return_value=set()):
        found = self.manager.list_databases(self.context)
        self.assertEqual(([], None), found)
def test_get_acl(self):
    """_get_acl() folds LIST PERMISSIONS rows into {user: {ks: {perm}}}.

    Expectations demonstrated by the fixtures below: '<all keyspaces>'
    rows fan out to every available keyspace; rows with an empty
    resource or empty permission, and table-level resources, are
    excluded; repeated rows collapse to a single permission entry.
    """
    r0 = NonCallableMagicMock(username='user1', resource='<all keyspaces>',
                              permission='SELECT')
    r1 = NonCallableMagicMock(username='user2', resource='<keyspace ks1>',
                              permission='SELECT')
    r2 = NonCallableMagicMock(username='user2', resource='<keyspace ks2>',
                              permission='SELECT')
    r3 = NonCallableMagicMock(username='user2', resource='<keyspace ks2>',
                              permission='ALTER')
    r4 = NonCallableMagicMock(username='user3', resource='<table ks2.t1>',
                              permission='SELECT')
    r5 = NonCallableMagicMock(username='user3', resource='',
                              permission='ALTER')
    r6 = NonCallableMagicMock(username='user3', resource='<keyspace ks2>',
                              permission='')
    r7 = NonCallableMagicMock(username='user3', resource='',
                              permission='')
    r8 = NonCallableMagicMock(username='user3', resource='<keyspace ks1>',
                              permission='DELETE')
    r9 = NonCallableMagicMock(username='user4', resource='<all keyspaces>',
                              permission='UPDATE')
    r10 = NonCallableMagicMock(username='user4', resource='<keyspace ks1>',
                               permission='DELETE')

    available_ks = {models.CassandraSchema('ks1'),
                    models.CassandraSchema('ks2'),
                    models.CassandraSchema('ks3')}

    # Without a username: all permissions are listed and the available
    # keyspaces are queried to expand the '<all keyspaces>' rows.
    mock_result_set = [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r9, r9, r10]
    execute_mock = MagicMock(return_value=mock_result_set)
    mock_client = MagicMock(execute=execute_mock)
    with patch.object(self.admin,
                      self.__N_GAK, return_value=available_ks) as gak_mock:
        acl = self.admin._get_acl(mock_client)
    execute_mock.assert_called_once_with(
        self.__LIST_PERMISSIONS_FORMAT)
    gak_mock.assert_called_once_with(mock_client)
    self.assertEqual({'user1': {'ks1': {'SELECT'},
                                'ks2': {'SELECT'},
                                'ks3': {'SELECT'}},
                      'user2': {'ks1': {'SELECT'},
                                'ks2': {'SELECT', 'ALTER'}},
                      'user3': {'ks1': {'DELETE'}},
                      'user4': {'ks1': {'UPDATE', 'DELETE'},
                                'ks2': {'UPDATE'},
                                'ks3': {'UPDATE'}}
                      },
                     acl)

    # With a username: only that user's permissions are listed and the
    # available-keyspaces helper is not consulted.
    mock_result_set = [r1, r2, r3]
    execute_mock = MagicMock(return_value=mock_result_set)
    mock_client = MagicMock(execute=execute_mock)
    with patch.object(self.admin,
                      self.__N_GAK, return_value=available_ks) as gak_mock:
        acl = self.admin._get_acl(mock_client, username='user2')
    execute_mock.assert_called_once_with(
        self.__LIST_PERMISSIONS_OF_FORMAT.format('user2'))
    gak_mock.assert_not_called()
    self.assertEqual({'user2': {'ks1': {'SELECT'},
                                'ks2': {'SELECT', 'ALTER'}}}, acl)

    # An unknown user produces an empty ACL.
    mock_result_set = []
    execute_mock = MagicMock(return_value=mock_result_set)
    mock_client = MagicMock(execute=execute_mock)
    with patch.object(self.admin,
                      self.__N_GAK, return_value=available_ks) as gak_mock:
        acl = self.admin._get_acl(mock_client, username='nonexisting')
    execute_mock.assert_called_once_with(
        self.__LIST_PERMISSIONS_OF_FORMAT.format('nonexisting'))
    gak_mock.assert_not_called()
    self.assertEqual({}, acl)
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_get_listed_users(self, conn):
    """list_users() combines LIST USERS output with the ACL."""
    usr1 = models.CassandraUser(self._get_random_name(1025))
    usr2 = models.CassandraUser(self._get_random_name(1025))
    usr3 = models.CassandraUser(self._get_random_name(1025))
    db1 = models.CassandraSchema('db1')
    db2 = models.CassandraSchema('db2')
    usr1.databases.append(db1.serialize())
    usr3.databases.append(db2.serialize())

    # Fake LIST USERS result rows.  configure_mock() is used because
    # ``name`` is a reserved argument of the Mock constructor and cannot
    # be set as an attribute there.
    rv_1 = NonCallableMagicMock()
    rv_1.configure_mock(name=usr1.name, super=False)
    rv_2 = NonCallableMagicMock()
    rv_2.configure_mock(name=usr2.name, super=False)
    rv_3 = NonCallableMagicMock()
    rv_3.configure_mock(name=usr3.name, super=True)

    with patch.object(conn.return_value, 'execute', return_value=iter(
            [rv_1, rv_2, rv_3])):
        # ACL: usr1 can SELECT on db1 (db2 entry is empty and should be
        # dropped); usr3 can SELECT on db2; usr2 has no grants.
        with patch.object(self.admin, '_get_acl',
                          return_value={usr1.name: {db1.name: {'SELECT'},
                                                    db2.name: {}},
                                        usr3.name: {db2.name: {'SELECT'}}}
                          ):
            usrs = self.manager.list_users(self.context)
            conn.return_value.execute.assert_has_calls([
                call(self.__LIST_USR_FORMAT),
            ], any_order=True)
            self.assertIn(usr1.serialize(), usrs[0])
            self.assertIn(usr2.serialize(), usrs[0])
            self.assertIn(usr3.serialize(), usrs[0])
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_list_access(self, conn):
    """list_access returns the databases attached to the named user."""
    usr1 = models.CassandraUser('usr1')
    usr2 = models.CassandraUser('usr2')
    # Long name plus a password: neither should affect access listing.
    usr3 = models.CassandraUser(self._get_random_name(1025), 'password')
    db1 = models.CassandraSchema('db1').serialize()
    db2 = models.CassandraSchema('db2').serialize()
    usr2.databases.append(db1)
    usr3.databases.append(db1)
    usr3.databases.append(db2)
    with patch.object(self.admin, self.__N_GLU, return_value={usr1, usr2,
                                                              usr3}):
        usr1_dbs = self.manager.list_access(self.context, usr1.name, None)
        usr2_dbs = self.manager.list_access(self.context, usr2.name, None)
        usr3_dbs = self.manager.list_access(self.context, usr3.name, None)
        # Each user's access is exactly the databases appended above.
        self.assertEqual([], usr1_dbs)
        self.assertEqual([db1], usr2_dbs)
        self.assertEqual([db1, db2], usr3_dbs)
    # An unknown user raises UserNotFound rather than returning [].
    with patch.object(self.admin, self.__N_GLU, return_value=set()):
        with ExpectedException(exception.UserNotFound):
            self.manager.list_access(self.context, usr3.name, None)
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_list_users(self, conn):
    """Listing users returns one unpaginated page built from all users."""
    users = [
        models.CassandraUser('usr1'),
        models.CassandraUser('usr2'),
        models.CassandraUser(self._get_random_name(1025), 'password'),
    ]
    with patch.object(self.admin, self.__N_GLU,
                      return_value=set(users)):
        page, marker = self.manager.list_users(self.context)
        # No paging marker and one entry per known user.
        self.assertEqual(None, marker)
        self.assertEqual(3, len(page))
        for user in users:
            self.assertIn(user.serialize(), page)
    # No users at all yields an empty page and no marker.
    with patch.object(self.admin, self.__N_GLU, return_value=set()):
        self.assertEqual(([], None), self.manager.list_users(self.context))
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_get_user(self, conn):
    """get_user returns the matching user's serialized form, else None."""
    target = models.CassandraUser('usr2')
    known_users = {models.CassandraUser('usr1'),
                   target,
                   models.CassandraUser(self._get_random_name(1025),
                                        'password')}
    with patch.object(self.admin, self.__N_GLU, return_value=known_users):
        self.assertEqual(
            target.serialize(),
            self.manager.get_user(self.context, target.name, None))
    # With no users at all the lookup quietly returns None.
    with patch.object(self.admin, self.__N_GLU, return_value=set()):
        self.assertIsNone(
            self.manager.get_user(self.context, target.name, None))
@patch.object(cass_service.CassandraAdmin, '_deserialize_keyspace',
              side_effect=lambda p1: p1)
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_rename_user(self, conn, ks_deserializer):
    """Renaming a user is implemented as create-new / re-grant / drop-old."""
    usr = models.CassandraUser('usr')
    db1 = models.CassandraSchema('db1').serialize()
    db2 = models.CassandraSchema('db2').serialize()
    # The user being renamed has access to two databases.
    usr.databases.append(db1)
    usr.databases.append(db2)
    new_user = models.CassandraUser('new_user')
    # Stub out user construction and the admin's create/grant/drop helpers
    # so only the rename orchestration itself is exercised.
    with patch(self.__N_CAU, return_value=new_user):
        with patch.object(self.admin, self.__N_BU, return_value=usr):
            with patch.object(self.admin, self.__N_CU) as create:
                with patch.object(self.admin, self.__N_GFA) as grant:
                    with patch.object(self.admin, self.__N_DU) as drop:
                        usr_attrs = {'name': 'user', 'password': 'trove'}
                        self.manager.update_attributes(self.context,
                                                       usr.name, None,
                                                       usr_attrs)
                        # Cassandra cannot rename in place: the new user is
                        # created, access is re-granted for both databases,
                        # then the old user is dropped.
                        create.assert_called_once_with(ANY, new_user)
                        grant.assert_has_calls([call(ANY, db1, ANY),
                                                call(ANY, db2, ANY)])
                        drop.assert_called_once_with(ANY, usr)
@patch.object(cass_service.CassandraLocalhostConnection, '__enter__')
def test_update_attributes(self, conn):
    """update_attributes routes to rename or password-alter only as needed."""
    usr = models.CassandraUser('usr', 'pwd')
    with patch.object(self.admin, self.__N_BU, return_value=usr):
        # Case: same name and same password -- nothing to do.
        usr_attrs = {'name': usr.name, 'password': usr.password}
        with patch.object(self.admin, self.__N_RU) as rename:
            with patch.object(self.admin, self.__N_AUP) as alter:
                self.manager.update_attributes(self.context, usr.name,
                                               None, usr_attrs)
                self.assertEqual(0, rename.call_count)
                self.assertEqual(0, alter.call_count)
        # Case: new name and new password -- one rename call handles both
        # (the password is passed to rename, not to alter).
        usr_attrs = {'name': 'user', 'password': 'password'}
        with patch.object(self.admin, self.__N_RU) as rename:
            with patch.object(self.admin, self.__N_AUP) as alter:
                self.manager.update_attributes(self.context, usr.name,
                                               None, usr_attrs)
                rename.assert_called_once_with(ANY, usr, usr_attrs['name'],
                                               usr_attrs['password'])
                self.assertEqual(0, alter.call_count)
        # Case: new name with the current password restated -- still rename.
        usr_attrs = {'name': 'user', 'password': usr.password}
        with patch.object(self.admin, self.__N_RU) as rename:
            with patch.object(self.admin, self.__N_AUP) as alter:
                self.manager.update_attributes(self.context, usr.name,
                                               None, usr_attrs)
                rename.assert_called_once_with(ANY, usr, usr_attrs['name'],
                                               usr_attrs['password'])
                self.assertEqual(0, alter.call_count)
        # Case: new name without a password -- rejected; nothing happens.
        usr_attrs = {'name': 'user'}
        with patch.object(self.admin, self.__N_RU) as rename:
            with patch.object(self.admin, self.__N_AUP) as alter:
                with ExpectedException(
                        exception.UnprocessableEntity, "Updating username "
                        "requires specifying a password as well."):
                    self.manager.update_attributes(self.context, usr.name,
                                                   None, usr_attrs)
                self.assertEqual(0, rename.call_count)
                self.assertEqual(0, alter.call_count)
        # Case: same name, new password -- password change only.
        usr_attrs = {'name': usr.name, 'password': 'password'}
        with patch.object(self.admin, self.__N_RU) as rename:
            with patch.object(self.admin, self.__N_AUP) as alter:
                self.manager.update_attributes(self.context, usr.name,
                                               None, usr_attrs)
                alter.assert_called_once_with(ANY, usr)
                self.assertEqual(0, rename.call_count)
        # Case: password-only update equal to the current password -- no-op.
        usr_attrs = {'password': usr.password}
        with patch.object(self.admin, self.__N_RU) as rename:
            with patch.object(self.admin, self.__N_AUP) as alter:
                self.manager.update_attributes(self.context, usr.name,
                                               None, usr_attrs)
                self.assertEqual(0, rename.call_count)
                self.assertEqual(0, alter.call_count)
        # Case: password-only update with a new password -- alter only.
        usr_attrs = {'password': 'trove'}
        with patch.object(self.admin, self.__N_RU) as rename:
            with patch.object(self.admin, self.__N_AUP) as alter:
                self.manager.update_attributes(self.context, usr.name,
                                               None, usr_attrs)
                alter.assert_called_once_with(ANY, usr)
                self.assertEqual(0, rename.call_count)
def test_update_overrides(self):
    """Applying overrides delegates to the configuration manager."""
    config_manager = MagicMock()
    self.manager._app.configuration_manager = config_manager
    new_values = NonCallableMagicMock()
    self.manager.update_overrides(Mock(), new_values)
    # Only the 'apply' path may be taken; nothing must be removed.
    config_manager.apply_user_override.assert_called_once_with(new_values)
    config_manager.remove_user_override.assert_not_called()
def test_remove_overrides(self):
    """Removing overrides delegates to the configuration manager."""
    config_manager = MagicMock()
    self.manager._app.configuration_manager = config_manager
    self.manager.update_overrides(Mock(), {}, remove=True)
    # Only the 'remove' path may be taken; nothing must be applied.
    config_manager.remove_user_override.assert_called_once_with()
    config_manager.apply_user_override.assert_not_called()
def test_apply_overrides(self):
    """apply_overrides is a no-op and returns None."""
    result = self.manager.apply_overrides(Mock(), NonCallableMagicMock())
    self.assertIsNone(result)
def test_enable_root(self):
    """enable_root creates the superuser once, then only resets passwords."""
    # (already_enabled, expected admin call) pairs: the first enablement
    # creates the superuser; subsequent calls just alter the password.
    for already_enabled, admin_method in ((False, '_create_superuser'),
                                          (True, 'alter_user_password')):
        with patch.object(self.manager._app, 'is_root_enabled',
                          return_value=already_enabled):
            with patch.object(cass_service.CassandraAdmin,
                              admin_method) as admin_call:
                self.manager.enable_root(self.context)
                admin_call.assert_called_once_with(ANY)
def test_is_root_enabled(self):
    """Root is enabled iff a superuser other than the trove admin exists."""
    trove_admin = Mock()
    trove_admin.configure_mock(name=self.manager._app._ADMIN_USER)
    other_admin = Mock()
    other_admin.configure_mock(name='someuser')
    # (superuser list, expected result); the built-in admin never counts.
    cases = [([], False),
             ([trove_admin], False),
             ([other_admin], True),
             ([trove_admin, other_admin], True)]
    for superusers, expected in cases:
        with patch.object(cass_service.CassandraAdmin, 'list_superusers',
                          return_value=superusers):
            self.assertEqual(expected,
                             self.manager.is_root_enabled(self.context))
| |
#!/usr/bin/env python
#
# Copyright 2015, Corey Richardson
# Copyright 2014, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#
#
# seL4 System Call Stub Generator
# ===============================
#
# 2009 David Greenaway
#
# This script generates system call stubs based on an XML specification of the
# objects that the kernel exports (and the methods those objects export).
#
# Previously, Magpie (an IDL compiler) was used to generate these stubs. As
# Magpie development progressed, support for a fixed ABI (i.e., the ABI
# implemented by the seL4 kernel) was lost, and support for generating
# alignment-safe code (required by platforms such as ARM) was also removed.
#
# This script is a stop-gap until these features can be restored in Magpie
# once again.
#
# The script has certain limitations:
#
# * It must be told the size of all types. This includes complex types
# such as structures.
#
# We generate code that will cause compilation to fail if we get any
# object's size wrong, which should help mitigate the number of bugs caused
# because of this script becoming out of date compared to the source files.
#
# * The word-size is fixed at 32 bits, and we may implicitly assume that
# sizeof(int) == sizeof(long) == 32.
#
# Though the constant 'WORD_SIZE_BITS' has been used throughout, there
# may be implicit assumptions hanging around causing things to fail.
#
# * The script has only been tested on the actual seL4 API XML description.
#
# No stress testing has taken place; there may be bugs if new and wonderful
# XML method descriptions are added.
#
import xml.dom.minidom
import optparse
# Number of bits in a standard word
WORD_SIZE_BITS = 32

# Maximum number of words that will be in a message.
MAX_MESSAGE_LENGTH = 32

# How many message words are passed directly in CPU registers, per
# architecture; the remainder go through the IPC buffer.
MESSAGE_REGISTERS_FOR_ARCH = {
    "arm": 4,
    "x86": 2,
}

# Map from a bit width to the Rust unsigned integer type of that width.
TYPES = {
    8: "u8",
    16: "u16",
    32: "u32",
    64: "u64"
}

# C / seL4 type names translated to their Rust equivalents; names not
# listed here pass through unchanged (see translate_type()).
TYPE_TRANS = {
    "int": "isize",
    "seL4_Uint8": "u8",
    "seL4_Uint16": "u16",
    "seL4_Uint32": "u32",
    "seL4_Uint64": "u64",
    "seL4_Bool": "u8",
    "seL4_CapData_t": "seL4_CapData",
}
def translate_type(name):
    """Map a C/seL4 type name to its Rust equivalent (identity if unknown)."""
    return TYPE_TRANS.get(name, name)
def translate_expr(name):
    """Escape the Rust keyword 'type' in generated expressions."""
    return "type_" if name == "type" else name
def construction(expr, param):
    """Render 'expr' as a value of the parameter's Rust type.

    Struct types are built from their word array; everything else is a
    plain 'as' cast to the translated type name.
    """
    type_name = param.type.name
    if isinstance(param.type, StructType):
        return "%s { words: [%s] }" % (type_name, expr)
    return "%s as %s" % (expr, translate_type(type_name))
class Type(object):
    """
    This class represents a C type (such as an 'int', structure or
    pointer).
    """

    def __init__(self, name, size_bits, double_word=False, native_size_bits=None):
        """
        Define a new type, named 'name' that is 'size_bits' bits
        long.
        """
        self.name = name
        self.size_bits = size_bits
        self.double_word = double_word

        #
        # Store the number of bits C will use for this type
        # in its native unpacked form.
        #
        # Required for 'bool', for example, which only uses 1
        # bit when packed, but 32 bits when unpacked.
        #
        if native_size_bits:
            self.native_size_bits = native_size_bits
        else:
            self.native_size_bits = size_bits

    def pass_by_reference(self):
        # Anything wider than a machine word is passed via pointer, except
        # double-word types, which are split across two message words.
        return self.size_bits > WORD_SIZE_BITS and not self.double_word

    def render_parameter_name(self, name):
        """
        Return a string of code that would be used in a function
        parameter declaration.
        """
        # 'type' is a Rust keyword; rename to keep the generated code valid.
        if name == "type":
            name = "type_"
        return "%s: %s" % (name, translate_type(self.name))

    def pointer(self):
        """
        Return a new Type class representing a pointer to this
        object.
        """
        return PointerType(self)

    def c_expression(self, var_name, word_num=0):
        """
        Return code for an expression that gets word 'word_num'
        of this type.
        """
        # Plain types fit in a single word.
        assert word_num == 0
        return "%s" % var_name

    def double_word_expression(self, var_name, word_num):
        # Word 0 is the low machine word of the value; word 1 is the value
        # shifted right by one word, each cast to the machine word type.
        assert(word_num == 0 or word_num == 1)

        if word_num == 0:
            return "{1} as {0}".format(TYPES[WORD_SIZE_BITS], var_name)
        elif word_num == 1:
            return "({1} >> {2}) as {0}".format(TYPES[WORD_SIZE_BITS], var_name, WORD_SIZE_BITS)
class PointerType(Type):
    """
    A pointer to a standard type.
    """

    def __init__(self, base_type):
        # A pointer is always one machine word wide, whatever it points at.
        Type.__init__(self, base_type.name, WORD_SIZE_BITS)
        self.base_type = base_type

    def render_parameter_name(self, name):
        # 'type' is a Rust keyword; rename to keep the generated code valid.
        if name == "type":
            name = "type_"
        return "%s: *mut %s" % (name, translate_type(self.name))

    def c_expression(self, var_name, word_num=0):
        assert word_num == 0
        # Dereference the pointer to obtain the value to marshal.
        return "unsafe { *%s }" % var_name

    def pointer(self):
        # Pointer-to-pointer is not needed by the stub generator.
        raise NotImplementedError()
class CapType(Type):
    """A capability type: a typedef of seL4_CPtr, always one word wide."""

    def __init__(self, name):
        super(CapType, self).__init__(name, WORD_SIZE_BITS)
class StructType(Type):
    """
    A C 'struct' definition.
    """

    def __init__(self, name, size_bits):
        Type.__init__(self, name, size_bits)

    def c_expression(self, var_name, word_num, member_name):
        # NOTE: unlike the base class, 'member_name' is required here — a
        # list of member names, one per word of the struct.
        assert word_num < self.size_bits / WORD_SIZE_BITS

        # Multiword structure.
        assert self.pass_by_reference()
        return "(*%s).%s" % (var_name, member_name[word_num])
class BitFieldType(Type):
    """A special C 'struct' produced by the seL4 bitfield generator."""

    def __init__(self, name, size_bits):
        super(BitFieldType, self).__init__(name, size_bits)

    def c_expression(self, var_name, word_num=0):
        # Bitfield structs expose their storage as a plain word array.
        return "%s.words[%d]" % (var_name, word_num)
class Parameter(object):
    """A single named argument of a generated stub, typed by a Type."""

    def __init__(self, name, type):
        self.type = type
        self.name = name
#
# The architecture-independent types known to the generator, including
# their sizes in bits (see the Type hierarchy above).
#
types = [
    # Simple Types
    Type("int", WORD_SIZE_BITS),

    Type("seL4_Uint8", 8),
    Type("seL4_Uint16", 16),
    Type("seL4_Uint32", 32),
    Type("seL4_Uint64", 64, double_word=True),
    Type("seL4_Word", WORD_SIZE_BITS),
    # Packs into 1 bit but occupies 8 bits in its native unpacked form.
    Type("seL4_Bool", 1, native_size_bits=8),
    Type("seL4_CapRights", WORD_SIZE_BITS),

    # seL4 Structures
    BitFieldType("seL4_CapData_t", WORD_SIZE_BITS),

    # Object types
    CapType("seL4_CPtr"),
    CapType("seL4_CNode"),
    CapType("seL4_IRQHandler"),
    CapType("seL4_IRQControl"),
    CapType("seL4_TCB"),
    CapType("seL4_Untyped"),
    CapType("seL4_DomainSet"),
]
#
# Arch-specific types.
#
arch_types = {
    "arm" : [
        Type("seL4_ARM_VMAttributes", WORD_SIZE_BITS),
        CapType("seL4_ARM_Page"),
        CapType("seL4_ARM_PageTable"),
        CapType("seL4_ARM_PageDirectory"),
        CapType("seL4_ARM_ASIDControl"),
        CapType("seL4_ARM_ASIDPool"),
        # 17 registers saved per thread on ARM.
        StructType("seL4_UserContext", WORD_SIZE_BITS * 17),
    ],

    "x86" : [
        Type("seL4_IA32_VMAttributes", WORD_SIZE_BITS),
        CapType("seL4_IA32_ASIDControl"),
        CapType("seL4_IA32_ASIDPool"),
        CapType("seL4_IA32_IOSpace"),
        CapType("seL4_IA32_IOPort"),
        CapType("seL4_IA32_Page"),
        CapType("seL4_IA32_PageDirectory"),
        CapType("seL4_IA32_PageTable"),
        CapType("seL4_IA32_IOPageTable"),
        # 13 registers saved per thread on IA-32.
        StructType("seL4_UserContext", WORD_SIZE_BITS * 13),
    ]
}
def struct_members(type, structs):
    """Return the member-name list for the given struct type.

    'structs' is a list of (struct_name, member_names) pairs; exactly one
    entry must match the type's name.
    """
    matches = [names for (struct_name, names) in structs
               if struct_name == type.name]
    assert len(matches) == 1
    return matches[0]
def align_up(x, a):
    """Round x up to the next multiple of a (x itself if already aligned)."""
    remainder = x % a
    return x if remainder == 0 else x + a - remainder
def get_parameter_positions(parameters):
    """
    Determine where each parameter should be packed in the generated message.

    Returns a list of:

        (param, first_bit, num_bits)

    tuples (the previous docstring described a stale 4-tuple shape).

    We guarantee that either (num_words == 1) or (bit_offset == 0).
    """
    bits_used = 0
    results = []

    for param in parameters:
        # How big are we?
        type_size = param.type.size_bits

        # We need everything to be a power of two, or word sized.
        assert ((type_size & (type_size - 1)) == 0) or (type_size % WORD_SIZE_BITS == 0)

        # Align up to our own size, or the next word. (Whichever is smaller.)
        bits_used = align_up(bits_used, min(type_size, WORD_SIZE_BITS))

        # Place ourself.
        results.append((param, bits_used, type_size))
        bits_used += type_size

    return results
def generate_param_list(input_params, output_params):
    """Render a stub's parameter list: inputs (by value or by pointer)
    followed by the pass-by-reference outputs."""
    rendered = []
    for p in input_params:
        target = p.type.pointer() if p.type.pass_by_reference() else p.type
        rendered.append(target.render_parameter_name(p.name))
    rendered.extend(p.type.pointer().render_parameter_name(p.name)
                    for p in output_params if p.type.pass_by_reference())
    return ", ".join(rendered)
def generate_marshal_expressions(params, num_mrs, structs):
    """
    Generate marshalling expressions for the given set of inputs.

    We return a list of expressions; one expression per word required
    to marshal all the inputs.
    """
    def generate_param_code(param, first_bit, num_bits, word_array):
        """
        Generate code to marshal the given parameter into the correct
        location in the message.

        'word_array' is an array of the final contents of the message.
        word_array[k] contains what should be placed in the k'th message
        register, and is an array of expressions that will (eventually)
        be bitwise-or'ed into it.
        """
        # Floor division ('//') keeps the indices integral under Python 3
        # as well; identical to '/' on ints under Python 2.
        target_word = first_bit // WORD_SIZE_BITS
        target_offset = first_bit % WORD_SIZE_BITS

        # Double-word type: split into a low and a high word expression.
        if param.type.double_word:
            word_array[target_word].append(param.type.double_word_expression(param.name, 0))
            word_array[target_word + 1].append(param.type.double_word_expression(param.name, 1))
            return

        # Single full word?
        if num_bits == WORD_SIZE_BITS:
            assert target_offset == 0
            word_array[target_word].append(param.type.c_expression(param.name))
            return

        # Part of a word: mask to size, then shift into position.
        if num_bits < WORD_SIZE_BITS:
            expr = param.type.c_expression(param.name)
            expr = "(%s & %#x)" % (expr, (1 << num_bits) - 1)
            if target_offset:
                expr = "((%s as seL4_Word) << %d)" % (expr, target_offset)
            word_array[target_word].append(expr)
            return

        # Multiword array: one expression per word of the struct.
        assert target_offset == 0
        num_words = num_bits // WORD_SIZE_BITS
        for i in range(num_words):
            expr = param.type.c_expression(param.name, i, struct_members(param.type, structs))
            word_array[target_word + i].append(expr)

    # Get their marshalling positions
    positions = get_parameter_positions(params)

    # Generate marshal code.
    # NOTE(review): only MAX_MESSAGE_LENGTH - num_mrs word slots are
    # allocated here; presumably no message ever needs more — confirm.
    words = [[] for _ in range(num_mrs, MAX_MESSAGE_LENGTH)]
    for (param, first_bit, num_bits) in positions:
        generate_param_code(param, first_bit, num_bits, words)

    # Return the non-empty words, OR-ing the partial expressions together.
    return [" | ".join(map(lambda x: "(" + translate_expr(x) + " as seL4_Word)", x))
            for x in words if len(x) > 0]
def generate_unmarshal_expressions(params):
    """
    Generate unmarshalling expressions for the given set of outputs.

    We return a list of (param, expressions) pairs; one list of expressions
    per output variable, covering each word that must be unmarshalled. The
    expressions contain tokens of the form:

        "%(w0)s"

    indicating a read from a word in the message.
    """
    def unmarshal_single_param(first_bit, num_bits):
        """
        Unmarshal a single parameter.
        """
        # Floor division ('//') keeps the indices integral under Python 3
        # as well; identical to '/' on ints under Python 2.
        first_word = first_bit // WORD_SIZE_BITS
        bit_offset = first_bit % WORD_SIZE_BITS

        # Multiword type?
        if num_bits > WORD_SIZE_BITS:
            result = []
            for x in range(num_bits // WORD_SIZE_BITS):
                result.append("%%(w%d)s" % (x + first_word))
            return result

        # Otherwise, bit packed.
        if num_bits == WORD_SIZE_BITS:
            return ["%%(w%d)s" % first_word]
        elif bit_offset == 0:
            return ["(%%(w%d)s & %#x)" % (
                first_word, (1 << num_bits) - 1)]
        else:
            return ["(%%(w%d)s >> %d) & %#x" % (
                first_word, bit_offset, (1 << num_bits) - 1)]

    # Get their marshalling positions
    positions = get_parameter_positions(params)

    # Generate the unmarshal code.
    results = []
    for (param, first_bit, num_bits) in positions:
        results.append((param, unmarshal_single_param(first_bit, num_bits)))
    return results
def generate_result_struct(interface_name, method_name, output_params):
    """
    Generate the struct definition a stub returns to the user.

    There is always an implicit 'error' field (not present in
    'output_params'); pass-by-reference outputs are excluded, since the
    caller only ever sees pointers to them. Returns None when only the
    error code needs returning.
    """
    # Keep only the outputs that are returned by value.
    by_value = [p for p in output_params if not p.type.pass_by_reference()]
    if not by_value:
        return None

    #
    # Emit, e.g.:
    #
    #   #[repr(C)] pub struct seL4_CNode_Copy {
    #       error: isize,
    #       foo: seL4_Word,
    #   }
    #
    lines = ["#[repr(C)] pub struct %s_%s {" % (interface_name, method_name),
             "\terror: isize,"]
    lines.extend("\t%s," % p.type.render_parameter_name(p.name)
                 for p in by_value)
    lines.append("}")
    lines.append("")
    return "\n".join(lines)
def generate_stub(arch, interface_name, method_name, method_id, input_params, output_params, structs, use_only_ipc_buffer):
    """Generate the source of a single system call stub.

    'input_params'/'output_params' are Parameter lists; 'structs' is the
    (name, members) list parsed from the XML; 'use_only_ipc_buffer' forces
    every argument through the IPC buffer instead of message registers.
    Returns the stub as a string.
    """
    result = []

    if use_only_ipc_buffer:
        num_mrs = 0
    else:
        num_mrs = MESSAGE_REGISTERS_FOR_ARCH[arch]

    # Split out cap parameters and standard parameters
    standard_params = []
    cap_params = []
    for x in input_params:
        if isinstance(x.type, CapType):
            cap_params.append(x)
        else:
            standard_params.append(x)

    # Determine if we are returning a structure, or just the error code.
    returning_struct = False
    results_structure = generate_result_struct(interface_name, method_name, output_params)
    if results_structure:
        return_type = "%s_%s" % (interface_name, method_name)
        returning_struct = True
    else:
        return_type = "isize"

    #
    # Print function header.
    #
    #   static inline int
    #   seL4_Untyped_Retype(...)
    #   {
    #
    result.append("#[inline(always)]")
    result.append("pub unsafe fn %s_%s(%s) -> %s" % (interface_name, method_name,
                                                     generate_param_list(input_params, output_params), return_type))
    result.append("{")

    #
    # Get a list of expressions for our caps and inputs.
    #
    input_expressions = generate_marshal_expressions(standard_params, num_mrs, structs)
    cap_expressions = [x.name for x in cap_params]
    # The first capability is the service being invoked; any others are
    # transferred inside the message.
    service_cap = cap_expressions[0]
    cap_expressions = cap_expressions[1:]

    #
    # Compute how many words the inputs and output will require.
    #
    input_param_words = len(input_expressions)
    # BUG FIX: use floor division so this stays an int under Python 3;
    # it is used below as a range() bound, which rejects floats.
    output_param_words = sum([p.type.size_bits for p in output_params]) // WORD_SIZE_BITS

    #
    # Setup variables we will need.
    #
    if returning_struct:
        result.append("\tlet mut result: %s = ::core::mem::zeroed();" % return_type)
    result.append("\tlet tag = seL4_MessageInfo::new(InvocationLabel::%s as u32, 0, %d, %d);" % (method_id, len(cap_expressions), len(input_expressions)))
    result.append("\tlet output_tag;")
    for i in range(min(num_mrs, max(input_param_words, output_param_words))):
        result.append("\tlet mut mr%d: seL4_Word = 0;" % i)
    result.append("")

    #
    # Copy capabilities.
    #
    #   /* Setup input capabilities. */
    #   seL4_SetCap(i, cap);
    #
    if len(cap_expressions) > 0:
        result.append("\t/* Setup input capabilities. */")
        for i in range(len(cap_expressions)):
            result.append("\tseL4_SetCap(%d, %s);" % (i, cap_expressions[i]))
        result.append("")

    #
    # Copy in the inputs.
    #
    #   /* Marshal input parameters. */
    #   seL4_SetMR(i, v);
    #   ...
    #
    if len(input_expressions) > 0:
        result.append("\t/* Marshal input parameters. */")
        for i in range(len(input_expressions)):
            # 'type' is a Rust keyword; use the escaped name.
            if input_expressions[i] == "type":
                input_expressions[i] = "type_"
            if i < num_mrs:
                result.append("\tmr%d = %s as seL4_Word;" % (i, input_expressions[i]))
            else:
                result.append("\tseL4_SetMR(%d, %s);" % (i, input_expressions[i]))
        result.append("")

    #
    # Generate the call.
    #
    call_arguments = []
    for i in range(num_mrs):
        if i < max(input_param_words, output_param_words):
            call_arguments.append("&mut mr%d" % i)
        else:
            call_arguments.append("::core::ptr::null_mut()")

    if use_only_ipc_buffer:
        result.append("\t/* Perform the call. */")
        result.append("\toutput_tag = seL4_Call(%s, tag);" % service_cap)
    else:
        result.append("\t/* Perform the call, passing in-register arguments directly. */")
        result.append("\toutput_tag = seL4_CallWithMRs(%s, tag," % (service_cap))
        result.append("\t\t%s);" % ', '.join(
            [call_arguments[i] for i in range(num_mrs)]))
    result.append("")

    #
    # Generate unmarshalling code.
    #
    if len(output_params) > 0:
        result.append("\t/* Unmarshal result. */")
        source_words = {}
        for i in range(MAX_MESSAGE_LENGTH):
            if i < num_mrs:
                source_words["w%d" % i] = "mr%d" % i
            else:
                source_words["w%d" % i] = "seL4_GetMR(%d)" % i
        unmarshalled_params = generate_unmarshal_expressions(output_params)
        for (param, words) in unmarshalled_params:
            if param.type.pass_by_reference():
                # Written through the caller-supplied pointer, one word at
                # a time into the named struct members.
                members = struct_members(param.type, structs)
                for i in range(len(words)):
                    result.append("\t(*%s).%s = %s;" % (param.name, members[i], words[i] % source_words))
            else:
                if param.type.double_word:
                    # Recombine low and high words into a 64-bit value.
                    result.append("\tresult.%s = ((%s)%s + ((%s)%s << 32));" % (param.name, TYPES[64], words[0] % source_words, TYPES[64], words[1] % source_words))
                else:
                    for word in words:
                        result.append("\tresult.%s = %s;" % (param.name, construction(word % source_words, param)))
        result.append("")

    # Return result
    if returning_struct:
        result.append("\tresult.error = output_tag.get_label() as isize;")
        result.append("\tresult")
    else:
        result.append("\toutput_tag.get_label() as isize")

    #
    # }
    #
    result.append("}")

    return "\n".join(result) + "\n"
def parse_xml_file(input_file, valid_types):
    """
    Parse an XML file containing method definitions.

    Returns (methods, structs): 'methods' is a list of
    (interface_name, method_name, method_id, input_params, output_params)
    tuples; 'structs' is a list of (struct_name, member_names) pairs.
    """
    # Create a dictionary of type name to type.
    type_names = {}
    for i in valid_types:
        type_names[i.name] = i

    # Parse the XML to generate method structures.
    methods = []
    structs = []
    doc = xml.dom.minidom.parse(input_file)

    for struct in doc.getElementsByTagName("struct"):
        # Renamed from 'struct_members' to stop shadowing the module-level
        # struct_members() helper.
        member_names = []
        struct_name = struct.getAttribute("name")
        for member in struct.getElementsByTagName("member"):
            member_names.append(member.getAttribute("name"))
        structs.append((struct_name, member_names))

    for interface in doc.getElementsByTagName("interface"):
        interface_name = interface.getAttribute("name")
        for method in interface.getElementsByTagName("method"):
            method_name = method.getAttribute("name")
            method_id = method.getAttribute("id")

            #
            # Get parameters.
            #
            # We always have an implicit cap parameter.
            #
            input_params = [Parameter("service", type_names[interface_name])]
            output_params = []
            for param in method.getElementsByTagName("param"):
                param_name = param.getAttribute("name")
                param_type = type_names.get(param.getAttribute("type"))
                if not param_type:
                    raise Exception("Unknown type '%s'." % (param.getAttribute("type")))
                param_dir = param.getAttribute("dir")
                assert (param_dir == "in") or (param_dir == "out")
                if param_dir == "in":
                    input_params.append(Parameter(param_name, param_type))
                else:
                    output_params.append(Parameter(param_name, param_type))

            methods.append((interface_name, method_name, method_id, input_params, output_params))

    return (methods, structs)
def generate_stub_file(arch, input_files, output_file, use_only_ipc_buffer):
    """
    Generate a header file containing system call stubs for seL4.

    Parses every XML file in 'input_files' with the types valid for 'arch'
    and writes the return structs and stub functions to 'output_file'.
    """
    result = []

    # Ensure architecture looks sane.
    if arch not in arch_types:
        # BUG FIX: the message and the joined arch list were previously
        # passed as two separate Exception arguments, so '%s' was never
        # substituted into the text.
        raise Exception("Invalid architecture. Expected %s." %
                        " or ".join(arch_types.keys()))

    # Parse XML
    methods = []
    structs = []
    for input_file in input_files:
        parsed_methods, parsed_structs = parse_xml_file(
            input_file, types + arch_types[arch])
        methods += parsed_methods
        structs += parsed_structs

    # Print header.
    result.append("""
/*
 * Automatically generated system call stubs.
 */
""")

    #
    # Generate structures needed to return results back to the user.
    #
    # We can not use pass-by-reference (except for really large objects), as
    # the verification framework does not support them.
    #
    result.append("/*")
    result.append(" * Return types for generated methods.")
    result.append(" */")
    for (interface_name, method_name, _, _, output_params) in methods:
        results_structure = generate_result_struct(interface_name, method_name, output_params)
        if results_structure:
            result.append(results_structure)

    #
    # Generate the actual stub code.
    #
    result.append("/*")
    result.append(" * Generated stubs.")
    result.append(" */")
    for (interface_name, method_name, method_id, inputs, outputs) in methods:
        result.append(generate_stub(arch, interface_name, method_name,
                                    method_id, inputs, outputs, structs, use_only_ipc_buffer))

    # Write the output; the context manager guarantees the file is closed
    # even if a write fails.
    with open(output_file, "w") as output:
        output.write("\n".join(result))
def main():
    """Command-line entry point: parse options and generate the stubs."""
    #
    # Read command line arguments.
    #
    # BUG FIX: the usage string previously read '-o <ouput file]' with a
    # typo and mismatched brackets.
    parser = optparse.OptionParser(
        usage="usage: %prog -a <arch> -e [sel4 | libsel4] "
              "[-o <output file>] <input XML> [<input XML> ...]")
    parser.add_option("-a", "--arch",
                      dest="arch", help="Architecture to generate stubs for.")
    parser.add_option("-o", "--output",
                      dest="output", help="Output file to write stub to.")
    parser.add_option("-b", "--buffer", action="store_true",
                      help="Use IPC buffer exclusively (i.e. do not pass syscall "
                           "arguments by registers).")
    (options, args) = parser.parse_args()

    # Validate arguments
    if len(args) < 1:
        parser.error("Require at least one input file.")
    if not options.arch:
        parser.error("Require an architecture to be specified.")
    if not options.output:
        options.output = "/dev/stdout"
    input_files = args

    # Generate the stubs.
    generate_stub_file(options.arch, input_files, options.output, options.buffer)


# BUG FIX: guard the entry point so importing this module (e.g. from a
# build tool or test) does not immediately run the generator.
if __name__ == "__main__":
    main()
| |
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates C++ code representing structured data objects from schema.org
This script generates C++ objects based on a JSON+LD schema file. Blink uses the
generated code to scrape schema.org data from web pages.
"""
import os
import sys
import json
import argparse
# Directory containing this script, resolved through any symlinks.
_current_dir = os.path.dirname(os.path.realpath(__file__))
# jinja2 is in chromium's third_party directory
# Insert at front to override system libraries, and after path[0] == script dir
sys.path.insert(
    1, os.path.join(_current_dir, *([os.pardir] * 2 + ['third_party'])))
import jinja2
from jinja2 import Environment, PackageLoader, select_autoescape

# Template environment; trim_blocks/lstrip_blocks strip the whitespace that
# Jinja control blocks would otherwise leave in the generated C++.
env = Environment(loader=PackageLoader('generate_schema_org_code', ''))
env.trim_blocks = True
env.lstrip_blocks = True
# Every schema.org ID is this URL prefix followed by the object name.
SCHEMA_ORG_PREFIX = 'http://schema.org/'


def schema_org_id(object_name):
    """Return the full schema.org ID URL for an object name."""
    return SCHEMA_ORG_PREFIX + object_name


def object_name_from_id(the_id):
    """Return the object name portion of a schema.org ID URL."""
    return the_id[len(SCHEMA_ORG_PREFIX):]
def get_schema_obj(obj_id, schema):
    """Search the schema graph for the object with the given ID.

    Returns None unless exactly one graph node matches.
    """
    matches = [node for node in schema['@graph'] if node['@id'] == obj_id]
    if len(matches) == 1:
        return matches[0]
    return None
def is_enum_type(class_obj):
    """Return whether the class is a direct subclass of schema.org Enumeration."""
    if 'rdfs:subClassOf' in class_obj:
        parent_class = class_obj['rdfs:subClassOf']
        if isinstance(parent_class, list):
            # Multiple inheritance: an enum if any parent is Enumeration.
            return any(parent['@id'] == schema_org_id('Enumeration')
                       for parent in parent_class)
        return parent_class['@id'] == schema_org_id('Enumeration')
    # BUG FIX: previously fell off the end and returned None when the class
    # had no declared parent; return an explicit (still falsy) bool.
    return False
def make_entity(thing, names):
    """Build a {name, name_hash} entity dict for a schema.org thing."""
    name = object_name_from_id(thing['@id'])
    return {"name": name, "name_hash": names[name]}
def make_entity_from_name(name, names):
    """Build a {name, name_hash} entity dict from a bare object name."""
    return dict(name=name, name_hash=names[name])
def find_enum_options(obj_id, schema, names):
    """Collect an entity for every graph node whose @type is the given enum."""
    options = []
    for node in schema['@graph']:
        if node['@type'] == obj_id:
            options.append(make_entity(node, names))
    return options
def get_root_type(the_class, schema):
    """Get the base type the class is descended from.

    Walks up the rdfs:subClassOf chain until it reaches Thing, URL, a
    primitive DataType, or an Enumeration subclass, and returns that
    terminal class object. Classes missing from the graph are returned
    unchanged.
    """
    class_obj = get_schema_obj(the_class['@id'], schema)
    if class_obj is None:
        return the_class
    obj_id = class_obj['@id']
    if obj_id == schema_org_id('Thing'):
        return class_obj
    # URLs are treated as a base type as the generated struct has a field
    # specifically for them.
    if obj_id == schema_org_id('URL'):
        return class_obj
    if '@type' in class_obj and schema_org_id('DataType') in class_obj['@type']:
        return class_obj
    if 'rdfs:subClassOf' not in class_obj:
        return class_obj
    parent_class = class_obj['rdfs:subClassOf']
    # All classes that use multiple inheritance are Thing type.
    if isinstance(parent_class, list):
        return get_schema_obj(schema_org_id('Thing'), schema)
    # Enumeration subclasses are their own root: keep the specific enum type.
    if parent_class['@id'] == schema_org_id('Enumeration'):
        return class_obj
    return get_root_type(parent_class, schema)
def parse_property(prop, schema, names):
    """Parse out details about the property, including what type it can be."""
    name = object_name_from_id(prop['@id'])
    parsed_prop = {
        'name': name,
        'name_hash': names[name],
        'thing_types': [],
        'enum_types': []
    }
    if schema_org_id('rangeIncludes') not in prop:
        return parsed_prop
    range_includes = prop[schema_org_id('rangeIncludes')]
    if not isinstance(range_includes, list):
        range_includes = [range_includes]
    # Primitive root-type ids map directly onto boolean flags in parsed_prop.
    flag_for_primitive = {
        schema_org_id('Text'): 'has_text',
        schema_org_id('Date'): 'has_date',
        schema_org_id('Time'): 'has_time',
        schema_org_id('Boolean'): 'has_boolean',
        schema_org_id('Number'): 'has_number',
        schema_org_id('DateTime'): 'has_date_time',
        schema_org_id('URL'): 'has_url',
    }
    for possible_type in range_includes:
        root_type = get_root_type(possible_type, schema)
        root_id = root_type['@id']
        if root_id == schema_org_id('Thing'):
            parsed_prop['thing_types'].append(possible_type['@id'])
        elif root_id in flag_for_primitive:
            parsed_prop[flag_for_primitive[root_id]] = True
        elif is_enum_type(root_type):
            parsed_prop['enum_types'].append(possible_type['@id'])
    return parsed_prop
def merge_with_schema(schema, overrides, thing):
    """Insert or replace *thing* in schema['@graph'], keyed by its @id.

    Every existing node with a matching @id is replaced in place; when
    none matches, the node is appended. (*overrides* is unused but kept
    for interface compatibility.)
    """
    graph = schema['@graph']
    replaced = False
    for index, node in enumerate(graph):
        if node['@id'] == thing['@id']:
            graph[index] = thing
            replaced = True
    if not replaced:
        graph.append(thing)
def lookup_parents(thing, schema, lookup_table):
    """Recursively look up all ancestors of *thing* in the schema.

    Returns the set of ancestor names (including thing's own name) and
    memoizes it in lookup_table, keyed by object name.

    NOTE(review): the result is a set, so the old claim that it "may
    contain duplicates" for multiple inheritance no longer applies --
    duplicates are collapsed by the set union below.
    """
    obj_name = object_name_from_id(thing['@id'])
    # Memoized: a class reachable through several inheritance paths is
    # only walked once.
    if obj_name in lookup_table:
        return lookup_table[obj_name]
    lookup_table[obj_name] = set()
    if 'rdfs:subClassOf' in thing:
        parent_classes = thing['rdfs:subClassOf']
        # Normalize the single-parent case to a one-element list.
        if not isinstance(parent_classes, list):
            parent_classes = [parent_classes]
        # Resolve parent references to actual graph nodes, dropping any
        # parent that is missing from (or duplicated in) the graph.
        parent_classes = [
            get_schema_obj(parent['@id'], schema) for parent in parent_classes
        ]
        parent_classes = [
            parent for parent in parent_classes if parent is not None
        ]
        found_parents = [
            lookup_parents(parent_thing, schema, lookup_table)
            for parent_thing in parent_classes
        ]
        # flatten the list of per-parent ancestor sets
        found_parents = [item for sublist in found_parents for item in sublist]
        lookup_table[obj_name].update(found_parents)
    # Every class is considered one of its own "parents".
    lookup_table[obj_name].add(obj_name)
    return lookup_table[obj_name]
def get_template_vars_from_file(schema_file_path, overrides_file_path,
                                name_file_path):
    """Load schema, names, and optional overrides from disk.

    Applies each override node on top of the schema, then returns the
    template variables derived from the merged schema.
    """
    with open(schema_file_path) as schema_file:
        schema = json.load(schema_file)
    with open(name_file_path) as names_file:
        names = json.load(names_file)
    if overrides_file_path:
        with open(overrides_file_path) as overrides_file:
            overrides = json.load(overrides_file)
        for thing in overrides['@graph']:
            merge_with_schema(schema, overrides, thing)
    return get_template_vars(schema, names)
def get_template_vars(schema, names):
    """Read the needed template variables from the schema file.

    Returns a dict with 'entities', 'properties', 'enums' and
    'entity_parent_lookup' lists derived from the schema graph; entities
    are sorted by name hash, properties by name.
    """
    template_vars = {
        'entities': [],
        'properties': [],
        'enums': [],
        'entity_parent_lookup': []
    }
    entity_parent_lookup = {}
    for thing in schema['@graph']:
        if thing['@type'] == 'rdfs:Class':
            template_vars['entities'].append(make_entity(thing, names))
            lookup_parents(thing, schema, entity_parent_lookup)
            if is_enum_type(thing):
                template_vars['enums'].append({
                    'name':
                    object_name_from_id(thing['@id']),
                    'id':
                    thing['@id'],
                    'id_hash':
                    names[thing['@id']],
                    'options':
                    find_enum_options(thing['@id'], schema, names)
                })
        elif thing['@type'] == 'rdf:Property':
            template_vars['properties'].append(
                parse_property(thing, schema, names))
    # Fix: dict.iteritems() exists only in Python 2; items() works on both
    # Python 2 and 3.
    for entity, parents in entity_parent_lookup.items():
        template_vars['entity_parent_lookup'].append({
            'name':
            entity,
            'name_hash':
            names[entity],
            'parents':
            [make_entity_from_name(parent, names) for parent in parents]
        })
    template_vars['entities'].sort(key=lambda p: p['name_hash'])
    template_vars['properties'].sort(key=lambda p: p['name'])
    return template_vars
def generate_file(file_name, template_file, template_vars):
    """Generate and write file given a template and variables to render.

    Mutates template_vars, adding 'header_file' and 'header_guard'
    derived from the template file name.
    """
    # Header file name is the template path truncated at its first '.'.
    # NOTE(review): index('.') scans the whole path, so a '.' in a parent
    # directory name would truncate wrongly -- confirm callers pass plain
    # template names or dot-free directories.
    template_vars['header_file'] = os.path.basename(
        template_file[:template_file.index('.')])
    template_vars['header_guard'] = template_vars['header_file'].upper() + '_H'
    with open(file_name, 'w') as f:
        f.write(env.get_template(template_file).render(template_vars))
def main():
    """Parse command-line args and render each template into the output dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--schema-file',
        help='Schema.org JSON-LD schema file to use for code generation.')
    parser.add_argument(
        '--overrides-file',
        help='JSON-LD schema file with overrides to support changes not in the '
        'latest schema.org version. Optional.')
    parser.add_argument(
        '--name-file',
        help='JSON file of hashed schema.org names to speed up lookups.')
    parser.add_argument(
        '--output-dir',
        help='Output directory in which to place generated code files.')
    parser.add_argument('--templates', nargs='+')
    args = parser.parse_args()
    template_vars = get_template_vars_from_file(
        args.schema_file, args.overrides_file, args.name_file)
    for template_file in args.templates:
        # Output file name is the template basename with '.tmpl' removed.
        generate_file(
            os.path.join(args.output_dir,
                         os.path.basename(template_file.replace('.tmpl', ''))),
            template_file, template_vars)
if __name__ == '__main__':
    # main() returns None, so a successful run always exits with status 0.
    sys.exit(main())
| |
from __future__ import absolute_import
import pytest
pytest.importorskip('numpy')
pytest.importorskip('scipy')
import numpy as np
import scipy.linalg
import dask.array as da
from dask.array.linalg import tsqr, svd_compressed, qr, svd
from dask.array.utils import assert_eq
def same_keys(a, b):
    """Return True when two dask collections have identical graph keys."""
    def normalize(k):
        # Pad top-level string keys so they sort consistently against the
        # (name, i, j, ...) tuple keys.
        return (k, -1, -1, -1) if isinstance(k, str) else k
    return sorted(a.dask, key=normalize) == sorted(b.dask, key=normalize)
def test_tsqr_regular_blocks():
    """TSQR on evenly-chunked input: Q*R reconstructs, Q orthonormal, R upper."""
    m, n = 20, 10
    mat = np.random.rand(m, n)
    data = da.from_array(mat, chunks=(10, n), name='A')
    q, r = tsqr(data)
    # Materialize the lazy dask results as concrete numpy arrays.
    q = np.array(q)
    r = np.array(r)
    assert_eq(mat, np.dot(q, r)) # accuracy check
    assert_eq(np.eye(n, n), np.dot(q.T, q)) # q must be orthonormal
    assert_eq(r, np.triu(r)) # r must be upper triangular
def test_tsqr_irregular_blocks():
    """TSQR on unevenly-chunked input (slicing forces irregular chunks)."""
    m, n = 20, 10
    mat = np.random.rand(m, n)
    # The [1:] slice makes the first chunk shorter than the rest.
    data = da.from_array(mat, chunks=(3, n), name='A')[1:]
    mat2 = mat[1:, :]
    q, r = tsqr(data)
    q = np.array(q)
    r = np.array(r)
    assert_eq(mat2, np.dot(q, r)) # accuracy check
    assert_eq(np.eye(n, n), np.dot(q.T, q)) # q must be orthonormal
    assert_eq(r, np.triu(r)) # r must be upper triangular
def test_tsqr_svd_regular_blocks():
    """TSQR's SVD mode on regular chunks matches numpy's singular values."""
    m, n = 20, 10
    mat = np.random.rand(m, n)
    data = da.from_array(mat, chunks=(10, n), name='A')
    u, s, vt = tsqr(data, compute_svd=True)
    u = np.array(u)
    s = np.array(s)
    vt = np.array(vt)
    usvt = np.dot(u, np.dot(np.diag(s), vt))
    # Reference singular values from numpy's dense SVD.
    s_exact = np.linalg.svd(mat)[1]
    assert_eq(mat, usvt) # accuracy check
    assert_eq(np.eye(n, n), np.dot(u.T, u)) # u must be orthonormal
    assert_eq(np.eye(n, n), np.dot(vt, vt.T)) # v must be orthonormal
    assert_eq(s, s_exact) # s must contain the singular values
def test_tsqr_svd_irregular_blocks():
    """TSQR's SVD mode on irregular chunks matches numpy's singular values."""
    m, n = 20, 10
    mat = np.random.rand(m, n)
    # The [1:] slice makes the first chunk shorter than the rest.
    data = da.from_array(mat, chunks=(3, n), name='A')[1:]
    mat2 = mat[1:, :]
    u, s, vt = tsqr(data, compute_svd=True)
    u = np.array(u)
    s = np.array(s)
    vt = np.array(vt)
    usvt = np.dot(u, np.dot(np.diag(s), vt))
    s_exact = np.linalg.svd(mat2)[1]
    assert_eq(mat2, usvt) # accuracy check
    assert_eq(np.eye(n, n), np.dot(u.T, u)) # u must be orthonormal
    assert_eq(np.eye(n, n), np.dot(vt, vt.T)) # v must be orthonormal
    assert_eq(s, s_exact) # s must contain the singular values
def test_linalg_consistent_names():
    """Repeated qr/svd calls on identical input must produce identical keys.

    Deterministic key naming is what allows dask to share/cache tasks.
    """
    m, n = 20, 10
    mat = np.random.rand(m, n)
    data = da.from_array(mat, chunks=(10, n), name='A')
    q1, r1 = qr(data)
    q2, r2 = qr(data)
    assert same_keys(q1, q2)
    assert same_keys(r1, r2)
    u1, s1, v1 = svd(data)
    u2, s2, v2 = svd(data)
    assert same_keys(u1, u2)
    assert same_keys(s1, s2)
    assert same_keys(v1, v2)
@pytest.mark.slow
def test_svd_compressed():
    """Compressed SVD of a rank-10 matrix recovers its singular structure."""
    m, n = 2000, 250
    r = 10
    np.random.seed(4321)
    # Construct an exactly rank-r matrix as a product of two thin factors.
    mat1 = np.random.randn(m, r)
    mat2 = np.random.randn(r, n)
    mat = mat1.dot(mat2)
    data = da.from_array(mat, chunks=(500, 50))
    u, s, vt = svd_compressed(data, r, seed=4321, n_power_iter=2)
    u, s, vt = da.compute(u, s, vt)
    usvt = np.dot(u, np.dot(np.diag(s), vt))
    tol = 0.2
    # NOTE(review): this compares the reconstruction-error norm against the
    # norm of mat itself; for an accurate SVD the error norm is near zero, so
    # these operands look mismatched -- confirm the intended assert_eq
    # semantics before relying on this check.
    assert_eq(np.linalg.norm(mat - usvt),
              np.linalg.norm(mat),
              rtol=tol, atol=tol) # average accuracy check
    u = u[:, :r]
    s = s[:r]
    vt = vt[:r, :]
    s_exact = np.linalg.svd(mat)[1]
    s_exact = s_exact[:r]
    assert_eq(np.eye(r, r), np.dot(u.T, u)) # u must be orthonormal
    assert_eq(np.eye(r, r), np.dot(vt, vt.T)) # v must be orthonormal
    assert_eq(s, s_exact) # s must contain the singular values
def test_svd_compressed_deterministic():
    """svd_compressed with a fixed seed must be fully deterministic."""
    m, n = 30, 25
    x = da.random.RandomState(1234).random_sample(size=(m, n), chunks=(5, 5))
    u, s, vt = svd_compressed(x, 3, seed=1234)
    u2, s2, vt2 = svd_compressed(x, 3, seed=1234)
    assert all(da.compute((u == u2).all(), (s == s2).all(), (vt == vt2).all()))
def _check_lu_result(p, l, u, A):
    """Assert p.l.u reconstructs A and that l/u are lower/upper triangular."""
    assert np.allclose(p.dot(l).dot(u), A)
    # check triangulars
    assert np.allclose(l, np.tril(l.compute()))
    assert np.allclose(u, np.triu(u.compute()))
def test_lu_1():
    """LU decomposition on fixed matrices, with and without pivoting."""
    A1 = np.array([[7, 3, -1, 2], [3, 8, 1, -4],
                   [-1, 1, 4, -1], [2, -4, -1, 6] ])
    A2 = np.array([[7, 0, 0, 0, 0, 0],
                   [0, 8, 0, 0, 0, 0],
                   [0, 0, 4, 0, 0, 0],
                   [0, 0, 0, 6, 0, 0],
                   [0, 0, 0, 0, 3, 0],
                   [0, 0, 0, 0, 0, 5]])
    # without shuffle: these matrices need no row pivoting, so the dask
    # result can be compared factor-by-factor against scipy's.
    for A, chunk in zip([A1, A2], [2, 2]):
        dA = da.from_array(A, chunks=(chunk, chunk))
        p, l, u = scipy.linalg.lu(A)
        dp, dl, du = da.linalg.lu(dA)
        assert_eq(p, dp)
        assert_eq(l, dl)
        assert_eq(u, du)
        _check_lu_result(dp, dl, du, A)
    A3 = np.array([[ 7,  3,  2,  1,  4,  1],
                   [ 7, 11,  5,  2,  5,  2],
                   [21, 25, 16, 10, 16,  5],
                   [21, 41, 18, 13, 16, 11],
                   [14, 46, 23, 24, 21, 22],
                   [ 0, 56, 29, 17, 14,  8]])
    # with shuffle: pivot choices may differ from scipy's, so only check
    # that the reconstruction is valid, not the individual factors.
    for A, chunk in zip([A3], [2]):
        dA = da.from_array(A, chunks=(chunk, chunk))
        p, l, u = scipy.linalg.lu(A)
        dp, dl, du = da.linalg.lu(dA)
        _check_lu_result(dp, dl, du, A)
@pytest.mark.parametrize('size', [10, 20, 30, 50])
def test_lu_2(size):
    """LU-factor random square matrices of several sizes (5x5 chunks)."""
    np.random.seed(10)
    # np.random.random_integers(a, b, ...) was deprecated in NumPy 1.11 and
    # later removed; randint(a, b + 1, ...) draws the same inclusive range.
    A = np.random.randint(0, 11, (size, size))
    dA = da.from_array(A, chunks=(5, 5))
    dp, dl, du = da.linalg.lu(dA)
    _check_lu_result(dp, dl, du, A)
@pytest.mark.parametrize('size', [50, 100, 200])
def test_lu_3(size):
    """LU-factor larger random square matrices (25x25 chunks)."""
    np.random.seed(10)
    # random_integers was removed from NumPy; randint's exclusive upper bound
    # (11) preserves the original inclusive range 0..10.
    A = np.random.randint(0, 11, (size, size))
    dA = da.from_array(A, chunks=(25, 25))
    dp, dl, du = da.linalg.lu(dA)
    _check_lu_result(dp, dl, du, A)
def test_lu_errors():
    """da.linalg.lu must reject non-2D, non-square, and uneven-chunk input."""
    # random_integers was removed from NumPy; randint's exclusive upper bound
    # (11) preserves the original inclusive range 0..10.
    A = np.random.randint(0, 11, (10, 10, 10))
    dA = da.from_array(A, chunks=(5, 5, 5))
    pytest.raises(ValueError, lambda: da.linalg.lu(dA))
    A = np.random.randint(0, 11, (10, 8))
    dA = da.from_array(A, chunks=(5, 4))
    pytest.raises(ValueError, lambda: da.linalg.lu(dA))
    A = np.random.randint(0, 11, (20, 20))
    dA = da.from_array(A, chunks=(5, 4))
    pytest.raises(ValueError, lambda: da.linalg.lu(dA))
@pytest.mark.parametrize(('shape', 'chunk'), [(20, 10), (50, 10), (70, 20)])
def test_solve_triangular_vector(shape, chunk):
    """Triangular solves with a vector RHS match scipy (upper and lower)."""
    np.random.seed(1)
    # random_integers was removed from NumPy; randint's exclusive upper bound
    # (11) preserves the original inclusive range 1..10.
    A = np.random.randint(1, 11, (shape, shape))
    b = np.random.randint(1, 11, shape)
    # upper
    Au = np.triu(A)
    dAu = da.from_array(Au, (chunk, chunk))
    db = da.from_array(b, chunk)
    res = da.linalg.solve_triangular(dAu, db)
    assert_eq(res, scipy.linalg.solve_triangular(Au, b))
    assert_eq(dAu.dot(res), b.astype(float))
    # lower
    Al = np.tril(A)
    dAl = da.from_array(Al, (chunk, chunk))
    db = da.from_array(b, chunk)
    res = da.linalg.solve_triangular(dAl, db, lower=True)
    assert_eq(res, scipy.linalg.solve_triangular(Al, b, lower=True))
    assert_eq(dAl.dot(res), b.astype(float))
@pytest.mark.parametrize(('shape', 'chunk'), [(20, 10), (50, 10), (50, 20)])
def test_solve_triangular_matrix(shape, chunk):
    """Triangular solves with a tall-skinny matrix RHS match scipy."""
    np.random.seed(1)
    # random_integers was removed from NumPy; randint's exclusive upper bound
    # (11) preserves the original inclusive range 1..10.
    A = np.random.randint(1, 11, (shape, shape))
    b = np.random.randint(1, 11, (shape, 5))
    # upper
    Au = np.triu(A)
    dAu = da.from_array(Au, (chunk, chunk))
    db = da.from_array(b, (chunk, 5))
    res = da.linalg.solve_triangular(dAu, db)
    assert_eq(res, scipy.linalg.solve_triangular(Au, b))
    assert_eq(dAu.dot(res), b.astype(float))
    # lower
    Al = np.tril(A)
    dAl = da.from_array(Al, (chunk, chunk))
    db = da.from_array(b, (chunk, 5))
    res = da.linalg.solve_triangular(dAl, db, lower=True)
    assert_eq(res, scipy.linalg.solve_triangular(Al, b, lower=True))
    assert_eq(dAl.dot(res), b.astype(float))
@pytest.mark.parametrize(('shape', 'chunk'), [(20, 10), (50, 10), (50, 20)])
def test_solve_triangular_matrix2(shape, chunk):
    """Triangular solves with a square matrix RHS match scipy."""
    np.random.seed(1)
    # random_integers was removed from NumPy; randint's exclusive upper bound
    # (11) preserves the original inclusive range 1..10.
    A = np.random.randint(1, 11, (shape, shape))
    b = np.random.randint(1, 11, (shape, shape))
    # upper
    Au = np.triu(A)
    dAu = da.from_array(Au, (chunk, chunk))
    db = da.from_array(b, (chunk, chunk))
    res = da.linalg.solve_triangular(dAu, db)
    assert_eq(res, scipy.linalg.solve_triangular(Au, b))
    assert_eq(dAu.dot(res), b.astype(float))
    # lower
    Al = np.tril(A)
    dAl = da.from_array(Al, (chunk, chunk))
    db = da.from_array(b, (chunk, chunk))
    res = da.linalg.solve_triangular(dAl, db, lower=True)
    assert_eq(res, scipy.linalg.solve_triangular(Al, b, lower=True))
    assert_eq(dAl.dot(res), b.astype(float))
def test_solve_triangular_errors():
    """solve_triangular must reject non-2D input and mismatched chunking."""
    # random_integers was removed from NumPy; randint's exclusive upper bound
    # preserves the original inclusive ranges.
    A = np.random.randint(0, 11, (10, 10, 10))
    b = np.random.randint(1, 11, 10)
    dA = da.from_array(A, chunks=(5, 5, 5))
    db = da.from_array(b, chunks=5)
    pytest.raises(ValueError, lambda: da.linalg.solve_triangular(dA, db))
    A = np.random.randint(0, 11, (10, 10))
    b = np.random.randint(1, 11, 10)
    dA = da.from_array(A, chunks=(3, 3))
    db = da.from_array(b, chunks=5)
    pytest.raises(ValueError, lambda: da.linalg.solve_triangular(dA, db))
@pytest.mark.parametrize(('shape', 'chunk'), [(20, 10), (50, 10)])
def test_solve(shape, chunk):
    """da.linalg.solve matches scipy for vector, tall-skinny and square RHS."""
    np.random.seed(1)
    # random_integers was removed from NumPy; randint's exclusive upper bound
    # (11) preserves the original inclusive range 1..10.
    A = np.random.randint(1, 11, (shape, shape))
    dA = da.from_array(A, (chunk, chunk))
    # vector
    b = np.random.randint(1, 11, shape)
    db = da.from_array(b, chunk)
    res = da.linalg.solve(dA, db)
    assert_eq(res, scipy.linalg.solve(A, b))
    assert_eq(dA.dot(res), b.astype(float))
    # tall-and-skinny matrix
    b = np.random.randint(1, 11, (shape, 5))
    db = da.from_array(b, (chunk, 5))
    res = da.linalg.solve(dA, db)
    assert_eq(res, scipy.linalg.solve(A, b))
    assert_eq(dA.dot(res), b.astype(float))
    # matrix
    b = np.random.randint(1, 11, (shape, shape))
    db = da.from_array(b, (chunk, chunk))
    res = da.linalg.solve(dA, db)
    assert_eq(res, scipy.linalg.solve(A, b))
    assert_eq(dA.dot(res), b.astype(float))
@pytest.mark.parametrize(('shape', 'chunk'), [(20, 10), (50, 10)])
def test_inv(shape, chunk):
    """da.linalg.inv matches scipy and A @ inv(A) is the identity."""
    np.random.seed(1)
    # random_integers was removed from NumPy; randint's exclusive upper bound
    # (11) preserves the original inclusive range 1..10.
    A = np.random.randint(1, 11, (shape, shape))
    dA = da.from_array(A, (chunk, chunk))
    res = da.linalg.inv(dA)
    assert_eq(res, scipy.linalg.inv(A))
    assert_eq(dA.dot(res), np.eye(shape, dtype=float))
def _get_symmat(size):
np.random.seed(1)
A = np.random.random_integers(1, 20, (size, size))
lA = np.tril(A)
return lA.dot(lA.T)
@pytest.mark.parametrize(('shape', 'chunk'), [(20, 10), (30, 6)])
def test_solve_sym_pos(shape, chunk):
    """Symmetric-positive-definite solves match scipy for all RHS shapes.

    NOTE(review): scipy deprecated and later removed the sym_pos kwarg in
    favour of assume_a='pos'; kept as-is here since da.linalg.solve mirrors
    the sym_pos interface -- confirm against the pinned scipy version.
    """
    np.random.seed(1)
    A = _get_symmat(shape)
    dA = da.from_array(A, (chunk, chunk))
    # vector
    # random_integers was removed from NumPy; randint's exclusive upper bound
    # (11) preserves the original inclusive range 1..10.
    b = np.random.randint(1, 11, shape)
    db = da.from_array(b, chunk)
    res = da.linalg.solve(dA, db, sym_pos=True)
    assert_eq(res, scipy.linalg.solve(A, b, sym_pos=True))
    assert_eq(dA.dot(res), b.astype(float))
    # tall-and-skinny matrix
    b = np.random.randint(1, 11, (shape, 5))
    db = da.from_array(b, (chunk, 5))
    res = da.linalg.solve(dA, db, sym_pos=True)
    assert_eq(res, scipy.linalg.solve(A, b, sym_pos=True))
    assert_eq(dA.dot(res), b.astype(float))
    # matrix
    b = np.random.randint(1, 11, (shape, shape))
    db = da.from_array(b, (chunk, chunk))
    res = da.linalg.solve(dA, db, sym_pos=True)
    assert_eq(res, scipy.linalg.solve(A, b, sym_pos=True))
    assert_eq(dA.dot(res), b.astype(float))
@pytest.mark.parametrize(('shape', 'chunk'), [(20, 10), (12, 3), (30, 3), (30, 6)])
def test_cholesky(shape, chunk):
    """Cholesky factors (upper and lower) of an SPD matrix match scipy."""
    A = _get_symmat(shape)
    dA = da.from_array(A, (chunk, chunk))
    assert_eq(da.linalg.cholesky(dA), scipy.linalg.cholesky(A))
    assert_eq(da.linalg.cholesky(dA, lower=True), scipy.linalg.cholesky(A, lower=True))
@pytest.mark.parametrize(("nrow", "ncol", "chunk"),
                         [(20, 10, 5), (100, 10, 10)])
def test_lstsq(nrow, ncol, chunk):
    """Least-squares via dask matches numpy, incl. the rank-deficient case."""
    np.random.seed(1)
    # random_integers was removed from NumPy; randint's exclusive upper bound
    # (21) preserves the original inclusive range 1..20.
    A = np.random.randint(1, 21, (nrow, ncol))
    b = np.random.randint(1, 21, nrow)
    dA = da.from_array(A, (chunk, ncol))
    db = da.from_array(b, chunk)
    # rcond=-1 keeps the legacy cutoff behavior and silences numpy's
    # FutureWarning about the changing default.
    x, r, rank, s = np.linalg.lstsq(A, b, rcond=-1)
    dx, dr, drank, ds = da.linalg.lstsq(dA, db)
    assert_eq(dx, x)
    assert_eq(dr, r)
    assert drank.compute() == rank
    assert_eq(ds, s)
    # reduce rank causes multicollinearity, only compare rank
    A[:, 1] = A[:, 2]
    dA = da.from_array(A, (chunk, ncol))
    db = da.from_array(b, chunk)
    x, r, rank, s = np.linalg.lstsq(A, b, rcond=-1)
    assert rank == ncol - 1
    dx, dr, drank, ds = da.linalg.lstsq(dA, db)
    assert drank.compute() == rank
| |
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handling of block device information and mapping.
This module contains helper methods for interpreting the block
device information and determining the suitable mapping to
guest devices and libvirt XML.
Throughout these methods there are a number of standard
variables / types used
* 'mapping': a dict contains the storage device mapping.
For the default disk types it will contain the following
keys & values:
'disk' -> disk_info
'disk.rescue' -> disk_info
'disk.local' -> disk_info
'disk.swap' -> disk_info
'disk.config' -> disk_info
If any of the default disks are overridden by the block
device info mappings, the hash value will be None
For any ephemeral device there will also be a dict entry
'disk.eph$NUM' -> disk_info
For any volume device there will also be a dict entry:
$path -> disk_info
Finally a special key will refer to the root device:
'root' -> disk_info
* 'disk_info': a dict specifying disk configuration
It contains the following 3 required fields
bus (disk_bus), dev (disk_dev), type (device_type)
and possibly these optional fields: ('format', 'boot_index')
* 'disk_bus': the guest bus type ('ide', 'virtio', 'scsi', etc)
* 'disk_dev': the device name 'vda', 'hdc', 'sdf', 'xvde' etc
* 'device_type': type of device eg 'disk', 'cdrom', 'floppy'
* 'format': Which format to apply to the device if applicable
* 'boot_index': Number designating the boot order of the device
"""
import itertools
import operator
from oslo_config import cfg
from nova import block_device
from nova import exception
from nova.i18n import _
from nova.objects import base as obj_base
from nova.objects import fields as obj_fields
from nova.virt import configdrive
from nova.virt import driver
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import osinfo
CONF = cfg.CONF
SUPPORTED_DEVICE_TYPES = ('disk', 'cdrom', 'floppy', 'lun')
BOOT_DEV_FOR_TYPE = {'disk': 'hd', 'cdrom': 'cdrom', 'floppy': 'fd'}
def has_disk_dev(mapping, disk_dev):
    """Determine if a disk device name has already been used.

    Looks at all the keys in mapping to see if any corresponding
    disk_info dict has a device name matching disk_dev.

    Returns True if the disk_dev is in use.
    """
    return any(info['dev'] == disk_dev for info in mapping.values())
def get_dev_prefix_for_disk_bus(disk_bus):
    """Determine the dev prefix for a disk bus.

    Determine the dev prefix to be combined with a disk number to fix a
    disk_dev. eg 'hd' for 'ide' bus can be used to form a disk dev 'hda'.
    A configured disk_prefix overrides the per-bus defaults.

    Returns the dev prefix or raises an exception if the disk bus is
    unknown.
    """
    if CONF.libvirt.disk_prefix:
        return CONF.libvirt.disk_prefix
    # lxc deliberately maps to None: containers have no device naming scheme.
    prefixes = {
        "ide": "hd",
        "virtio": "vd",
        "xen": "xvd",
        "scsi": "sd",
        "usb": "sd",
        "fdc": "fd",
        "uml": "ubd",
        "lxc": None,
        "sata": "sd",
    }
    if disk_bus not in prefixes:
        raise exception.InternalError(
            _("Unable to determine disk prefix for %s") %
            disk_bus)
    return prefixes[disk_bus]
def get_dev_count_for_disk_bus(disk_bus):
    """Determine the number disks supported.

    Determine how many disks can be supported in a single VM for a
    particular disk bus: 4 for IDE, 26 (one per letter) otherwise.

    Returns the number of disks supported.
    """
    return 4 if disk_bus == "ide" else 26
def find_disk_dev_for_disk_bus(mapping, bus,
                               assigned_devices=None):
    """Identify a free disk dev name for a bus.

    Determines the possible disk dev names for the bus, and then checks
    them in order until it identifies one that is not yet used in the
    disk mapping.

    Returns the chosen disk_dev name, or raises an exception if none is
    available.
    """
    dev_prefix = get_dev_prefix_for_disk_bus(bus)
    if dev_prefix is None:
        # Buses such as lxc have no device naming scheme at all.
        return None
    if assigned_devices is None:
        assigned_devices = []
    for idx in range(get_dev_count_for_disk_bus(bus)):
        candidate = dev_prefix + chr(ord('a') + idx)
        if has_disk_dev(mapping, candidate) or candidate in assigned_devices:
            continue
        return candidate
    raise exception.InternalError(
        _("No free disk device names for prefix '%s'") % dev_prefix)
def is_disk_bus_valid_for_virt(virt_type, disk_bus):
    """Return True when disk_bus can be used with the given virt type."""
    valid_bus = {
        'qemu': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'],
        'kvm': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'],
        'xen': ['xen', 'ide'],
        'uml': ['uml'],
        'lxc': ['lxc'],
        'parallels': ['ide', 'scsi']
    }
    try:
        buses = valid_bus[virt_type]
    except KeyError:
        raise exception.UnsupportedVirtType(virt=virt_type)
    return disk_bus in buses
def get_disk_bus_for_device_type(instance,
                                 virt_type,
                                 image_meta,
                                 device_type="disk"):
    """Determine the best disk bus to use for a device type.

    Considering the currently configured virtualization
    type, return the optimal disk_bus to use for a given
    device type. For example, for a disk on KVM it will
    return 'virtio', while for a CDROM it will return 'ide'
    on x86_64 and 'scsi' on ppc64.

    Returns the disk_bus, or returns None if the device
    type is not supported for this virtualization
    """
    # Prefer a disk bus set against the image first of all
    if device_type == "disk":
        disk_bus = osinfo.HardwareProperties(image_meta).disk_model
    else:
        # e.g. hw_cdrom_bus / hw_floppy_bus image properties.
        key = "hw_" + device_type + "_bus"
        disk_bus = image_meta.properties.get(key)
    if disk_bus is not None:
        # An image-requested bus must still be valid for the hypervisor.
        if not is_disk_bus_valid_for_virt(virt_type, disk_bus):
            raise exception.UnsupportedHardware(model=disk_bus,
                                                virt=virt_type)
        return disk_bus
    # Otherwise pick a hypervisor default disk bus
    if virt_type == "uml":
        # Only plain disks are supported under UML; other device types
        # fall through and return None.
        if device_type == "disk":
            return "uml"
    elif virt_type == "lxc":
        return "lxc"
    elif virt_type == "xen":
        # HVM guests get emulated IDE; PV guests use the xen bus.
        guest_vm_mode = obj_fields.VMMode.get_from_instance(instance)
        if guest_vm_mode == obj_fields.VMMode.HVM:
            return "ide"
        else:
            return "xen"
    elif virt_type in ("qemu", "kvm"):
        if device_type == "cdrom":
            # Non-x86 architectures have no IDE controller; use SCSI there.
            guestarch = libvirt_utils.get_arch(image_meta)
            if guestarch in (
                    obj_fields.Architecture.PPC,
                    obj_fields.Architecture.PPC64,
                    obj_fields.Architecture.PPCLE,
                    obj_fields.Architecture.PPC64LE,
                    obj_fields.Architecture.S390,
                    obj_fields.Architecture.S390X,
                    obj_fields.Architecture.AARCH64):
                return "scsi"
            else:
                return "ide"
        elif device_type == "disk":
            return "virtio"
        elif device_type == "floppy":
            return "fdc"
    elif virt_type == "parallels":
        if device_type == "cdrom":
            return "ide"
        elif device_type == "disk":
            return "scsi"
    else:
        # If virt-type not in list then it is unsupported
        raise exception.UnsupportedVirtType(virt=virt_type)
    # Known virt type but unsupported device type for it.
    return None
def get_disk_bus_for_disk_dev(virt_type, disk_dev):
    """Determine the disk bus for a disk device.

    Given a disk device like 'hda', 'sdf', 'xvdb', etc guess what the
    most appropriate disk bus is for the currently configured
    virtualization technology.

    Returns the disk bus, or raises an exception if the disk device
    prefix is unknown.
    """
    # Reverse mapping for 'sd' is not reliable; there are many possible
    # mappings, so pick the most likely one: 'xen' under Xen, else 'scsi'.
    bus_for_prefix = (
        ('xvd', 'xen'),
        ('hd', 'ide'),
        ('sd', 'xen' if virt_type == 'xen' else 'scsi'),
        ('vd', 'virtio'),
        ('fd', 'fdc'),
        ('ubd', 'uml'),
    )
    for prefix, bus in bus_for_prefix:
        if disk_dev.startswith(prefix):
            return bus
    msg = _("Unable to determine disk bus for '%s'") % disk_dev[:1]
    raise exception.InternalError(msg)
def get_next_disk_info(mapping, disk_bus,
                       device_type='disk',
                       boot_index=None,
                       assigned_devices=None):
    """Determine the disk info for the next device on disk_bus.

    Considering the disks already listed in the disk mapping, determine
    the next available disk dev that can be assigned for the disk bus.

    Returns the disk_info for the next available disk.
    """
    info = {
        'bus': disk_bus,
        'dev': find_disk_dev_for_disk_bus(mapping,
                                          disk_bus,
                                          assigned_devices),
        'type': device_type,
    }
    if boot_index is not None and boot_index >= 0:
        # libvirt expects the boot order as a string attribute.
        info['boot_index'] = str(boot_index)
    return info
def get_eph_disk(index):
    """Return the mapping key for the Nth ephemeral disk."""
    return 'disk.eph%s' % (index,)
def get_config_drive_type():
    """Determine the type of config drive.

    If config_drive_format is set to iso9660 then the config drive will
    be 'cdrom', for vfat it will be 'disk'; any other configured format
    is rejected.

    Returns a string indicating the config drive type.
    """
    drive_types = {'iso9660': 'cdrom', 'vfat': 'disk'}
    drive_format = CONF.config_drive_format
    if drive_format not in drive_types:
        raise exception.ConfigDriveUnknownFormat(
            format=drive_format)
    return drive_types[drive_format]
def get_info_from_bdm(instance, virt_type, image_meta, bdm,
                      mapping=None, disk_bus=None,
                      dev_type=None, allowed_types=None,
                      assigned_devices=None):
    """Build a disk_info dict (bus/dev/type and optional extras) for a BDM.

    Values explicitly set on the bdm win over the disk_bus/dev_type
    fallbacks; invalid or missing values are replaced by hypervisor
    appropriate defaults.
    """
    mapping = mapping or {}
    allowed_types = allowed_types or SUPPORTED_DEVICE_TYPES
    device_name = block_device.strip_dev(get_device_name(bdm))
    # Fall back to plain 'disk' for any type outside the allowed set.
    bdm_type = bdm.get('device_type') or dev_type
    if bdm_type not in allowed_types:
        bdm_type = 'disk'
    bdm_bus = bdm.get('disk_bus') or disk_bus
    if not is_disk_bus_valid_for_virt(virt_type, bdm_bus):
        # Infer the bus from the device name if we have one, otherwise
        # from the hypervisor default for this device type.
        if device_name:
            bdm_bus = get_disk_bus_for_disk_dev(virt_type, device_name)
        else:
            bdm_bus = get_disk_bus_for_device_type(instance, virt_type,
                                                   image_meta, bdm_type)
    if not device_name:
        if assigned_devices:
            # Treat externally assigned names as occupied so the search
            # below does not hand one of them out again.
            padded_mapping = {dev: {'dev': dev} for dev in assigned_devices}
            padded_mapping.update(mapping)
        else:
            padded_mapping = mapping
        device_name = find_disk_dev_for_disk_bus(padded_mapping, bdm_bus)
    bdm_info = {'bus': bdm_bus,
                'dev': device_name,
                'type': bdm_type}
    bdm_format = bdm.get('guest_format')
    if bdm_format:
        bdm_info.update({'format': bdm_format})
    boot_index = bdm.get('boot_index')
    if boot_index is not None and boot_index >= 0:
        # NOTE(ndipanov): libvirt starts ordering from 1, not 0
        bdm_info['boot_index'] = str(boot_index + 1)
    return bdm_info
def get_device_name(bdm):
    """Get the device name if present regardless of the bdm format."""
    if not isinstance(bdm, obj_base.NovaObject):
        # Legacy dict-style BDMs may use either key.
        return bdm.get('device_name') or bdm.get('mount_device')
    return bdm.device_name
def get_root_info(instance, virt_type, image_meta, root_bdm,
                  disk_bus, cdrom_bus, root_device_name=None):
    """Build the disk_info dict for the instance's root device.

    Handles both the image-backed case (no usable root BDM) and a real
    root BDM, always assigning boot index 1.
    """
    # NOTE (ndipanov): This is a hack to avoid considering an image
    #                  BDM with local target, as we don't support them
    #                  yet. Only applies when passed non-driver format
    no_root_bdm = (not root_bdm or (
        root_bdm.get('source_type') == 'image' and
        root_bdm.get('destination_type') == 'local'))
    if no_root_bdm:
        # NOTE(mriedem): In case the image_meta object was constructed from
        # an empty dict, like in the case of evacuate, we have to first check
        # if disk_format is set on the ImageMeta object.
        if (image_meta.obj_attr_is_set('disk_format') and
                image_meta.disk_format == 'iso'):
            # ISO images boot from a cdrom device rather than a disk.
            root_device_bus = cdrom_bus
            root_device_type = 'cdrom'
        else:
            root_device_bus = disk_bus
            root_device_type = 'disk'
        if not root_device_name:
            root_device_name = find_disk_dev_for_disk_bus({}, root_device_bus)
        return {'bus': root_device_bus,
                'type': root_device_type,
                'dev': block_device.strip_dev(root_device_name),
                'boot_index': '1'}
    if not get_device_name(root_bdm) and root_device_name:
        # Work on a copy so the caller's BDM is not mutated.
        root_bdm = root_bdm.copy()
        # it can happen, eg for libvirt+Xen, that the root_device_name is
        # incompatible with the disk bus. In that case fix the root_device_name
        if virt_type == 'xen':
            dev_prefix = get_dev_prefix_for_disk_bus(disk_bus)
            if not root_device_name.startswith(dev_prefix):
                letter = block_device.get_device_letter(root_device_name)
                root_device_name = '%s%s' % (dev_prefix, letter)
        root_bdm['device_name'] = root_device_name
    return get_info_from_bdm(instance, virt_type, image_meta,
                             root_bdm, {}, disk_bus)
def default_device_names(virt_type, context, instance, block_device_info,
                         image_meta):
    """Assign missing device names to the instance's BDMs and persist them.

    NOTE(review): this relies on get_disk_info (defined elsewhere in this
    module) mutating the driver BDMs as a side effect before they are
    saved -- its return value is discarded here; confirm.
    """
    get_disk_info(virt_type, instance, image_meta, block_device_info)
    # Persist every ephemeral, swap and volume BDM that may have been
    # assigned a device name above.
    for driver_bdm in itertools.chain(block_device_info['ephemerals'],
                                      [block_device_info['swap']] if
                                      block_device_info['swap'] else [],
                                      block_device_info['block_device_mapping']):
        driver_bdm.save()
def get_default_ephemeral_info(instance, disk_bus, block_device_info, mapping):
    """Pick disk info for the default ephemeral disk, or None.

    Returns None when the instance has no ephemeral storage, when the
    BDMs already define ephemerals, or when the chosen device is already
    taken by a volume mapping.
    """
    ephemerals = driver.block_device_info_get_ephemerals(block_device_info)
    if instance.ephemeral_gb and instance.ephemeral_gb > 0 and not ephemerals:
        info = get_next_disk_info(mapping, disk_bus)
        if not block_device.volume_in_mapping(info['dev'], block_device_info):
            return info
    return None
def update_bdm(bdm, info):
    """Copy device name, bus and type from a disk_info dict into a bdm.

    An existing device name on the bdm wins over the one in info.
    """
    if 'device_name' in bdm:
        device_name_field = 'device_name'
    else:
        device_name_field = 'mount_device'
    # Do not update the device name if it was already present
    device_name = (bdm.get(device_name_field) or
                   block_device.prepend_dev(info['dev']))
    bdm.update({
        device_name_field: device_name,
        'disk_bus': info['bus'],
        'device_type': info['type'],
    })
def get_disk_mapping(virt_type, instance,
                     disk_bus, cdrom_bus,
                     image_meta,
                     block_device_info=None,
                     rescue=False):
    """Determine how to map default disks to the virtual machine.

    This is about figuring out whether the default 'disk',
    'disk.local', 'disk.swap' and 'disk.config' images have
    been overridden by the block device mapping.

    :param virt_type: hypervisor virt type (e.g. 'kvm', 'xen')
    :param instance: the instance the disks are attached to
    :param disk_bus: default bus for hard disks
    :param cdrom_bus: default bus for CDROM devices
    :param image_meta: metadata of the backing image
    :param block_device_info: driver block device info, if any
    :param rescue: True when building the mapping for a rescue boot

    Returns the guest disk mapping for the devices.
    """
    mapping = {}

    if rescue:
        # A rescue boot only attaches the rescue image, the original root
        # disk and (optionally) a config drive -- no other block devices.
        rescue_info = get_next_disk_info(mapping,
                                         disk_bus, boot_index=1)
        mapping['disk.rescue'] = rescue_info
        mapping['root'] = rescue_info

        os_info = get_next_disk_info(mapping,
                                     disk_bus)
        mapping['disk'] = os_info

        if configdrive.required_by(instance):
            device_type = get_config_drive_type()
            disk_bus = get_disk_bus_for_device_type(instance,
                                                    virt_type,
                                                    image_meta,
                                                    device_type)
            config_info = get_next_disk_info(mapping,
                                             disk_bus,
                                             device_type)
            mapping['disk.config.rescue'] = config_info

        return mapping

    # Device names already assigned by the caller; passed to the helpers
    # below so the same name is never handed out twice.
    pre_assigned_device_names = \
        [block_device.strip_dev(get_device_name(bdm)) for bdm in itertools.chain(
            driver.block_device_info_get_ephemerals(block_device_info),
            [driver.block_device_info_get_swap(block_device_info)],
            driver.block_device_info_get_mapping(block_device_info))
         if get_device_name(bdm)]

    # NOTE (ndipanov): root_bdm can be None when we boot from image
    # as there is no driver representation of local targeted images
    # and they will not be in block_device_info list.
    root_bdm = block_device.get_root_bdm(
        driver.block_device_info_get_mapping(block_device_info))

    root_device_name = block_device.strip_dev(
        driver.block_device_info_get_root_device(block_device_info))
    root_info = get_root_info(
        instance, virt_type, image_meta, root_bdm,
        disk_bus, cdrom_bus, root_device_name)

    mapping['root'] = root_info
    # NOTE (ndipanov): This implicitly relies on image->local BDMs not
    # being considered in the driver layer - so missing
    # bdm with boot_index 0 means - use image, unless it was
    # overridden. This can happen when using legacy syntax and
    # no root_device_name is set on the instance.
    if not root_bdm and not block_device.volume_in_mapping(root_info['dev'],
                                                          block_device_info):
        mapping['disk'] = root_info
    elif root_bdm:
        # NOTE (ft): If device name is not set in root bdm, root_info has a
        # generated one. We have to copy device name to root bdm to prevent its
        # second generation in loop through bdms. If device name is already
        # set, nothing is changed.
        update_bdm(root_bdm, root_info)

    default_eph = get_default_ephemeral_info(instance, disk_bus,
                                             block_device_info, mapping)
    if default_eph:
        mapping['disk.local'] = default_eph

    for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
            block_device_info)):
        eph_info = get_info_from_bdm(
            instance, virt_type, image_meta, eph, mapping, disk_bus,
            assigned_devices=pre_assigned_device_names)
        mapping[get_eph_disk(idx)] = eph_info
        update_bdm(eph, eph_info)

    swap = driver.block_device_info_get_swap(block_device_info)
    if swap and swap.get('swap_size', 0) > 0:
        swap_info = get_info_from_bdm(
            instance, virt_type, image_meta,
            swap, mapping, disk_bus)
        mapping['disk.swap'] = swap_info
        update_bdm(swap, swap_info)
    elif instance.get_flavor()['swap'] > 0:
        # No explicit swap BDM but the flavor requests swap: give it a
        # device unless a volume already claimed that device name.
        swap_info = get_next_disk_info(mapping, disk_bus,
                                       assigned_devices=pre_assigned_device_names)
        if not block_device.volume_in_mapping(swap_info['dev'],
                                              block_device_info):
            mapping['disk.swap'] = swap_info

    block_device_mapping = driver.block_device_info_get_mapping(
        block_device_info)
    for bdm in block_device_mapping:
        vol_info = get_info_from_bdm(
            instance, virt_type, image_meta, bdm, mapping,
            assigned_devices=pre_assigned_device_names)
        mapping[block_device.prepend_dev(vol_info['dev'])] = vol_info
        update_bdm(bdm, vol_info)

    if configdrive.required_by(instance):
        device_type = get_config_drive_type()
        disk_bus = get_disk_bus_for_device_type(instance,
                                                virt_type,
                                                image_meta,
                                                device_type)
        config_info = get_next_disk_info(mapping,
                                         disk_bus,
                                         device_type)
        mapping['disk.config'] = config_info

    return mapping
def get_disk_info(virt_type, instance, image_meta,
                  block_device_info=None, rescue=False):
    """Determine guest disk mapping info.

    This is a wrapper around get_disk_mapping, which
    also returns the chosen disk_bus and cdrom_bus.
    The returned data is in a dict

        - disk_bus: the bus for harddisks
        - cdrom_bus: the bus for CDROMs
        - mapping: the disk mapping

    Returns the disk mapping disk.
    """
    hdd_bus = get_disk_bus_for_device_type(instance, virt_type,
                                           image_meta, "disk")
    cd_bus = get_disk_bus_for_device_type(instance, virt_type,
                                          image_meta, "cdrom")
    return {
        'disk_bus': hdd_bus,
        'cdrom_bus': cd_bus,
        'mapping': get_disk_mapping(virt_type, instance,
                                    hdd_bus, cd_bus,
                                    image_meta,
                                    block_device_info,
                                    rescue),
    }
def get_boot_order(disk_info):
    """Return the ordered, de-duplicated list of guest boot devices.

    Devices are taken from the disk mapping (excluding the 'root' alias),
    ordered by their 'boot_index', translated to boot device names via
    BOOT_DEV_FOR_TYPE, and de-duplicated preserving first occurrence.
    """
    entries = [info for name, info in disk_info['mapping'].items()
               if name != 'root' and info.get('boot_index') is not None]
    entries.sort(key=lambda entry: entry['boot_index'])

    order = []
    seen = set()
    for entry in entries:
        dev = BOOT_DEV_FOR_TYPE[entry['type']]
        if dev not in seen:
            seen.add(dev)
            order.append(dev)
    return order
| |
# -*- coding: utf-8 -*-
'''
Manage events
Events are all fired off via a zeromq 'pub' socket, and listened to with local
zeromq 'sub' sockets
All of the formatting is self contained in the event module, so we should be
able to modify the structure in the future since the same module used to read
events is the same module used to fire off events.
Old style event messages were comprised of two parts delimited at the 20 char
point. The first 20 characters are used for the zeromq subscriber to match
publications and 20 characters was chosen because it was at the time a few more
characters than the length of a jid (Job ID). Any tags of length less than 20
characters were padded with "|" chars out to 20 characters.
Although not explicit, the data for an event comprised a python dict that was
serialized by msgpack.
New style event messages support event tags longer than 20 characters while
still being backwards compatible with old style tags.
The longer tags better enable name spaced event tags which tend to be longer.
Moreover, the constraint that the event data be a python dict is now an
explicit constraint and fire-event will now raise a ValueError if not. Tags
must be ascii safe strings, that is, have values less than 0x80
Since the msgpack dict (map) indicators have values greater than or equal to
0x80 it can be unambiguously determined if the start of data is at char 21
or not.
In the new style, when the tag is longer than 20 characters, an end of tag
string is appended to the tag given by the string constant TAGEND, that is, two
line feeds '\n\n'. When the tag is less than 20 characters then the tag is
padded with pipes "|" out to 20 characters as before. When the tag is exactly
20 characters no padding is done.
The get_event method intelligently figures out if the tag is longer than 20
characters.
The convention for namespacing is to use the TAGPARTER character "/" as the
name space delimiter. The name space "salt" is reserved by SaltStack for
internal events.
For example:
Namespaced tag
    'salt/runner/manage/status/start'
'''
from __future__ import absolute_import
# Import python libs
import os
import time
import errno
import signal
import fnmatch
import hashlib
import logging
import datetime
import multiprocessing
from collections import MutableMapping
# Import third party libs
import salt.ext.six as six
try:
import zmq
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
import zmq.eventloop.zmqstream
except ImportError:
# Local mode does not need zmq
pass
# Import salt libs
import salt.config
import salt.payload
import salt.loader
import salt.utils
import salt.utils.cache
import salt.utils.dicttrim
import salt.utils.process
import salt.utils.zeromq
log = logging.getLogger(__name__)

# The SUB_EVENT set is for functions that require events fired based on
# component executions, like the state system
SUB_EVENT = set([
    'state.highstate',
    'state.sls',
])

# Tag framing/namespacing constants used by fire_event()/unpack()
TAGEND = '\n\n'  # long tag delimiter
TAGPARTER = '/'  # name spaced tag delimiter
SALT = 'salt'  # base prefix for all salt/ events
# dict map of namespaced base tag prefixes for salt events
TAGS = {
    'auth': 'auth',  # prefix for all salt/auth events
    'job': 'job',  # prefix for all salt/job events (minion jobs)
    'key': 'key',  # prefix for all salt/key events
    'minion': 'minion',  # prefix for all salt/minion events
                         # (minion sourced events)
    'syndic': 'syndic',  # prefix for all salt/syndic events
                         # (syndic minion sourced events)
    'run': 'run',  # prefix for all salt/run events (salt runners)
    'wheel': 'wheel',  # prefix for all salt/wheel events
    'cloud': 'cloud',  # prefix for all salt/cloud events
    'fileserver': 'fileserver',  # prefix for all salt/fileserver events
    'queue': 'queue',  # prefix for all salt/queue events
}
def get_event(node, sock_dir=None, transport='zeromq', opts=None, listen=True):
    '''
    Return an event object suitable for the named transport

    :param node: 'master' or anything else (treated as a minion)
    :param sock_dir: socket directory; defaults to opts['sock_dir']
    :param transport: 'zeromq', 'tcp' or 'raet'
    :param opts: config options dict
    :param listen: whether the event object should listen for events
    :raises ValueError: if the transport is not recognised
    '''
    sock_dir = sock_dir or opts['sock_dir']
    # TODO: AIO core is separate from transport
    if transport in ('zeromq', 'tcp'):
        if node == 'master':
            return MasterEvent(sock_dir, opts, listen=listen)
        return SaltEvent(node, sock_dir, opts, listen=listen)
    elif transport == 'raet':
        import salt.utils.raetevent
        return salt.utils.raetevent.RAETEvent(node,
                                              sock_dir=sock_dir,
                                              listen=listen,
                                              opts=opts)
    # Previously an unknown transport fell through and implicitly returned
    # None, which surfaced later as a confusing AttributeError at the call
    # site. Fail fast with a clear message instead.
    raise ValueError('Unsupported transport {0!r}'.format(transport))
def get_master_event(opts, sock_dir, listen=True):
    '''
    Return an event object suitable for the named transport

    :param opts: config options dict; opts['transport'] selects the backend
    :param sock_dir: socket directory for the master event sockets
    :param listen: whether the event object should listen for events
    :raises ValueError: if opts['transport'] is not recognised
    '''
    # TODO: AIO core is separate from transport
    if opts['transport'] in ('zeromq', 'tcp'):
        return MasterEvent(sock_dir, opts, listen=listen)
    elif opts['transport'] == 'raet':
        import salt.utils.raetevent
        return salt.utils.raetevent.MasterEvent(
            opts=opts, sock_dir=sock_dir, listen=listen
        )
    # Previously an unknown transport fell through and implicitly returned
    # None; fail fast with a clear message instead.
    raise ValueError('Unsupported transport {0!r}'.format(opts['transport']))
def tagify(suffix='', prefix='', base=SALT):
    '''
    Convenience function to build a namespaced event tag string by joining
    base, prefix and suffix with the TAGPARTER character.

    If prefix is a valid key in TAGS then the mapped value is used,
    otherwise the prefix string itself.
    If suffix is a list then each of its elements becomes an individual
    tag component, otherwise the suffix string is a single component.
    Empty components are dropped from the result.
    '''
    # list-like suffixes (anything with .append) contribute one component
    # per element; everything else is treated as a single string component
    if hasattr(suffix, 'append'):
        suffix_parts = list(suffix)
    else:
        suffix_parts = [suffix]

    components = [base, TAGS.get(prefix, prefix)] + suffix_parts
    return TAGPARTER.join([comp for comp in components if comp])
class SaltEvent(object):
    '''
    Warning! Use the get_event function or the code will not be
    RAET compatible
    The base class used to manage salt events
    '''
    def __init__(self, node, sock_dir=None, opts=None, listen=True):
        # node selects which default option set is used as the base:
        # 'master' -> DEFAULT_MASTER_OPTS, anything else -> minion defaults.
        self.serial = salt.payload.Serial({'serial': 'msgpack'})
        self.context = zmq.Context()
        self.poller = zmq.Poller()
        # Connection state flags for the SUB (pub) and PUSH (pull) sockets
        self.cpub = False
        self.cpush = False

        if opts is None:
            opts = {}
        if node == 'master':
            self.opts = salt.config.DEFAULT_MASTER_OPTS.copy()
        else:
            self.opts = salt.config.DEFAULT_MINION_OPTS.copy()
        self.opts.update(opts)

        if sock_dir is None:
            sock_dir = self.opts['sock_dir']
        else:
            self.opts['sock_dir'] = sock_dir

        # Windows has no unix domain sockets, so force TCP transport there.
        # NOTE(review): hasattr() on a dict never finds the 'ipc_mode' key,
        # so this condition reduces to is_windows() alone -- it looks like a
        # key-membership check was intended; confirm before changing.
        if salt.utils.is_windows() and not hasattr(self.opts, 'ipc_mode'):
            self.opts['ipc_mode'] = 'tcp'
        self.puburi, self.pulluri = self.__load_uri(sock_dir, node)
        self.pending_tags = []
        self.pending_events = []
        # NOTE(review): the ``listen`` argument is accepted but never
        # consulted here -- connect_pub() always runs.
        if not self.cpub:
            self.connect_pub()
        self.__load_cache_regex()

    @classmethod
    def __load_cache_regex(cls):
        '''
        Initialize the regular expression cache and put it in the
        class namespace. The regex search strings will be prepend with '^'
        '''
        # This is in the class namespace, to minimize cache memory
        # usage and maximize cache hits
        # The prepend='^' is to reduce differences in behavior between
        # the default 'startswith' and the optional 'regex' match_type
        cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^')
    def __load_uri(self, sock_dir, node):
        '''
        Return the string URI for the location of the pull and pub sockets to
        use for firing and listening to events

        :param sock_dir: directory holding the ipc socket files
        :param node: 'master' or minion; selects the socket names/ports
        :return: (puburi, pulluri) tuple
        '''
        if node == 'master':
            if self.opts['ipc_mode'] == 'tcp':
                puburi = 'tcp://127.0.0.1:{0}'.format(
                    self.opts['tcp_master_pub_port']
                )
                pulluri = 'tcp://127.0.0.1:{0}'.format(
                    self.opts['tcp_master_pull_port']
                )
            else:
                puburi = 'ipc://{0}'.format(os.path.join(
                    sock_dir,
                    'master_event_pub.ipc'
                ))
                salt.utils.zeromq.check_ipc_path_max_len(puburi)
                pulluri = 'ipc://{0}'.format(os.path.join(
                    sock_dir,
                    'master_event_pull.ipc'
                ))
                salt.utils.zeromq.check_ipc_path_max_len(pulluri)
        else:
            if self.opts['ipc_mode'] == 'tcp':
                puburi = 'tcp://127.0.0.1:{0}'.format(
                    self.opts['tcp_pub_port']
                )
                pulluri = 'tcp://127.0.0.1:{0}'.format(
                    self.opts['tcp_pull_port']
                )
            else:
                hash_type = getattr(hashlib, self.opts['hash_type'])
                # Only use the first 10 chars to keep longer hashes from exceeding the
                # max socket path length.
                id_hash = hash_type(salt.utils.to_bytes(self.opts['id'])).hexdigest()[:10]
                puburi = 'ipc://{0}'.format(os.path.join(
                    sock_dir,
                    'minion_event_{0}_pub.ipc'.format(id_hash)
                ))
                salt.utils.zeromq.check_ipc_path_max_len(puburi)
                pulluri = 'ipc://{0}'.format(os.path.join(
                    sock_dir,
                    'minion_event_{0}_pull.ipc'.format(id_hash)
                ))
                salt.utils.zeromq.check_ipc_path_max_len(pulluri)
        log.debug(
            '{0} PUB socket URI: {1}'.format(self.__class__.__name__, puburi)
        )
        log.debug(
            '{0} PULL socket URI: {1}'.format(self.__class__.__name__, pulluri)
        )
        return puburi, pulluri
def subscribe(self, tag=None, match_type=None):
'''
Subscribe to events matching the passed tag.
If you do not subscribe to a tag, events will be discarded by calls to
get_event that request a different tag. In contexts where many different
jobs are outstanding it is important to subscribe to prevent one call
to get_event from discarding a response required by a subsequent call
to get_event.
'''
if tag is None:
return
match_func = self._get_match_func(match_type)
self.pending_tags.append([tag, match_func])
def unsubscribe(self, tag, match_type=None):
'''
Un-subscribe to events matching the passed tag.
'''
if tag is None:
return
match_func = self._get_match_func(match_type)
self.pending_tags.remove([tag, match_func])
old_events = self.pending_events
self.pending_events = []
for evt in old_events:
if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags):
self.pending_events.append(evt)
    def connect_pub(self):
        '''
        Establish the publish connection
        '''
        self.sub = self.context.socket(zmq.SUB)
        try:
            # zmq.HWM only exists on older pyzmq; newer releases split it
            # into separate send/receive high-water marks (AttributeError
            # fallback below).
            self.sub.setsockopt(zmq.HWM, self.opts['salt_event_pub_hwm'])
        except AttributeError:
            self.sub.setsockopt(zmq.SNDHWM, self.opts['salt_event_pub_hwm'])
            self.sub.setsockopt(zmq.RCVHWM, self.opts['salt_event_pub_hwm'])
        self.sub.connect(self.puburi)
        self.poller.register(self.sub, zmq.POLLIN)
        # Empty subscription prefix: receive every published event
        self.sub.setsockopt_string(zmq.SUBSCRIBE, u'')
        self.sub.setsockopt(zmq.LINGER, 5000)
        self.cpub = True

    def connect_pull(self, timeout=1000):
        '''
        Establish a connection with the event pull socket
        Set the linger timeout of the socket options to timeout (in milliseconds)
        Default timeout is 1000 ms
        '''
        self.push = self.context.socket(zmq.PUSH)
        self.push.setsockopt(zmq.LINGER, timeout)
        self.push.connect(self.pulluri)
        self.cpush = True

    @classmethod
    def unpack(cls, raw, serial=None):
        # Split the tag from the msgpack payload at the first TAGEND
        # ('\n\n') delimiter and deserialize the payload.
        if serial is None:
            serial = salt.payload.Serial({'serial': 'msgpack'})
        mtag, sep, mdata = raw.partition(TAGEND)  # split tag from data
        data = serial.loads(mdata)
        return mtag, data
def _get_match_func(self, match_type=None):
if match_type is None:
match_type = self.opts['event_match_type']
return getattr(self, '_match_tag_{0}'.format(match_type), None)
def _check_pending(self, tag, match_func=None):
"""Check the pending_events list for events that match the tag
:param tag: The tag to search for
:type tag: str
:param tags_regex: List of re expressions to search for also
:type tags_regex: list[re.compile()]
:return:
"""
if match_func is None:
match_func = self._get_match_func()
old_events = self.pending_events
self.pending_events = []
ret = None
for evt in old_events:
if match_func(evt['tag'], tag):
if ret is None:
ret = evt
log.trace('get_event() returning cached event = {0}'.format(ret))
else:
self.pending_events.append(evt)
elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags):
self.pending_events.append(evt)
else:
log.trace('get_event() discarding cached event that no longer has any subscriptions = {0}'.format(evt))
return ret
@staticmethod
def _match_tag_startswith(event_tag, search_tag):
'''
Check if the event_tag matches the search check.
Uses startswith to check.
Return True (matches) or False (no match)
'''
return event_tag.startswith(search_tag)
@staticmethod
def _match_tag_endswith(event_tag, search_tag):
'''
Check if the event_tag matches the search check.
Uses endswith to check.
Return True (matches) or False (no match)
'''
return event_tag.endswith(search_tag)
@staticmethod
def _match_tag_find(event_tag, search_tag):
'''
Check if the event_tag matches the search check.
Uses find to check.
Return True (matches) or False (no match)
'''
return event_tag.find(search_tag) >= 0
def _match_tag_regex(self, event_tag, search_tag):
'''
Check if the event_tag matches the search check.
Uses regular expression search to check.
Return True (matches) or False (no match)
'''
return self.cache_regex.get(search_tag).search(event_tag) is not None
def _match_tag_fnmatch(self, event_tag, search_tag):
'''
Check if the event_tag matches the search check.
Uses fnmatch to check.
Return True (matches) or False (no match)
'''
return fnmatch.fnmatch(event_tag, search_tag)
    def _get_event(self, wait, tag, match_func=None, no_block=False):
        '''
        Receive events from the wire until one matches *tag* or time runs out.

        :param wait: seconds to wait; 0 (with no_block False) loops forever
        :param tag: tag to match using match_func
        :param match_func: comparison callable; defaults per opts
        :param no_block: poll exactly once and return immediately
        :return: the matching event dict, or None on timeout
        '''
        if match_func is None:
            match_func = self._get_match_func()
        start = time.time()
        timeout_at = start + wait
        run_once = False
        if no_block is True:
            wait = 0
        # run_once never flips when no_block is False, so wait == 0 there
        # means "loop until a match arrives" (block forever).
        while (run_once is False and not wait) or time.time() <= timeout_at:
            if no_block is True:
                if run_once is True:
                    break
                # Trigger that at least a single iteration has gone through
                run_once = True
            try:
                # convert to milliseconds
                socks = dict(self.poller.poll(wait * 1000))
                if socks.get(self.sub) != zmq.POLLIN:
                    continue
                ret = self.get_event_block()
            except zmq.ZMQError as ex:
                # EAGAIN/EINTR are transient (nothing ready / interrupted
                # syscall); everything else is a real failure.
                if ex.errno == errno.EAGAIN or ex.errno == errno.EINTR:
                    continue
                else:
                    raise
            if not match_func(ret['tag'], tag):
                # tag not match
                if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags):
                    log.trace('get_event() caching unwanted event = {0}'.format(ret))
                    self.pending_events.append(ret)
                if wait:  # only update the wait timeout if we had one
                    wait = timeout_at - time.time()
                continue
            log.trace('get_event() received = {0}'.format(ret))
            return ret
        log.trace('_get_event() waited {0} seconds and received nothing'.format(wait * 1000))
        return None
    def get_event(self,
                  wait=5,
                  tag='',
                  full=False,
                  use_pending=None,
                  pending_tags=None,
                  match_type=None,
                  no_block=False):
        '''
        Get a single publication.
        IF no publication available THEN block for up to wait seconds
        AND either return publication OR None IF no publication available.

        IF wait is 0 then block forever.

        tag
            Only return events matching the given tag. If not specified, or set
            to an empty string, all events are returned. It is recommended to
            always be selective on what is to be returned in the event that
            multiple requests are being multiplexed

        match_type
            Set the function to match the search tag with event tags.
             - 'startswith' : search for event tags that start with tag
             - 'endswith' : search for event tags that end with tag
             - 'find' : search for event tags that contain tag
             - 'regex' : regex search '^' + tag event tags
             - 'fnmatch' : fnmatch tag event tags matching
            Default is opts['event_match_type'] or 'startswith'

            .. versionadded:: 2015.8.0

        no_block
            Define if getting the event should be a blocking call or not.
            Defaults to False to keep backwards compatibility.

            .. versionadded:: 2015.8.0

        Notes:

        Searches cached publications first. If no cached publications are found
        that match the given tag specification, new publications are received
        and checked.

        If a publication is received that does not match the tag specification,
        it is DISCARDED unless it is subscribed to via subscribe() which will
        cause it to be cached.

        If a caller is not going to call get_event immediately after sending a
        request, it MUST subscribe the result to ensure the response is not lost
        should other regions of code call get_event for other purposes.
        '''
        # use_pending/pending_tags are deprecated no-ops kept for interface
        # compatibility; warn if callers still pass them.
        if use_pending is not None:
            salt.utils.warn_until(
                'Nitrogen',
                'The \'use_pending\' keyword argument is deprecated and is simply ignored. '
                'Please stop using it since it\'s support will be removed in {version}.'
            )
        if pending_tags is not None:
            salt.utils.warn_until(
                'Nitrogen',
                'The \'pending_tags\' keyword argument is deprecated and is simply ignored. '
                'Please stop using it since it\'s support will be removed in {version}.'
            )
        match_func = self._get_match_func(match_type)

        # Cached events first, then fall back to receiving from the socket
        ret = self._check_pending(tag, match_func)
        if ret is None:
            ret = self._get_event(wait, tag, match_func, no_block)

        if ret is None or full:
            return ret
        else:
            return ret['data']
    def get_event_noblock(self):
        '''Get the raw event without blocking or any other niceties
        '''
        if not self.cpub:
            self.connect_pub()
        # NOBLOCK: raises zmq.ZMQError (EAGAIN) when no event is queued
        raw = self.sub.recv(zmq.NOBLOCK)
        mtag, data = self.unpack(raw, self.serial)
        return {'data': data, 'tag': mtag}

    def get_event_block(self):
        '''Get the raw event in a blocking fashion
        Slower, but decreases the possibility of dropped events
        '''
        # NOTE(review): unlike get_event_noblock() this does not check
        # self.cpub first -- it assumes connect_pub() already ran.
        raw = self.sub.recv()
        mtag, data = self.unpack(raw, self.serial)
        return {'data': data, 'tag': mtag}
def iter_events(self, tag='', full=False, match_type=None):
'''
Creates a generator that continuously listens for events
'''
while True:
data = self.get_event(tag=tag, full=full, match_type=match_type)
if data is None:
continue
yield data
    def fire_event(self, data, tag, timeout=1000):
        '''
        Send a single event into the publisher with payload dict "data" and
        event identifier "tag"

        The default is 1000 ms
        Note the linger timeout must be at least as long as this timeout
        '''
        if not str(tag):  # no empty tags allowed
            raise ValueError('Empty tag.')

        if not isinstance(data, MutableMapping):  # data must be dict
            raise ValueError('Dict object expected, not "{0!r}".'.format(data))

        if not self.cpush:
            self.connect_pull(timeout=timeout)

        # Timestamp every event so consumers can order/age them
        data['_stamp'] = datetime.datetime.utcnow().isoformat()

        tagend = TAGEND
        # Serialize first, then trim oversized values so the event stays
        # within opts['max_event_size']
        serialized_data = salt.utils.dicttrim.trim_dict(
            self.serial.dumps(data),
            self.opts['max_event_size'],
            is_msgpacked=True,
        )
        log.debug('Sending event - data = {0}'.format(data))
        # Wire format: <tag><TAGEND><msgpack payload>
        event = '{0}{1}{2}'.format(tag, tagend, serialized_data)
        try:
            self.push.send(salt.utils.to_bytes(event, 'utf-8'))
        except Exception as ex:
            log.debug(ex)
            raise
        return True

    def fire_master(self, data, tag, timeout=1000):
        '''
        Send a single event to the master, with the payload "data" and the
        event identifier "tag".

        Default timeout is 1000ms
        '''
        # Wrapped payload fired locally under the 'fire_master' tag;
        # presumably a minion-side forwarder relays it to the master --
        # confirm against the consumer of this tag.
        msg = {
            'tag': tag,
            'data': data,
            'events': None,
            'pretag': None
        }
        return self.fire_event(msg, "fire_master", timeout)
    def destroy(self, linger=5000):
        '''
        Close the sockets, unregister them from the poller, and terminate
        the ZMQ context.

        :param linger: LINGER (milliseconds) applied to any socket still
            registered with the poller before closing it
        '''
        if self.cpub is True and self.sub.closed is False:
            # Wait at most 2.5 secs to send any remaining messages in the
            # socket or the context.term() below will hang indefinitely.
            # See https://github.com/zeromq/pyzmq/issues/102
            self.sub.close()
        if self.cpush is True and self.push.closed is False:
            self.push.close()
        # If sockets are not unregistered from a poller, nothing which touches
        # that poller gets garbage collected. The Poller itself, its
        # registered sockets and the Context
        if isinstance(self.poller.sockets, dict):
            # poller.sockets is a dict of socket -> flags on some pyzmq
            # versions, a list of (socket, flags) pairs on others
            for socket in six.iterkeys(self.poller.sockets):
                if socket.closed is False:
                    socket.setsockopt(zmq.LINGER, linger)
                    socket.close()
                self.poller.unregister(socket)
        else:
            for socket in self.poller.sockets:
                if socket[0].closed is False:
                    socket[0].setsockopt(zmq.LINGER, linger)
                    socket[0].close()
                self.poller.unregister(socket[0])
        if self.context.closed is False:
            self.context.term()

        # Hardcore destruction
        if hasattr(self.context, 'destroy'):
            self.context.destroy(linger=1)

        # https://github.com/zeromq/pyzmq/issues/173#issuecomment-4037083
        # Assertion failed: get_load () == 0 (poller_base.cpp:32)
        time.sleep(0.025)
    def fire_ret_load(self, load):
        '''
        Fire events based on information in the return load

        For state-system functions (SUB_EVENT) that returned a bad retcode,
        fires a per-failed-state event (legacy tag) plus a namespaced
        salt/job/<jid>/sub/<id>/error/<fun> event.
        '''
        if load.get('retcode') and load.get('fun'):
            # Minion fired a bad retcode, fire an event
            if load['fun'] in SUB_EVENT:
                try:
                    for tag, data in six.iteritems(load.get('return', {})):
                        data['retcode'] = load['retcode']
                        tags = tag.split('_|-')
                        if data.get('result') is False:
                            self.fire_event(
                                data,
                                '{0}.{1}'.format(tags[0], tags[-1])
                            )  # old dup event
                            data['jid'] = load['jid']
                            data['id'] = load['id']
                            data['success'] = False
                            data['return'] = 'Error: {0}.{1}'.format(
                                tags[0], tags[-1])
                            data['fun'] = load['fun']
                            data['user'] = load['user']
                            self.fire_event(
                                data,
                                tagify([load['jid'],
                                        'sub',
                                        load['id'],
                                        'error',
                                        load['fun']],
                                       'job'))
                except Exception:
                    # Deliberate best-effort: event generation must never
                    # break return processing.
                    pass

    def __del__(self):
        # skip exceptions in destroy-- since destroy() doesn't cover interpreter
        # shutdown-- where globals start going missing
        try:
            self.destroy()
        except:  # pylint: disable=W0702
            pass
class MasterEvent(SaltEvent):
    '''
    Warning! Use the get_event function or the code will not be
    RAET compatible
    Create a master event management object
    '''
    def __init__(self, sock_dir, opts=None, listen=True):
        # Fixes the node to 'master' so the master socket URIs are used
        super(MasterEvent, self).__init__('master', sock_dir, opts, listen=listen)


class LocalClientEvent(MasterEvent):
    '''
    Warning! Use the get_event function or the code will not be
    RAET compatible
    This class is just used to differentiate who is handling the events,
    specially on logs, but it's the same as MasterEvent.
    '''
class NamespacedEvent(object):
    '''
    A wrapper for sending events within a specific base namespace
    '''
    def __init__(self, event, base, print_func=None):
        # event: the underlying SaltEvent-like object used to publish
        # base: namespace prefix applied to every tag fired through here
        # print_func: optional callable invoked with (tag, data) first
        self.event = event
        self.base = base
        self.print_func = print_func

    def fire_event(self, data, tag):
        if self.print_func is not None:
            self.print_func(tag, data)
        self.event.fire_event(data, tagify(tag, base=self.base))


class MinionEvent(SaltEvent):
    '''
    Warning! Use the get_event function or the code will not be
    RAET compatible
    Create a minion event management object
    '''
    def __init__(self, opts, listen=True):
        # Fixes the node to 'minion'; sock_dir comes from the minion opts
        super(MinionEvent, self).__init__(
            'minion', sock_dir=opts.get('sock_dir'), opts=opts, listen=listen)
class AsyncEventPublisher(object):
    '''
    An event publisher class intended to run in an ioloop (within a single process)

    TODO: remove references to "minion_event" whenever we need to use this for other things
    '''
    def __init__(self, opts, publish_handler, io_loop=None):
        self.opts = salt.config.DEFAULT_MINION_OPTS.copy()
        self.opts.update(opts)
        # Callable scheduled (via the ioloop) for every republished package
        self.publish_handler = publish_handler
        self.io_loop = io_loop or zmq.eventloop.ioloop.ZMQIOLoop()
        self.context = zmq.Context()

        hash_type = getattr(hashlib, self.opts['hash_type'])
        # Only use the first 10 chars to keep longer hashes from exceeding the
        # max socket path length.
        id_hash = hash_type(salt.utils.to_bytes(self.opts['id'])).hexdigest()[:10]
        epub_sock_path = os.path.join(
            self.opts['sock_dir'],
            'minion_event_{0}_pub.ipc'.format(id_hash)
        )
        # Remove stale socket files from a previous run before rebinding
        if os.path.exists(epub_sock_path):
            os.unlink(epub_sock_path)
        epull_sock_path = os.path.join(
            self.opts['sock_dir'],
            'minion_event_{0}_pull.ipc'.format(id_hash)
        )
        if os.path.exists(epull_sock_path):
            os.unlink(epull_sock_path)

        self.epub_sock = self.context.socket(zmq.PUB)

        if self.opts['ipc_mode'] == 'tcp':
            epub_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pub_port']
            )
            epull_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pull_port']
            )
        else:
            epub_uri = 'ipc://{0}'.format(epub_sock_path)
            salt.utils.zeromq.check_ipc_path_max_len(epub_uri)
            epull_uri = 'ipc://{0}'.format(epull_sock_path)
            salt.utils.zeromq.check_ipc_path_max_len(epull_uri)

        log.debug(
            '{0} PUB socket URI: {1}'.format(
                self.__class__.__name__, epub_uri
            )
        )
        log.debug(
            '{0} PULL socket URI: {1}'.format(
                self.__class__.__name__, epull_uri
            )
        )

        # Check to make sure the sock_dir is available, create if not
        # NOTE(review): salt.syspaths is not imported at the top of this
        # module; presumably pulled in transitively via salt.config --
        # confirm.
        default_minion_sock_dir = os.path.join(
            salt.syspaths.SOCK_DIR,
            'minion'
        )
        minion_sock_dir = self.opts.get('sock_dir', default_minion_sock_dir)

        if not os.path.isdir(minion_sock_dir):
            # Let's try to create the directory defined on the configuration
            # file
            try:
                os.makedirs(minion_sock_dir, 0o755)
            except OSError as exc:
                log.error('Could not create SOCK_DIR: {0}'.format(exc))
                # Let's not fail yet and try using the default path
                if minion_sock_dir == default_minion_sock_dir:
                    # We're already trying the default system path, stop now!
                    raise
                if not os.path.isdir(default_minion_sock_dir):
                    try:
                        os.makedirs(default_minion_sock_dir, 0o755)
                    except OSError as exc:
                        log.error('Could not create SOCK_DIR: {0}'.format(exc))
                        # Let's stop at this stage
                        raise

        # Create the pull socket
        self.epull_sock = self.context.socket(zmq.PULL)

        # Securely bind the event sockets: the umask keeps the ipc files
        # owner-only while they are created
        if self.opts['ipc_mode'] != 'tcp':
            old_umask = os.umask(0o177)
        try:
            log.info('Starting pub socket on {0}'.format(epub_uri))
            self.epub_sock.bind(epub_uri)
            log.info('Starting pull socket on {0}'.format(epull_uri))
            self.epull_sock.bind(epull_uri)
        finally:
            if self.opts['ipc_mode'] != 'tcp':
                os.umask(old_umask)

        # Republish everything received on the pull socket
        self.stream = zmq.eventloop.zmqstream.ZMQStream(self.epull_sock, io_loop=self.io_loop)
        self.stream.on_recv(self.handle_publish)

    def handle_publish(self, package):
        '''
        Get something from epull, publish it out epub, and return the package (or None)
        '''
        # on_recv delivers a list of message frames; only the first is used
        package = package[0]
        try:
            self.epub_sock.send(package)
            self.io_loop.spawn_callback(self.publish_handler, package)
            return package
        # Add an extra fallback in case a forked process leeks through
        except zmq.ZMQError as exc:
            # The interrupt caused by python handling the
            # SIGCHLD. Throws this error with errno == EINTR.
            # Nothing to receive on the zmq socket throws this error
            # with EAGAIN.
            # Both are safe to ignore
            if exc.errno != errno.EAGAIN and exc.errno != errno.EINTR:
                log.critical('Unexpected ZMQError while polling minion',
                             exc_info=True)
            return None

    def destroy(self):
        # Tear down in dependency order: stream, sockets, then context.
        # hasattr guards allow destroy() to run on a partially-built object.
        if hasattr(self, 'stream') and self.stream.closed is False:
            self.stream.close()
        if hasattr(self, 'epub_sock') and self.epub_sock.closed is False:
            self.epub_sock.close()
        if hasattr(self, 'epull_sock') and self.epull_sock.closed is False:
            self.epull_sock.close()
        if hasattr(self, 'context') and self.context.closed is False:
            self.context.term()

    def __del__(self):
        self.destroy()
class EventPublisher(multiprocessing.Process):
    '''
    The interface that takes master events and republishes them out to anyone
    who wants to listen
    '''
    def __init__(self, opts):
        super(EventPublisher, self).__init__()
        self.opts = salt.config.DEFAULT_MASTER_OPTS.copy()
        self.opts.update(opts)

    def run(self):
        '''
        Bind the pub and pull sockets for events
        '''
        salt.utils.appendproctitle(self.__class__.__name__)
        linger = 5000
        # Set up the context
        self.context = zmq.Context(1)
        # Prepare the master event publisher
        self.epub_sock = self.context.socket(zmq.PUB)
        try:
            # zmq.HWM only exists on older pyzmq; newer releases use
            # separate send/receive high-water marks (fallback below)
            self.epub_sock.setsockopt(zmq.HWM, self.opts['event_publisher_pub_hwm'])
        except AttributeError:
            self.epub_sock.setsockopt(zmq.SNDHWM, self.opts['event_publisher_pub_hwm'])
            self.epub_sock.setsockopt(zmq.RCVHWM, self.opts['event_publisher_pub_hwm'])
        # Prepare master event pull socket
        self.epull_sock = self.context.socket(zmq.PULL)
        if self.opts['ipc_mode'] == 'tcp':
            epub_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_master_pub_port']
            )
            epull_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_master_pull_port']
            )
        else:
            epub_uri = 'ipc://{0}'.format(
                os.path.join(self.opts['sock_dir'], 'master_event_pub.ipc')
            )
            salt.utils.zeromq.check_ipc_path_max_len(epub_uri)
            epull_uri = 'ipc://{0}'.format(
                os.path.join(self.opts['sock_dir'], 'master_event_pull.ipc')
            )
            salt.utils.zeromq.check_ipc_path_max_len(epull_uri)

        # Start the master event publisher; the umask keeps the ipc files
        # owner-only while they are created
        old_umask = os.umask(0o177)
        try:
            self.epull_sock.bind(epull_uri)
            self.epub_sock.bind(epub_uri)
            # client_acl/external_auth users also need to read the pub
            # socket, so relax its permissions in that case
            if (self.opts['ipc_mode'] != 'tcp' and (
                    self.opts['client_acl'] or
                    self.opts['external_auth'])):
                os.chmod(os.path.join(
                    self.opts['sock_dir'], 'master_event_pub.ipc'), 0o666)
        finally:
            os.umask(old_umask)
        try:
            # Forward every package from the pull socket to the pub socket
            while True:
                # Catch and handle EINTR from when this process is sent
                # SIGUSR1 gracefully so we don't choke and die horribly
                try:
                    package = self.epull_sock.recv()
                    self.epub_sock.send(package)
                except zmq.ZMQError as exc:
                    if exc.errno == errno.EINTR:
                        continue
                    raise exc
        except KeyboardInterrupt:
            # Clean shutdown: close both sockets and the context
            if self.epub_sock.closed is False:
                self.epub_sock.setsockopt(zmq.LINGER, linger)
                self.epub_sock.close()
            if self.epull_sock.closed is False:
                self.epull_sock.setsockopt(zmq.LINGER, linger)
                self.epull_sock.close()
            if self.context.closed is False:
                self.context.term()
class EventReturn(multiprocessing.Process):
    '''
    A dedicated process which listens to the master event bus and queues
    and forwards events to the specified returner.
    '''
    def __init__(self, opts):
        '''
        Initialize the EventReturn system

        Return an EventReturn instance
        '''
        multiprocessing.Process.__init__(self)

        self.opts = opts
        # Number of events to batch up before flushing them to the returner.
        self.event_return_queue = self.opts['event_return_queue']
        local_minion_opts = self.opts.copy()
        local_minion_opts['file_client'] = 'local'
        self.minion = salt.minion.MasterMinion(local_minion_opts)
        # Pending events awaiting a flush.
        self.event_queue = []
        self.stop = False

    def sig_stop(self, signum, frame):
        '''
        SIGTERM handler: request a graceful shutdown of the run() loop.
        '''
        self.stop = True  # tell it to stop

    def flush_events(self):
        '''
        Forward all queued events to the configured returner's
        ``<returner>.event_return`` function, then clear the queue.
        '''
        event_return = '{0}.event_return'.format(
            self.opts['event_return']
        )
        if event_return in self.minion.returners:
            try:
                self.minion.returners[event_return](self.event_queue)
            except Exception as exc:
                log.error('Could not store events - returner \'{0}\' raised '
                          'exception: {1}'.format(self.opts['event_return'], exc))
                # don't waste processing power unnecessarily on converting a
                # potentially huge dataset to a string
                if log.level <= logging.DEBUG:
                    log.debug('Event data that caused an exception: {0}'.format(
                        self.event_queue))
            # The queue is dropped whether or not the returner succeeded;
            # retrying would risk duplicate storage.
            del self.event_queue[:]
        else:
            # Bug fix: the message has a single format argument, so it must be
            # referenced as {0}; the previous {1} raised IndexError instead of
            # logging the intended error.
            log.error('Could not store return for event(s) - returner '
                      '\'{0}\' not found.'.format(self.opts['event_return']))

    def run(self):
        '''
        Spin up the multiprocess event returner
        '''
        # Properly exit if a SIGTERM is signalled
        signal.signal(signal.SIGTERM, self.sig_stop)
        salt.utils.appendproctitle(self.__class__.__name__)
        self.event = get_event('master', opts=self.opts, listen=True)
        events = self.event.iter_events(full=True)
        self.event.fire_event({}, 'salt/event_listen/start')
        try:
            for event in events:
                if self._filter(event):
                    self.event_queue.append(event)
                # Flush in batches to amortize returner overhead.
                if len(self.event_queue) >= self.event_return_queue:
                    self.flush_events()
                if self.stop:
                    break
        except zmq.error.ZMQError as exc:
            if exc.errno != errno.EINTR:  # Outside interrupt is a normal shutdown case
                raise
        finally:  # flush all we have at this moment
            if self.event_queue:
                self.flush_events()

    def _filter(self, event):
        '''
        Take an event and run it through configured filters.

        Returns True if event should be stored, else False
        '''
        tag = event['tag']
        if tag in self.opts['event_return_whitelist']:
            if tag not in self.opts['event_return_blacklist']:
                return True
            else:
                return False  # Event was whitelisted and blacklisted
        elif tag in self.opts['event_return_blacklist']:
            return False
        # Tags matching neither list are stored by default.
        return True
class StateFire(object):
    '''
    Evaluate the data from a state run and fire events on the master and minion
    for each returned chunk that is not "green"
    This object is made to only run on a minion
    '''

    def __init__(self, opts, auth=None):
        self.opts = opts
        # Reuse a caller-supplied authenticator when given; otherwise build one.
        self.auth = auth if auth else salt.crypt.SAuth(self.opts)

    def fire_master(self, data, tag, preload=None):
        '''
        Fire an event off on the master server

        CLI Example:

        .. code-block:: bash

            salt '*' event.fire_master 'stuff to be in the event' 'tag'
        '''
        # Start from any preloaded fields, then overlay the mandatory ones.
        payload = dict(preload) if preload else {}
        payload['id'] = self.opts['id']
        payload['tag'] = tag
        payload['data'] = data
        payload['cmd'] = '_minion_event'
        payload['tok'] = self.auth.gen_token('salt')

        channel = salt.transport.Channel.factory(self.opts)
        try:
            channel.send(payload)
        except Exception:
            # Event delivery is best effort; a failed send is not fatal.
            pass
        return True

    def fire_running(self, running):
        '''
        Pass in a state "running" dict, this is the return dict from a state
        call. The dict will be processed and fire events.

        By default yellows and reds fire events on the master and minion, but
        this can be configured.
        '''
        payload = {'id': self.opts['id'],
                   'events': [],
                   'cmd': '_minion_event'}
        # Visit chunks in execution order.
        ordered = sorted(running, key=lambda name: running[name].get('__run_num__', 0))
        for name in ordered:
            chunk = running[name]
            if chunk['result'] and not chunk['changes']:
                # "Green" chunk (succeeded, no changes): nothing to report.
                continue
            tag = 'state_{0}_{1}'.format(
                str(chunk['result']),
                'True' if chunk['changes'] else 'False')
            payload['events'].append({
                'tag': tag,
                'data': chunk,
            })

        channel = salt.transport.Channel.factory(self.opts)
        try:
            channel.send(payload)
        except Exception:
            # Best effort, same as fire_master.
            pass
        return True
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Pub/Sub subscriptions create command."""
from apitools.base.py import exceptions as api_ex
from googlecloudsdk.api_lib.util import exceptions
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.command_lib.pubsub import util
from googlecloudsdk.core import log
def _ArgsBeta(parser):
"""Registers flags for this command."""
parser.add_argument('subscription', nargs='+',
help='One or more subscriptions to create.')
parser.add_argument(
'--topic', required=True,
help=('The name of the topic from which this subscription is receiving'
' messages. Each subscription is attached to a single topic.'))
parser.add_argument(
'--topic-project',
help=('The name of the project the provided topic belongs to.'
' If not set, it defaults to the currently selected'
' cloud project.'))
parser.add_argument(
'--ack-deadline', type=int,
help=('The number of seconds the system will wait for a subscriber to'
' acknowledge receiving a message before re-attempting'
' delivery.'))
parser.add_argument(
'--push-endpoint',
help=('A URL to use as the endpoint for this subscription.'
' This will also automatically set the subscription'
' type to PUSH.'))
def _ArgsAlpha(parser):
  """Registers flags for this command that are available only in Alpha.

  Args:
    parser: an argparse parser to register flags on.
  """
  parser.add_argument(
      '--retain-acked-messages',
      action='store_true',
      default=None,
      help=('Whether or not to retain acknowledged messages. If true,'
            ' messages are not expunged from the subscription\'s backlog'
            ' until they fall out of the --message-retention-duration'
            ' window.'))
  # Fixed help text: the unit list previously read "seconds, seconds,
  # minutes, hours, and days" (five names for four units).
  parser.add_argument(
      '--message-retention-duration',
      type=arg_parsers.Duration(),
      help=('How long to retain unacknowledged messages in the'
            ' subscription\'s backlog, from the moment a message is'
            ' published. If --retain-acked-messages is true, this also'
            ' configures the retention of acknowledged messages. The default'
            ' value is 7 days, the minimum is 10 minutes, and the maximum is'
            ' 7 days. Valid values are strings of the form INTEGER[UNIT],'
            ' where UNIT is one of "s", "m", "h", and "d" for seconds,'
            ' minutes, hours, and days, respectively. If the unit'
            ' is omitted, seconds is assumed.'))
def _Run(cmd, args, field_adder):
  """Common function to run the Create command.

  Args:
    cmd: a base.CreateCommand object
    args: an argparse namespace. All the arguments that were provided to this
      command invocation.
    field_adder: Function that populates additional fields in a subscription.

  Yields:
    A serialized object (dict) describing the results of the operation.
    This description fits the Resource described in the ResourceRegistry under
    'pubsub.projects.subscriptions'.

  Raises:
    An HttpException if there was a problem calling the
    API subscriptions.Create command.
  """
  msgs = cmd.context['pubsub_msgs']
  pubsub = cmd.context['pubsub']

  topic_project = ''
  if args.topic_project:
    topic_project = projects_util.ParseProject(args.topic_project).Name()

  for sub_name in args.subscription:
    name = util.SubscriptionFormat(sub_name)
    subscription = msgs.Subscription(
        name=name,
        topic=util.TopicFormat(args.topic, topic_project),
        ackDeadlineSeconds=args.ack_deadline)
    if args.push_endpoint:
      subscription.pushConfig = msgs.PushConfig(
          pushEndpoint=args.push_endpoint)
    field_adder(subscription, args)

    # TODO(b/32275310): Conform to gcloud error handling guidelines.
    failed = None
    result = subscription
    try:
      result = pubsub.projects_subscriptions.Create(subscription)
    except api_ex.HttpError as error:
      # On failure, report the request we attempted plus the error message.
      failed = exceptions.HttpException(error).payload.status_message

    result = util.SubscriptionDisplayDict(result, failed)
    log.CreatedResource(name, kind='subscription', failed=failed)
    yield result
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateBeta(base.CreateCommand):
  """Creates one or more Cloud Pub/Sub subscriptions.

  Creates one or more Cloud Pub/Sub subscriptions for a given topic.
  The new subscription defaults to a PULL subscription unless a push endpoint
  is specified.
  """

  @staticmethod
  def Args(parser):
    _ArgsBeta(parser)

  def Collection(self):
    return util.SUBSCRIPTIONS_COLLECTION

  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Yields:
      A serialized object (dict) describing the results of the operation.
      This description fits the Resource described in the ResourceRegistry under
      'pubsub.projects.subscriptions'.

    Raises:
      An HttpException if there was a problem calling the
      API subscriptions.Create command.
    """
    # Beta has no extra subscription fields, so the field adder is a no-op.
    no_extra_fields = lambda unused_subscription, unused_args: None
    for created in _Run(self, args, no_extra_fields):
      yield created
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateAlpha(base.CreateCommand):
  """Creates one or more Cloud Pub/Sub subscriptions.

  Creates one or more Cloud Pub/Sub subscriptions for a given topic.
  The new subscription defaults to a PULL subscription unless a push endpoint
  is specified.
  """

  @staticmethod
  def Args(parser):
    # Alpha takes everything Beta takes, plus the retention flags.
    _ArgsBeta(parser)
    _ArgsAlpha(parser)

  @staticmethod
  def _AddFields(subscription, args):
    # Populate the Alpha-only subscription fields.
    subscription.retainAckedMessages = args.retain_acked_messages
    duration = args.message_retention_duration
    if duration:
      # Duration args are converted to ints in seconds while Duration proto
      # fields are represented as strings with unit seconds and suffix 's',
      # so convert to the string representation here.
      subscription.messageRetentionDuration = '{0}s'.format(duration)

  def Collection(self):
    return util.SUBSCRIPTIONS_COLLECTION

  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Yields:
      A serialized object (dict) describing the results of the operation.
      This description fits the Resource described in the ResourceRegistry under
      'pubsub.projects.subscriptions'.

    Raises:
      An HttpException if there was a problem calling the
      API subscriptions.Create command.
    """
    for created in _Run(self, args, self._AddFields):
      yield created
| |
# coding=utf-8
import sys
from unittest import TestResult, TextTestRunner
import datetime
import re
from teamcity.messages import TeamcityServiceMessages
from teamcity.common import is_string, get_class_fullname, convert_error_to_string, \
dump_test_stdout, dump_test_stderr, get_exception_message, to_unicode, FlushingStringIO
from .diff_tools import EqualsAssertionError, patch_unittest_diff
# Capture the interpreter's original streams before any test capturing
# replaces sys.stdout/sys.stderr; TeamCity service messages must always be
# written to the real stdout.
_real_stdout = sys.stdout
_real_stderr = sys.stderr
# Fully-qualified class names of unittest's synthetic "error holder"
# pseudo-tests (used to report e.g. setUpModule/setUpClass failures).
_ERROR_HOLDERS_FQN = ("unittest.suite._ErrorHolder", "unittest2.suite._ErrorHolder")
class TeamcityTestResult(TestResult):
    """TestResult that reports unittest events as TeamCity service messages."""

    # TestResult prints this between error reports; a bare newline keeps the
    # TeamCity build log compact.
    separator2 = "\n"

    # noinspection PyUnusedLocal
    def __init__(self, stream=_real_stdout, descriptions=None, verbosity=None):
        super(TeamcityTestResult, self).__init__()

        # Some code may ask for self.failfast, see unittest2.case.TestCase.subTest
        self.failfast = getattr(self, "failfast", False)

        # test_id -> datetime when the test started (used to report duration).
        self.test_started_datetime_map = {}
        # test_ids already reported as failed (avoids double reporting).
        self.failed_tests = set()
        # test_id -> list of failed subtest block ids.
        self.subtest_failures = {}
        # Service messages go to the real stdout, bypassing any capturing.
        self.messages = TeamcityServiceMessages(_real_stdout)
        # Flow id of the currently running test, or None between tests.
        self.current_test_id = None

    @staticmethod
    def get_test_id(test):
        """Return a stable TeamCity-friendly id for *test*; strings pass through."""
        if is_string(test):
            return test

        test_class_fullname = get_class_fullname(test)
        test_id = test.id()

        if test_class_fullname in _ERROR_HOLDERS_FQN:
            # patch setUpModule (__main__) -> __main__.setUpModule
            return re.sub(r'^(.*) \((.*)\)$', r'\2.\1', test_id)

        # Force test_id for doctests
        if test_class_fullname != "doctest.DocTestCase":
            desc = test.shortDescription()
            test_method_name = getattr(test, "_testMethodName", "")
            if desc and desc != test_id and desc != test_method_name:
                # '.' is the hierarchy separator in TeamCity test names, so it
                # must not appear in the human-readable description suffix.
                return "%s (%s)" % (test_id, desc.replace('.', '_'))

        return test_id

    def addSuccess(self, test):
        # No message needed: success is implied by testFinished without a
        # preceding testFailed.
        super(TeamcityTestResult, self).addSuccess(test)

    def addExpectedFailure(self, test, err):
        """Report an @expectedFailure test that did fail as ignored."""
        _super = super(TeamcityTestResult, self)
        if hasattr(_super, "addExpectedFailure"):
            _super.addExpectedFailure(test, err)

        err = convert_error_to_string(err)
        test_id = self.get_test_id(test)

        self.messages.testIgnored(test_id, message="Expected failure: " + err, flowId=test_id)

    def get_subtest_block_id(self, test, subtest):
        """Derive the TeamCity block id for *subtest* relative to its parent *test*."""
        test_id = self.get_test_id(test)
        subtest_id = self.get_test_id(subtest)

        # Strip the parent prefix so nested blocks read naturally in the log.
        if subtest_id.startswith(test_id):
            block_id = subtest_id[len(test_id):].strip()
        else:
            block_id = subtest_id

        if len(block_id) == 0:
            block_id = test_id

        return block_id

    def addSkip(self, test, reason=""):
        """Report a skipped test (or skipped subtest) to TeamCity."""
        # Python 2.6's TestResult has no addSkip.
        if sys.version_info >= (2, 7):
            super(TeamcityTestResult, self).addSkip(test, reason)

        # `reason` may be a plain string or an Exception (see addError).
        if reason:
            if isinstance(reason, Exception):
                reason_str = ": " + get_exception_message(reason)
            else:
                reason_str = ": " + to_unicode(reason)
        else:
            reason_str = ""

        test_class_name = get_class_fullname(test)
        if test_class_name == "unittest.case._SubTest" or test_class_name == "unittest2.case._SubTest":
            # A skipped subtest is reported as a block inside the parent test.
            parent_test = test.test_case
            parent_test_id = self.get_test_id(parent_test)
            subtest = test
            block_id = self.get_subtest_block_id(parent_test, subtest)

            self.messages.subTestBlockOpened(block_id, subTestResult="Skip", flowId=parent_test_id)
            self.messages.testStdOut(parent_test_id, out="SubTest skipped" + reason_str + "\n", flowId=parent_test_id)
            self.messages.blockClosed(block_id, flowId=parent_test_id)
        else:
            test_id = self.get_test_id(test)

            if test_id not in self.test_started_datetime_map:
                # Test ignored without startTest. Handle start and finish events ourselves
                self.messages.testStarted(test_id, flowId=test_id)
                self.messages.testIgnored(test_id, message="Skipped" + reason_str, flowId=test_id)
                self.messages.testFinished(test_id, flowId=test_id)
            else:
                self.messages.testIgnored(test_id, message="Skipped" + reason_str, flowId=test_id)

    def addUnexpectedSuccess(self, test):
        """Report a passing @expectedFailure test as a failure."""
        _super = super(TeamcityTestResult, self)
        if hasattr(_super, "addUnexpectedSuccess"):
            _super.addUnexpectedSuccess(test)

        test_id = self.get_test_id(test)
        self.messages.testFailed(test_id, message='Failure',
                                 details="Test should not succeed since it's marked with @unittest.expectedFailure",
                                 flowId=test_id)

    def addError(self, test, err, *k):
        """Report an error; error-holders and SkipTest get special handling."""
        super(TeamcityTestResult, self).addError(test, err)

        test_class = get_class_fullname(test)
        if test_class in _ERROR_HOLDERS_FQN:
            # This is a standalone error
            # (e.g. setUpModule failed); synthesize a whole test around it.
            test_id = self.get_test_id(test)

            self.messages.testStarted(test_id, flowId=test_id)
            self.report_fail(test, 'Failure', err)
            self.messages.testFinished(test_id, flowId=test_id)
        elif get_class_fullname(err[0]) == "unittest2.case.SkipTest":
            # unittest2 routes SkipTest through addError; treat it as a skip.
            message = ""
            if hasattr(err[1], "message"):
                message = getattr(err[1], "message", "")
            elif hasattr(err[1], "args"):
                message = getattr(err[1], "args", [""])[0]
            self.addSkip(test, message)
        else:
            self.report_fail(test, 'Error', err)

    def addFailure(self, test, err, *k):
        super(TeamcityTestResult, self).addFailure(test, err)

        self.report_fail(test, 'Failure', err)

    def addSubTest(self, test, subtest, err):
        """Report a subtest outcome as a named block within the parent test."""
        _super = super(TeamcityTestResult, self)
        if hasattr(_super, "addSubTest"):
            _super.addSubTest(test, subtest, err)

        test_id = self.get_test_id(test)
        subtest_id = self.get_test_id(subtest)

        if subtest_id.startswith(test_id):
            # Replace "." -> "_" since '.' is a test hierarchy separator
            # See i.e. https://github.com/JetBrains/teamcity-messages/issues/134 (https://youtrack.jetbrains.com/issue/PY-23846)
            block_id = subtest_id[len(test_id):].strip().replace(".", "_")
        else:
            block_id = subtest_id

        if len(block_id) == 0:
            block_id = subtest_id

        if err is not None:
            # Remember the failure so stopTest can fail the parent test too.
            self.add_subtest_failure(test_id, block_id)

            if issubclass(err[0], test.failureException):
                self.messages.subTestBlockOpened(block_id, subTestResult="Failure", flowId=test_id)
                self.messages.testStdErr(test_id, out="SubTest failure: %s\n" % convert_error_to_string(err), flowId=test_id)
                self.messages.blockClosed(block_id, flowId=test_id)
            else:
                self.messages.subTestBlockOpened(block_id, subTestResult="Error", flowId=test_id)
                self.messages.testStdErr(test_id, out="SubTest error: %s\n" % convert_error_to_string(err), flowId=test_id)
                self.messages.blockClosed(block_id, flowId=test_id)
        else:
            self.messages.subTestBlockOpened(block_id, subTestResult="Success", flowId=test_id)
            self.messages.blockClosed(block_id, flowId=test_id)

    def add_subtest_failure(self, test_id, subtest_block_id):
        """Record that subtest *subtest_block_id* of *test_id* failed."""
        fail_array = self.subtest_failures.get(test_id, [])
        fail_array.append(subtest_block_id)
        self.subtest_failures[test_id] = fail_array

    def get_subtest_failure(self, test_id):
        """Return a comma-separated list of failed subtest ids ('' if none)."""
        fail_array = self.subtest_failures.get(test_id, [])
        return ", ".join(fail_array)

    def report_fail(self, test, fail_type, err):
        """Emit a testFailed message for *test* with details built from *err*."""
        test_id = self.get_test_id(test)

        # If the failure carries structured diff data (see diff_tools), report
        # it so the IDE/TeamCity can render an expected/actual comparison.
        diff_failed = None
        try:
            error = err[1]
            if isinstance(error, EqualsAssertionError):
                diff_failed = error
        except:
            pass

        # `err` may be a plain string, a Twisted Failure, or an exc_info tuple.
        if is_string(err):
            details = err
        elif get_class_fullname(err) == "twisted.python.failure.Failure":
            details = err.getTraceback()
        else:
            # Hide the diff-patch frames from the traceback when reporting a
            # structured comparison failure.
            frames_to_skip_from_tail = 2 if diff_failed else 0
            details = convert_error_to_string(err, frames_to_skip_from_tail)

        subtest_failures = self.get_subtest_failure(test_id)
        if subtest_failures:
            details = "Failed subtests list: " + subtest_failures + "\n\n" + details.strip()
        details = details.strip()

        if diff_failed:
            self.messages.testFailed(test_id,
                                     message=diff_failed.msg,
                                     details=details,
                                     flowId=test_id,
                                     comparison_failure=diff_failed)
        else:
            self.messages.testFailed(test_id, message=fail_type, details=details, flowId=test_id)
        self.failed_tests.add(test_id)

    def startTest(self, test):
        """Record start time and announce the test to TeamCity."""
        test_id = self.get_test_id(test)
        self.current_test_id = test_id

        super(TeamcityTestResult, self).startTest(test)

        self.test_started_datetime_map[test_id] = datetime.datetime.now()
        self.messages.testStarted(test_id, captureStandardOutput='true', flowId=test_id)

    def _dump_test_stderr(self, data):
        # Route captured stderr to the current test's flow, or to the real
        # stderr when no test is running.
        if self.current_test_id is not None:
            dump_test_stderr(self.messages, self.current_test_id, self.current_test_id, data)
        else:
            _real_stderr.write(data)

    def _dump_test_stdout(self, data):
        # Route captured stdout to the current test's flow, or to the real
        # stdout when no test is running.
        if self.current_test_id is not None:
            dump_test_stdout(self.messages, self.current_test_id, self.current_test_id, data)
        else:
            _real_stdout.write(data)

    def _setupStdout(self):
        # Override unittest's buffering: use flushing streams so output is
        # attributed to the running test as it happens.
        if getattr(self, 'buffer', None):
            self._stderr_buffer = FlushingStringIO(self._dump_test_stderr)
            self._stdout_buffer = FlushingStringIO(self._dump_test_stdout)
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer

    def stopTest(self, test):
        """Flush buffered output, report subtest rollups, and finish the test."""
        test_id = self.get_test_id(test)

        if getattr(self, 'buffer', None):
            # Do not allow super() method to print output by itself
            self._mirrorOutput = False

            output = sys.stdout.getvalue()
            if output:
                dump_test_stdout(self.messages, test_id, test_id, output)

            error = sys.stderr.getvalue()
            if error:
                dump_test_stderr(self.messages, test_id, test_id, error)

        super(TeamcityTestResult, self).stopTest(test)

        self.current_test_id = None

        if test_id not in self.failed_tests:
            # The test itself passed, but it must still be failed if any of
            # its subtests failed.
            subtest_failures = self.get_subtest_failure(test_id)
            if subtest_failures:
                self.report_fail(test, "One or more subtests failed", "")

        time_diff = datetime.datetime.now() - self.test_started_datetime_map[test_id]
        self.messages.testFinished(test_id, testDuration=time_diff, flowId=test_id)

    def printErrors(self):
        # Errors are reported live via service messages; suppress the default
        # end-of-run summary.
        pass
class TeamcityTestRunner(TextTestRunner):
    """TextTestRunner that reports progress through TeamCity service messages."""

    resultclass = TeamcityTestResult

    if sys.version_info < (2, 7):
        # Python 2.6's TextTestRunner ignores ``resultclass``, so build the
        # result object explicitly.
        def _makeResult(self):
            return TeamcityTestResult(self.stream, self.descriptions, self.verbosity)

    def run(self, test):
        patch_unittest_diff()
        # Best effort: announce the total test count up front so TeamCity can
        # display accurate progress. Any failure here must not abort the run.
        # noinspection PyBroadException
        try:
            TeamcityServiceMessages(_real_stdout).testCount(test.countTestCases())
        except:
            pass
        return super(TeamcityTestRunner, self).run(test)
if __name__ == '__main__':
    from unittest import main
    # When executed directly, discover tests from the command line and run
    # them with the TeamCity-aware runner.
    main(module=None, testRunner=TeamcityTestRunner())
| |
from mpegdash.utils import (
parse_attr_value, parse_child_nodes, parse_node_value,
write_attr_value, write_child_node, write_node_value
)
class XMLNode(object):
    """Abstract base for every MPD element; subclasses map XML to attributes."""

    def parse(self, xmlnode):
        """Populate this object's attributes from *xmlnode*."""
        raise NotImplementedError('Should have implemented this')

    def write(self, xmlnode):
        """Serialize this object's attributes onto *xmlnode*."""
        raise NotImplementedError('Should have implemented this')
class Subset(XMLNode):
    """A Period Subset element: an id plus the adaptation sets it contains."""

    # (python attribute, XML attribute, converter) triples, in schema order.
    _ATTRS = (
        ('id', 'id', str),
        ('contains', 'contains', [int]),
    )

    def __init__(self):
        self.id = None      # xs:string
        self.contains = []  # UIntVectorType (required)

    def parse(self, xmlnode):
        for field, attr, conv in Subset._ATTRS:
            setattr(self, field, parse_attr_value(xmlnode, attr, conv))

    def write(self, xmlnode):
        for field, attr, _ in Subset._ATTRS:
            write_attr_value(xmlnode, attr, getattr(self, field))
class URL(XMLNode):
    """A UrlType element: an optional source URI plus an optional byte range."""

    # (python attribute, XML attribute, converter) triples, in schema order.
    _ATTRS = (
        ('source_url', 'sourceURL', str),
        ('range', 'range', str),
    )

    def __init__(self):
        self.source_url = None  # xs:anyURI
        self.range = None       # xs:string

    def parse(self, xmlnode):
        for field, attr, conv in URL._ATTRS:
            setattr(self, field, parse_attr_value(xmlnode, attr, conv))

    def write(self, xmlnode):
        for field, attr, _ in URL._ATTRS:
            write_attr_value(xmlnode, attr, getattr(self, field))
class BaseURL(XMLNode):
    """A BaseURL element: a URI in the node text plus resolution metadata."""

    # (python attribute, XML attribute, converter) triples, in schema order.
    _ATTRS = (
        ('service_location', 'serviceLocation', str),
        ('byte_range', 'byteRange', str),
        ('availability_time_offset', 'availabilityTimeOffset', float),
        ('availability_time_complete', 'availabilityTimeComplete', bool),
    )

    def __init__(self):
        self.base_url_value = None  # xs:anyURI (element text)
        for field, _, _ in BaseURL._ATTRS:
            setattr(self, field, None)

    def parse(self, xmlnode):
        self.base_url_value = parse_node_value(xmlnode, str)
        for field, attr, conv in BaseURL._ATTRS:
            setattr(self, field, parse_attr_value(xmlnode, attr, conv))

    def write(self, xmlnode):
        write_node_value(xmlnode, self.base_url_value)
        for field, attr, _ in BaseURL._ATTRS:
            write_attr_value(xmlnode, attr, getattr(self, field))
class XsStringElement(XMLNode):
    """A trivial element whose only payload is its text content (xs:string)."""

    def __init__(self):
        # Element text; None until parsed.
        self.text = None

    def parse(self, xmlnode):
        self.text = parse_node_value(xmlnode, str)

    def write(self, xmlnode):
        write_node_value(xmlnode, self.text)
class ProgramInformation(XMLNode):
    """Descriptive metadata (titles, sources, copyrights) for a presentation."""

    # (python attribute, child tag) pairs; each child is an XsStringElement.
    _CHILDREN = (
        ('titles', 'Title'),
        ('sources', 'Source'),
        ('copyrights', 'Copyright'),
    )

    def __init__(self):
        self.lang = None                  # xs:language
        self.more_information_url = None  # xs:anyURI
        for field, _ in ProgramInformation._CHILDREN:
            setattr(self, field, None)    # list of XsStringElement, or None

    def parse(self, xmlnode):
        self.lang = parse_attr_value(xmlnode, 'lang', str)
        self.more_information_url = parse_attr_value(xmlnode, 'moreInformationURL', str)
        for field, tag in ProgramInformation._CHILDREN:
            setattr(self, field, parse_child_nodes(xmlnode, tag, XsStringElement))

    def write(self, xmlnode):
        write_attr_value(xmlnode, 'lang', self.lang)
        write_attr_value(xmlnode, 'moreInformationURL', self.more_information_url)
        for field, tag in ProgramInformation._CHILDREN:
            write_child_node(xmlnode, tag, getattr(self, field))
class Metrics(XMLNode):
    """A Metrics element: which DASH metrics to collect, where, and when."""

    def __init__(self):
        self.metrics = ''       # xs:string (required)
        self.reportings = None  # list of Descriptor, or None
        self.ranges = None      # list of Range, or None

    def parse(self, xmlnode):
        self.metrics = parse_attr_value(xmlnode, 'metrics', str)
        self.reportings = parse_child_nodes(xmlnode, 'Reporting', Descriptor)
        self.ranges = parse_child_nodes(xmlnode, 'Range', Range)

    def write(self, xmlnode):
        write_attr_value(xmlnode, 'metrics', self.metrics)
        write_child_node(xmlnode, 'Reporting', self.reportings)
        write_child_node(xmlnode, 'Range', self.ranges)
class Range(XMLNode):
    """A metrics collection window: a start time plus a duration."""

    # (python attribute, XML attribute, converter) triples, in schema order.
    _ATTRS = (
        ('starttime', 'starttime', str),
        ('duration', 'duration', str),
    )

    def __init__(self):
        self.starttime = None  # xs:duration
        self.duration = None   # xs:duration

    def parse(self, xmlnode):
        for field, attr, conv in Range._ATTRS:
            setattr(self, field, parse_attr_value(xmlnode, attr, conv))

    def write(self, xmlnode):
        for field, attr, _ in Range._ATTRS:
            write_attr_value(xmlnode, attr, getattr(self, field))
class SegmentURL(XMLNode):
    """One SegmentList entry: media/index URIs and their optional byte ranges."""

    # (python attribute, XML attribute, converter) triples, in schema order.
    _ATTRS = (
        ('media', 'media', str),
        ('media_range', 'mediaRange', str),
        ('index', 'index', str),
        ('index_range', 'indexRange', str),
    )

    def __init__(self):
        for field, _, _ in SegmentURL._ATTRS:
            setattr(self, field, None)  # all optional strings/URIs

    def parse(self, xmlnode):
        for field, attr, conv in SegmentURL._ATTRS:
            setattr(self, field, parse_attr_value(xmlnode, attr, conv))

    def write(self, xmlnode):
        for field, attr, _ in SegmentURL._ATTRS:
            write_attr_value(xmlnode, attr, getattr(self, field))
class S(XMLNode):
    """One segment-timeline entry: start ``t``, duration ``d``, repeat ``r``."""

    # (python attribute, XML attribute, converter) triples, in schema order.
    _ATTRS = (
        ('t', 't', int),
        ('d', 'd', int),
        ('r', 'r', int),
    )

    def __init__(self):
        self.t = None  # xs:unsignedLong
        self.d = 0     # xs:unsignedLong (required)
        self.r = None  # xs:integer

    def parse(self, xmlnode):
        for field, attr, conv in S._ATTRS:
            setattr(self, field, parse_attr_value(xmlnode, attr, conv))

    def write(self, xmlnode):
        for field, attr, _ in S._ATTRS:
            write_attr_value(xmlnode, attr, getattr(self, field))
class SegmentTimeline(XMLNode):
    """An ordered list of S entries describing segment start times/durations."""

    def __init__(self):
        # One or more S elements; None until parsed.
        self.Ss = None

    def parse(self, xmlnode):
        self.Ss = parse_child_nodes(xmlnode, 'S', S)

    def write(self, xmlnode):
        write_child_node(xmlnode, 'S', self.Ss)
class SegmentBase(XMLNode):
    """Common segment information shared by all segment description types."""

    # (python attribute, XML attribute, converter) triples, in schema order.
    _ATTRS = (
        ('timescale', 'timescale', int),
        ('index_range', 'indexRange', str),
        ('index_range_exact', 'indexRangeExact', bool),
        ('presentation_time_offset', 'presentationTimeOffset', int),
        ('availability_time_offset', 'availabilityTimeOffset', float),
        ('availability_time_complete', 'availabilityTimeComplete', bool),
    )
    # (python attribute, child tag) pairs; each child is a URL element.
    _CHILDREN = (
        ('initializations', 'Initialization'),
        ('representation_indexes', 'RepresentationIndex'),
    )

    def __init__(self):
        for field, _, _ in SegmentBase._ATTRS:
            setattr(self, field, None)
        for field, _ in SegmentBase._CHILDREN:
            setattr(self, field, None)  # list of URL, or None

    def parse(self, xmlnode):
        for field, attr, conv in SegmentBase._ATTRS:
            setattr(self, field, parse_attr_value(xmlnode, attr, conv))
        for field, tag in SegmentBase._CHILDREN:
            setattr(self, field, parse_child_nodes(xmlnode, tag, URL))

    def write(self, xmlnode):
        for field, attr, _ in SegmentBase._ATTRS:
            write_attr_value(xmlnode, attr, getattr(self, field))
        for field, tag in SegmentBase._CHILDREN:
            write_child_node(xmlnode, tag, getattr(self, field))
class MultipleSegmentBase(SegmentBase):
    """SegmentBase extended with duration/numbering and timeline children."""

    def __init__(self):
        super(MultipleSegmentBase, self).__init__()
        self.duration = None              # xs:unsignedInt
        self.start_number = None          # xs:unsignedInt
        self.segment_timelines = None     # list of SegmentTimeline, or None
        self.bitstream_switchings = None  # list of URL, or None

    def parse(self, xmlnode):
        super(MultipleSegmentBase, self).parse(xmlnode)
        self.duration = parse_attr_value(xmlnode, 'duration', int)
        self.start_number = parse_attr_value(xmlnode, 'startNumber', int)
        self.segment_timelines = parse_child_nodes(xmlnode, 'SegmentTimeline', SegmentTimeline)
        self.bitstream_switchings = parse_child_nodes(xmlnode, 'BitstreamSwitching', URL)

    def write(self, xmlnode):
        super(MultipleSegmentBase, self).write(xmlnode)
        write_attr_value(xmlnode, 'duration', self.duration)
        write_attr_value(xmlnode, 'startNumber', self.start_number)
        write_child_node(xmlnode, 'SegmentTimeline', self.segment_timelines)
        write_child_node(xmlnode, 'BitstreamSwitching', self.bitstream_switchings)
class SegmentTemplate(MultipleSegmentBase):
    """Segment information expressed as URL templates."""

    def __init__(self):
        super(SegmentTemplate, self).__init__()
        self.media = None                # xs:string
        self.index = None                # xs:string
        self.initialization = None       # xs:string
        self.bitstream_switching = None  # xs:string

    def parse(self, xmlnode):
        super(SegmentTemplate, self).parse(xmlnode)
        self.media = parse_attr_value(xmlnode, 'media', str)
        self.index = parse_attr_value(xmlnode, 'index', str)
        self.initialization = parse_attr_value(xmlnode, 'initialization', str)
        self.bitstream_switching = parse_attr_value(xmlnode, 'bitstreamSwitching', str)

    def write(self, xmlnode):
        super(SegmentTemplate, self).write(xmlnode)
        write_attr_value(xmlnode, 'media', self.media)
        write_attr_value(xmlnode, 'index', self.index)
        write_attr_value(xmlnode, 'initialization', self.initialization)
        write_attr_value(xmlnode, 'bitstreamSwitching', self.bitstream_switching)
class SegmentList(MultipleSegmentBase):
    """Segment information given as an explicit list of SegmentURL entries."""

    def __init__(self):
        super(SegmentList, self).__init__()
        # List of SegmentURL children; None until parsed.
        self.segment_urls = None

    def parse(self, xmlnode):
        super(SegmentList, self).parse(xmlnode)
        self.segment_urls = parse_child_nodes(xmlnode, 'SegmentURL', SegmentURL)

    def write(self, xmlnode):
        super(SegmentList, self).write(xmlnode)
        write_child_node(xmlnode, 'SegmentURL', self.segment_urls)
class Event(XMLNode):
    """An EventStream Event: timing attributes plus an arbitrary text payload."""

    # (python attribute, XML attribute, converter) triples, in schema order.
    _ATTRS = (
        ('message_data', 'messageData', str),
        ('presentation_time', 'presentationTime', int),
        ('duration', 'duration', int),
        ('id', 'id', int),
    )

    def __init__(self):
        self.event_value = None  # xs:string (element text)
        for field, _, _ in Event._ATTRS:
            setattr(self, field, None)

    def parse(self, xmlnode):
        self.event_value = parse_node_value(xmlnode, str)
        for field, attr, conv in Event._ATTRS:
            setattr(self, field, parse_attr_value(xmlnode, attr, conv))

    def write(self, xmlnode):
        write_node_value(xmlnode, self.event_value)
        for field, attr, _ in Event._ATTRS:
            write_attr_value(xmlnode, attr, getattr(self, field))
class Descriptor(XMLNode):
    """A generic DescriptorType element (scheme URI plus optional value/ids)."""

    # (python attribute, XML attribute, converter) triples, in schema order.
    _ATTRS = (
        ('scheme_id_uri', 'schemeIdUri', str),
        ('value', 'value', str),
        ('id', 'id', str),
        ('key_id', 'ns2:default_KID', str),
    )

    def __init__(self):
        self.scheme_id_uri = ''  # xs:anyURI (required)
        self.value = None        # xs:string
        self.id = None           # xs:string
        self.key_id = None       # xs:string

    def parse(self, xmlnode):
        for field, attr, conv in Descriptor._ATTRS:
            setattr(self, field, parse_attr_value(xmlnode, attr, conv))

    def write(self, xmlnode):
        for field, attr, _ in Descriptor._ATTRS:
            write_attr_value(xmlnode, attr, getattr(self, field))
class ContentComponent(XMLNode):
    """Describes one media component (e.g. an audio or video stream)."""

    # (python attribute, XML attribute, converter) triples, in schema order.
    _ATTRS = (
        ('id', 'id', int),
        ('lang', 'lang', str),
        ('content_type', 'contentType', str),
        ('par', 'par', str),
    )
    # (python attribute, child tag) pairs; each child is a Descriptor.
    _CHILDREN = (
        ('accessibilities', 'Accessibility'),
        ('roles', 'Role'),
        ('ratings', 'Rating'),
        ('viewpoints', 'Viewpoint'),
    )

    def __init__(self):
        for field, _, _ in ContentComponent._ATTRS:
            setattr(self, field, None)
        for field, _ in ContentComponent._CHILDREN:
            setattr(self, field, None)  # list of Descriptor, or None

    def parse(self, xmlnode):
        for field, attr, conv in ContentComponent._ATTRS:
            setattr(self, field, parse_attr_value(xmlnode, attr, conv))
        for field, tag in ContentComponent._CHILDREN:
            setattr(self, field, parse_child_nodes(xmlnode, tag, Descriptor))

    def write(self, xmlnode):
        for field, attr, _ in ContentComponent._ATTRS:
            write_attr_value(xmlnode, attr, getattr(self, field))
        for field, tag in ContentComponent._CHILDREN:
            write_child_node(xmlnode, tag, getattr(self, field))
class RepresentationBase(XMLNode):
    """Attributes and children shared by AdaptationSet, Representation and
    SubRepresentation (RepresentationBaseType in the MPD schema)."""

    # (python attribute, XML attribute name, value type)
    _RB_ATTR_SPEC = [
        ('profile', 'profile', str),                      # xs:string
        ('profiles', 'profiles', str),                    # xs:string
        ('width', 'width', int),                          # xs:unsignedInt
        ('height', 'height', int),                        # xs:unsignedInt
        ('sar', 'sar', str),                              # RatioType
        ('frame_rate', 'frameRate', str),                 # FrameRateType
        ('audio_sampling_rate', 'audioSamplingRate', str),
        ('mime_type', 'mimeType', str),
        ('segment_profiles', 'segmentProfiles', str),
        ('codecs', 'codecs', str),
        ('maximum_sap_period', 'maximumSAPPeriod', float),
        ('start_with_sap', 'startWithSAP', int),          # SAPType
        ('max_playout_rate', 'maxPlayoutRate', float),
        ('coding_dependency', 'codingDependency', bool),
        ('scan_type', 'scanType', str),                   # VideoScanType
    ]
    # (python attribute, XML child tag); every child is a Descriptor.
    _RB_CHILD_TAGS = [
        ('frame_packings', 'FramePacking'),
        ('audio_channel_configurations', 'AudioChannelConfiguration'),
        ('content_protections', 'ContentProtection'),
        ('essential_properties', 'EssentialProperty'),
        ('supplemental_properties', 'SupplementalProperty'),
        ('inband_event_streams', 'InbandEventStream'),
    ]

    def __init__(self):
        for member, _, _ in self._RB_ATTR_SPEC:
            setattr(self, member, None)
        for member, _ in self._RB_CHILD_TAGS:
            setattr(self, member, None)

    def parse(self, xmlnode):
        """Populates the shared attributes/children from an XML node."""
        for member, xml_name, cast in self._RB_ATTR_SPEC:
            setattr(self, member, parse_attr_value(xmlnode, xml_name, cast))
        for member, tag in self._RB_CHILD_TAGS:
            setattr(self, member, parse_child_nodes(xmlnode, tag, Descriptor))

    def write(self, xmlnode):
        """Serializes the shared attributes/children onto an XML node."""
        for member, xml_name, _ in self._RB_ATTR_SPEC:
            write_attr_value(xmlnode, xml_name, getattr(self, member))
        for member, tag in self._RB_CHILD_TAGS:
            write_child_node(xmlnode, tag, getattr(self, member))
class Representation(RepresentationBase):
    """A DASH Representation: one concrete encoding of the content.

    Bug fix: write() previously dropped qualityRanking, dependencyId,
    numChannels and sampleRate even though parse() read them, so a
    parse -> write round trip silently lost those attributes. It also
    re-emitted width/height/mimeType/codecs that RepresentationBase.write()
    had already written; those duplicates are removed.
    """

    def __init__(self):
        RepresentationBase.__init__(self)
        self.id = ''                     # StringNoWhitespaceType (required)
        self.bandwidth = 0               # xs:unsignedInt (required)
        self.quality_ranking = None      # xs:unsignedInt
        self.dependency_id = None        # StringVectorType
        self.num_channels = None         # xs:unsignedInt
        self.sample_rate = None          # xs:unsignedLong
        self.base_urls = None            # BaseURLType*
        self.segment_bases = None        # SegmentBaseType*
        self.segment_lists = None        # SegmentListType*
        self.segment_templates = None    # SegmentTemplateType*
        self.sub_representations = None  # SubRepresentationType*

    def parse(self, xmlnode):
        """Populates this object from an XML node."""
        RepresentationBase.parse(self, xmlnode)
        self.id = parse_attr_value(xmlnode, 'id', str)
        self.bandwidth = parse_attr_value(xmlnode, 'bandwidth', int)
        self.quality_ranking = parse_attr_value(xmlnode, 'qualityRanking', int)
        self.dependency_id = parse_attr_value(xmlnode, 'dependencyId', [str])
        self.num_channels = parse_attr_value(xmlnode, 'numChannels', int)
        self.sample_rate = parse_attr_value(xmlnode, 'sampleRate', int)
        self.base_urls = parse_child_nodes(xmlnode, 'BaseURL', BaseURL)
        self.segment_bases = parse_child_nodes(xmlnode, 'SegmentBase', SegmentBase)
        self.segment_lists = parse_child_nodes(xmlnode, 'SegmentList', SegmentList)
        self.segment_templates = parse_child_nodes(xmlnode, 'SegmentTemplate', SegmentTemplate)
        self.sub_representations = parse_child_nodes(xmlnode, 'SubRepresentation', SubRepresentation)

    def write(self, xmlnode):
        """Serializes this object onto an XML node."""
        # Shared attributes (width, height, mimeType, codecs, ...) are emitted
        # by the base class; do not duplicate them here.
        RepresentationBase.write(self, xmlnode)
        write_attr_value(xmlnode, 'id', self.id)
        write_attr_value(xmlnode, 'bandwidth', self.bandwidth)
        # Previously parsed but never written; required for round-trip fidelity.
        write_attr_value(xmlnode, 'qualityRanking', self.quality_ranking)
        # NOTE(review): assumes write_attr_value serializes list values
        # (StringVectorType) symmetrically with parse_attr_value([str]) -- confirm.
        write_attr_value(xmlnode, 'dependencyId', self.dependency_id)
        write_attr_value(xmlnode, 'numChannels', self.num_channels)
        write_attr_value(xmlnode, 'sampleRate', self.sample_rate)
        write_child_node(xmlnode, 'BaseURL', self.base_urls)
        write_child_node(xmlnode, 'SegmentBase', self.segment_bases)
        write_child_node(xmlnode, 'SegmentList', self.segment_lists)
        write_child_node(xmlnode, 'SegmentTemplate', self.segment_templates)
        write_child_node(xmlnode, 'SubRepresentation', self.sub_representations)
class SubRepresentation(RepresentationBase):
    """A DASH SubRepresentation: an addressable subset of a Representation."""

    # (python attribute, XML attribute name, value type); a type wrapped in a
    # list (e.g. [int]) denotes a whitespace-separated vector attribute.
    _SUBREP_ATTR_SPEC = [
        ('level', 'level', int),                        # xs:unsignedInt
        ('bandwidth', 'bandwidth', int),                # xs:unsignedInt
        ('dependency_level', 'dependencyLevel', [int]), # UIntVectorType
        ('content_component', 'contentComponent', [str]),  # StringVectorType
    ]

    def __init__(self):
        RepresentationBase.__init__(self)
        for member, _, _ in self._SUBREP_ATTR_SPEC:
            setattr(self, member, None)

    def parse(self, xmlnode):
        """Populates this object from an XML node."""
        RepresentationBase.parse(self, xmlnode)
        for member, xml_name, cast in self._SUBREP_ATTR_SPEC:
            setattr(self, member, parse_attr_value(xmlnode, xml_name, cast))

    def write(self, xmlnode):
        """Serializes this object onto an XML node."""
        RepresentationBase.write(self, xmlnode)
        for member, xml_name, _ in self._SUBREP_ATTR_SPEC:
            write_attr_value(xmlnode, xml_name, getattr(self, member))
class AdaptationSet(RepresentationBase):
    """A DASH AdaptationSet: a group of interchangeable Representations."""

    # (python attribute, XML attribute name, value type)
    _AS_ATTR_SPEC = [
        ('id', 'id', int),
        ('group', 'group', int),
        ('lang', 'lang', str),
        ('label', 'label', str),
        ('content_type', 'contentType', str),
        ('par', 'par', str),
        ('min_bandwidth', 'minBandwidth', int),
        ('max_bandwidth', 'maxBandwidth', int),
        ('min_width', 'minWidth', int),
        ('max_width', 'maxWidth', int),
        ('min_height', 'minHeight', int),
        ('max_height', 'maxHeight', int),
        ('min_frame_rate', 'minFrameRate', str),
        ('max_frame_rate', 'maxFrameRate', str),
        ('segment_alignment', 'segmentAlignment', bool),
        ('selection_priority', 'selectionPriority', int),
        ('subsegment_alignment', 'subsegmentAlignment', bool),
        ('subsegment_starts_with_sap', 'subsegmentStartsWithSAP', int),
        ('bitstream_switching', 'bitstreamSwitching', bool),
    ]
    # (python attribute, XML child tag); order here fixes emission order.
    _AS_CHILD_TAGS = [
        ('accessibilities', 'Accessibility'),
        ('roles', 'Role'),
        ('ratings', 'Rating'),
        ('viewpoints', 'Viewpoint'),
        ('content_components', 'ContentComponent'),
        ('base_urls', 'BaseURL'),
        ('segment_bases', 'SegmentBase'),
        ('segment_lists', 'SegmentList'),
        ('segment_templates', 'SegmentTemplate'),
        ('representations', 'Representation'),
    ]

    def __init__(self):
        RepresentationBase.__init__(self)
        for member, _, _ in self._AS_ATTR_SPEC:
            setattr(self, member, None)
        for member, _ in self._AS_CHILD_TAGS:
            setattr(self, member, None)

    def parse(self, xmlnode):
        """Populates this object from an XML node."""
        RepresentationBase.parse(self, xmlnode)
        for member, xml_name, cast in self._AS_ATTR_SPEC:
            setattr(self, member, parse_attr_value(xmlnode, xml_name, cast))
        # Child classes are looked up here at call time, matching the name
        # resolution of the original statement-per-child form.
        for member, tag, node_cls in [
                ('accessibilities', 'Accessibility', Descriptor),
                ('roles', 'Role', Descriptor),
                ('ratings', 'Rating', Descriptor),
                ('viewpoints', 'Viewpoint', Descriptor),
                ('content_components', 'ContentComponent', ContentComponent),
                ('base_urls', 'BaseURL', BaseURL),
                ('segment_bases', 'SegmentBase', SegmentBase),
                ('segment_lists', 'SegmentList', SegmentList),
                ('segment_templates', 'SegmentTemplate', SegmentTemplate),
                ('representations', 'Representation', Representation)]:
            setattr(self, member, parse_child_nodes(xmlnode, tag, node_cls))

    def write(self, xmlnode):
        """Serializes this object onto an XML node."""
        RepresentationBase.write(self, xmlnode)
        for member, xml_name, _ in self._AS_ATTR_SPEC:
            write_attr_value(xmlnode, xml_name, getattr(self, member))
        for member, tag in self._AS_CHILD_TAGS:
            write_child_node(xmlnode, tag, getattr(self, member))
class EventStream(XMLNode):
    """A DASH EventStream element carrying timed <Event> children."""

    # (python attribute, XML attribute name, value type)
    _ES_ATTR_SPEC = [
        ('scheme_id_uri', 'schemeIdUri', str),  # xs:anyURI (required)
        ('value', 'value', str),                # xs:string
        ('timescale', 'timescale', int),        # xs:unsignedInt
    ]

    def __init__(self):
        for member, _, _ in self._ES_ATTR_SPEC:
            setattr(self, member, None)
        self.events = None  # EventType*

    def parse(self, xmlnode):
        """Populates this object from an XML node."""
        for member, xml_name, cast in self._ES_ATTR_SPEC:
            setattr(self, member, parse_attr_value(xmlnode, xml_name, cast))
        self.events = parse_child_nodes(xmlnode, 'Event', Event)

    def write(self, xmlnode):
        """Serializes this object onto an XML node."""
        for member, xml_name, _ in self._ES_ATTR_SPEC:
            write_attr_value(xmlnode, xml_name, getattr(self, member))
        write_child_node(xmlnode, 'Event', self.events)
class Period(XMLNode):
    """A DASH Period: a time interval of the presentation."""

    # (python attribute, XML attribute name, value type)
    _PERIOD_ATTR_SPEC = [
        ('id', 'id', str),                               # xs:string
        ('start', 'start', str),                         # xs:duration
        ('duration', 'duration', str),                   # xs:duration
        ('bitstream_switching', 'bitstreamSwitching', bool),
    ]
    # (python attribute, XML child tag); order here fixes emission order.
    _PERIOD_CHILD_TAGS = [
        ('base_urls', 'BaseURL'),
        ('segment_bases', 'SegmentBase'),
        ('segment_lists', 'SegmentList'),
        ('segment_templates', 'SegmentTemplate'),
        ('asset_identifiers', 'AssetIdentifier'),
        ('event_streams', 'EventStream'),
        ('adaptation_sets', 'AdaptationSet'),
        ('subsets', 'Subset'),
    ]

    def __init__(self):
        for member, _, _ in self._PERIOD_ATTR_SPEC:
            setattr(self, member, None)
        for member, _ in self._PERIOD_CHILD_TAGS:
            setattr(self, member, None)

    def parse(self, xmlnode):
        """Populates this object from an XML node."""
        for member, xml_name, cast in self._PERIOD_ATTR_SPEC:
            setattr(self, member, parse_attr_value(xmlnode, xml_name, cast))
        # Child classes are looked up here at call time, matching the name
        # resolution of the original statement-per-child form.
        for member, tag, node_cls in [
                ('base_urls', 'BaseURL', BaseURL),
                ('segment_bases', 'SegmentBase', SegmentBase),
                ('segment_lists', 'SegmentList', SegmentList),
                ('segment_templates', 'SegmentTemplate', SegmentTemplate),
                ('asset_identifiers', 'AssetIdentifier', Descriptor),
                ('event_streams', 'EventStream', EventStream),
                ('adaptation_sets', 'AdaptationSet', AdaptationSet),
                ('subsets', 'Subset', Subset)]:
            setattr(self, member, parse_child_nodes(xmlnode, tag, node_cls))

    def write(self, xmlnode):
        """Serializes this object onto an XML node."""
        for member, xml_name, _ in self._PERIOD_ATTR_SPEC:
            write_attr_value(xmlnode, xml_name, getattr(self, member))
        for member, tag in self._PERIOD_CHILD_TAGS:
            write_child_node(xmlnode, tag, getattr(self, member))
class MPEGDASH(XMLNode):
    """The MPD root element of an MPEG-DASH manifest."""

    # (python attribute, XML attribute name, value type). Durations and
    # datetimes are kept as raw strings, as in the rest of this module.
    _MPD_ATTR_SPEC = [
        ('xmlns', 'xmlns', str),
        ('id', 'id', str),
        ('type', 'type', str),                    # PresentationType
        ('profiles', 'profiles', str),            # xs:string (required)
        ('availability_start_time', 'availabilityStartTime', str),
        ('availability_end_time', 'availabilityEndTime', str),
        ('publish_time', 'publishTime', str),
        ('media_presentation_duration', 'mediaPresentationDuration', str),
        ('minimum_update_period', 'minimumUpdatePeriod', str),
        ('min_buffer_time', 'minBufferTime', str),
        ('time_shift_buffer_depth', 'timeShiftBufferDepth', str),
        ('suggested_presentation_delay', 'suggestedPresentationDelay', str),
        ('max_segment_duration', 'maxSegmentDuration', str),
        ('max_subsegment_duration', 'maxSubsegmentDuration', str),
    ]
    # (python attribute, XML child tag); order here fixes emission order.
    _MPD_CHILD_TAGS = [
        ('program_informations', 'ProgramInformation'),
        ('base_urls', 'BaseURL'),
        ('locations', 'Location'),
        ('periods', 'Period'),
        ('metrics', 'Metrics'),
        ('utc_timings', 'UTCTiming'),
    ]

    def __init__(self):
        for member, _, _ in self._MPD_ATTR_SPEC:
            setattr(self, member, None)
        for member, _ in self._MPD_CHILD_TAGS:
            setattr(self, member, None)
        self.profiles = ''  # required; default to empty string

    def parse(self, xmlnode):
        """Populates this object from the MPD root XML node."""
        for member, xml_name, cast in self._MPD_ATTR_SPEC:
            setattr(self, member, parse_attr_value(xmlnode, xml_name, cast))
        # Child classes are looked up here at call time, matching the name
        # resolution of the original statement-per-child form.
        for member, tag, node_cls in [
                ('program_informations', 'ProgramInformation', ProgramInformation),
                ('base_urls', 'BaseURL', BaseURL),
                ('locations', 'Location', XsStringElement),
                ('periods', 'Period', Period),
                ('metrics', 'Metrics', Metrics),
                ('utc_timings', 'UTCTiming', Descriptor)]:
            setattr(self, member, parse_child_nodes(xmlnode, tag, node_cls))

    def write(self, xmlnode):
        """Serializes this object onto the MPD root XML node."""
        for member, xml_name, _ in self._MPD_ATTR_SPEC:
            write_attr_value(xmlnode, xml_name, getattr(self, member))
        for member, tag in self._MPD_CHILD_TAGS:
            write_child_node(xmlnode, tag, getattr(self, member))
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for Linear and DNN joined training models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import linear
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
from tensorflow.python.util.tf_export import tf_export
# The default learning rates are a historical artifact of the initial
# implementation.
# NOTE: _LINEAR_LEARNING_RATE is an upper bound; _linear_learning_rate()
# below scales it down by 1/sqrt(number of linear feature columns).
_DNN_LEARNING_RATE = 0.001
_LINEAR_LEARNING_RATE = 0.005
def _check_no_sync_replicas_optimizer(optimizer):
  """Raises ValueError if `optimizer` is a SyncReplicasOptimizer.

  The combined model trains two optimizers in the same step, which
  SyncReplicasOptimizer does not support.
  """
  if not isinstance(optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):
    return
  raise ValueError(
      'SyncReplicasOptimizer does not support multi optimizers case. '
      'Therefore, it is not supported in DNNLinearCombined model. '
      'If you want to use this optimizer, please use either DNN or Linear '
      'model.')
def _linear_learning_rate(num_linear_feature_columns):
  """Returns the default learning rate of the linear model.

  The calculation is a historical artifact of this initial implementation, but
  has proven a reasonable choice.

  Args:
    num_linear_feature_columns: The number of feature columns of the linear
      model.

  Returns:
    A float: `1/sqrt(num_linear_feature_columns)`, capped at
    `_LINEAR_LEARNING_RATE`.
  """
  return min(_LINEAR_LEARNING_RATE,
             1. / math.sqrt(num_linear_feature_columns))
def _add_layer_summary(value, tag):
  """Adds zero-fraction and activation-histogram summaries for a layer."""
  fraction_of_zeros = nn.zero_fraction(value)
  summary.scalar('%s/fraction_of_zero_values' % tag, fraction_of_zeros)
  summary.histogram('%s/activation' % tag, value)
def _dnn_linear_combined_model_fn(features,
                                  labels,
                                  mode,
                                  head,
                                  linear_feature_columns=None,
                                  linear_optimizer='Ftrl',
                                  dnn_feature_columns=None,
                                  dnn_optimizer='Adagrad',
                                  dnn_hidden_units=None,
                                  dnn_activation_fn=nn.relu,
                                  dnn_dropout=None,
                                  input_layer_partitioner=None,
                                  config=None):
  """Deep Neural Net and Linear combined model_fn.

  Builds the DNN tower (scope 'dnn') and the linear tower (scope 'linear')
  independently, sums their logits, and trains each tower with its own
  optimizer restricted to that tower's variables.

  Args:
    features: dict of `Tensor`.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype
      `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    head: A `Head` instance.
    linear_feature_columns: An iterable containing all the feature columns used
      by the Linear model.
    linear_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the Linear model. Defaults to the Ftrl
      optimizer.
    dnn_feature_columns: An iterable containing all the feature columns used by
      the DNN model.
    dnn_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the DNN model. Defaults to the Adagrad
      optimizer.
    dnn_hidden_units: List of hidden units per DNN layer.
    dnn_activation_fn: Activation function applied to each DNN layer. If `None`,
      will use `tf.nn.relu`.
    dnn_dropout: When not `None`, the probability we will drop out a given DNN
      coordinate.
    input_layer_partitioner: Partitioner for input layer.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    An `EstimatorSpec` instance.

  Raises:
    ValueError: If both `linear_feature_columns` and `dnn_features_columns`
      are empty at the same time, or `input_layer_partitioner` is missing,
      or features has the wrong type.
  """
  if not isinstance(features, dict):
    raise ValueError('features should be a dictionary of `Tensor`s. '
                     'Given type: {}'.format(type(features)))
  if not linear_feature_columns and not dnn_feature_columns:
    raise ValueError(
        'Either linear_feature_columns or dnn_feature_columns must be defined.')
  num_ps_replicas = config.num_ps_replicas if config else 0
  input_layer_partitioner = input_layer_partitioner or (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=num_ps_replicas,
          min_slice_size=64 << 20))
  # Build DNN Logits.
  dnn_parent_scope = 'dnn'
  if not dnn_feature_columns:
    dnn_logits = None
  else:
    # String/callable optimizer specs are resolved to instances here, with the
    # tower-specific default learning rate.
    dnn_optimizer = optimizers.get_optimizer_instance(
        dnn_optimizer, learning_rate=_DNN_LEARNING_RATE)
    _check_no_sync_replicas_optimizer(dnn_optimizer)
    if not dnn_hidden_units:
      raise ValueError(
          'dnn_hidden_units must be defined when dnn_feature_columns is '
          'specified.')
    dnn_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=num_ps_replicas))
    with variable_scope.variable_scope(
        dnn_parent_scope,
        values=tuple(six.itervalues(features)),
        partitioner=dnn_partitioner):
      dnn_logit_fn = dnn._dnn_logit_fn_builder(  # pylint: disable=protected-access
          units=head.logits_dimension,
          hidden_units=dnn_hidden_units,
          feature_columns=dnn_feature_columns,
          activation_fn=dnn_activation_fn,
          dropout=dnn_dropout,
          input_layer_partitioner=input_layer_partitioner)
      dnn_logits = dnn_logit_fn(features=features, mode=mode)
  # Build Linear logits.
  linear_parent_scope = 'linear'
  if not linear_feature_columns:
    linear_logits = None
  else:
    # The linear default learning rate scales with the number of columns.
    linear_optimizer = optimizers.get_optimizer_instance(
        linear_optimizer,
        learning_rate=_linear_learning_rate(len(linear_feature_columns)))
    _check_no_sync_replicas_optimizer(linear_optimizer)
    with variable_scope.variable_scope(
        linear_parent_scope,
        values=tuple(six.itervalues(features)),
        partitioner=input_layer_partitioner) as scope:
      logit_fn = linear._linear_logit_fn_builder(  # pylint: disable=protected-access
          units=head.logits_dimension,
          feature_columns=linear_feature_columns)
      linear_logits = logit_fn(features=features)
      _add_layer_summary(linear_logits, scope.name)
  # Combine logits and build full model.
  if dnn_logits is not None and linear_logits is not None:
    logits = dnn_logits + linear_logits
  elif dnn_logits is not None:
    logits = dnn_logits
  else:
    logits = linear_logits
  def _train_op_fn(loss):
    """Returns the op to optimize the loss."""
    train_ops = []
    global_step = training_util.get_global_step()
    # Each tower is minimized by its own optimizer over only the variables
    # created under that tower's scope, so the two updates do not interfere.
    if dnn_logits is not None:
      train_ops.append(
          dnn_optimizer.minimize(
              loss,
              var_list=ops.get_collection(
                  ops.GraphKeys.TRAINABLE_VARIABLES,
                  scope=dnn_parent_scope)))
    if linear_logits is not None:
      train_ops.append(
          linear_optimizer.minimize(
              loss,
              var_list=ops.get_collection(
                  ops.GraphKeys.TRAINABLE_VARIABLES,
                  scope=linear_parent_scope)))
    # The global step is incremented exactly once, after both tower updates.
    train_op = control_flow_ops.group(*train_ops)
    with ops.control_dependencies([train_op]):
      return distribute_lib.increment_var(global_step)
  return head.create_estimator_spec(
      features=features,
      mode=mode,
      labels=labels,
      train_op_fn=_train_op_fn,
      logits=logits)
@tf_export('estimator.DNNLinearCombinedClassifier')
class DNNLinearCombinedClassifier(estimator.Estimator):
  """An estimator for TensorFlow Linear and DNN joined classification models.

  Note: This estimator is also known as wide-n-deep.

  Example:

  ```python
  numeric_feature = numeric_column(...)
  categorical_column_a = categorical_column_with_hash_bucket(...)
  categorical_column_b = categorical_column_with_hash_bucket(...)

  categorical_feature_a_x_categorical_feature_b = crossed_column(...)
  categorical_feature_a_emb = embedding_column(
      categorical_column=categorical_feature_a, ...)
  categorical_feature_b_emb = embedding_column(
      categorical_id_column=categorical_feature_b, ...)

  estimator = DNNLinearCombinedClassifier(
      # wide settings
      linear_feature_columns=[categorical_feature_a_x_categorical_feature_b],
      linear_optimizer=tf.train.FtrlOptimizer(...),
      # deep settings
      dnn_feature_columns=[
          categorical_feature_a_emb, categorical_feature_b_emb,
          numeric_feature],
      dnn_hidden_units=[1000, 500, 100],
      dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),
      # warm-start settings
      warm_start_from="/path/to/checkpoint/dir")

  # To apply L1 and L2 regularization, you can set optimizers as follows:
  tf.train.ProximalAdagradOptimizer(
      learning_rate=0.1,
      l1_regularization_strength=0.001,
      l2_regularization_strength=0.001)
  # It is same for FtrlOptimizer.

  # Input builders
  def input_fn_train: # returns x, y
    pass
  estimator.train(input_fn=input_fn_train, steps=100)

  def input_fn_eval: # returns x, y
    pass
  metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
  def input_fn_predict: # returns x, None
    pass
  predictions = estimator.predict(input_fn=input_fn_predict)
  ```

  Input of `train` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:

  * for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
    - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `_WeightedCategoricalColumn`, two features: the first
      with `key` the id column name, the second with `key` the weight column
      name. Both features' `value` must be a `SparseTensor`.
    - if `column` is a `_DenseColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.

  Loss is calculated by using softmax cross entropy.

  @compatibility(eager)
  Estimators are not compatible with eager execution.
  @end_compatibility
  """

  def __init__(self,
               model_dir=None,
               linear_feature_columns=None,
               linear_optimizer='Ftrl',
               dnn_feature_columns=None,
               dnn_optimizer='Adagrad',
               dnn_hidden_units=None,
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               n_classes=2,
               weight_column=None,
               label_vocabulary=None,
               input_layer_partitioner=None,
               config=None,
               warm_start_from=None,
               loss_reduction=losses.Reduction.SUM):
    """Initializes a DNNLinearCombinedClassifier instance.

    Args:
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      linear_feature_columns: An iterable containing all the feature columns
        used by linear part of the model. All items in the set must be
        instances of classes derived from `FeatureColumn`.
      linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the linear part of the model. Defaults to FTRL optimizer.
      dnn_feature_columns: An iterable containing all the feature columns used
        by deep part of the model. All items in the set must be instances of
        classes derived from `FeatureColumn`.
      dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the deep part of the model. Defaults to Adagrad optimizer.
      dnn_hidden_units: List of hidden units per layer. All layers are fully
        connected.
      dnn_activation_fn: Activation function applied to each layer. If None,
        will use `tf.nn.relu`.
      dnn_dropout: When not None, the probability we will drop out
        a given coordinate.
      n_classes: Number of label classes. Defaults to 2, namely binary
        classification. Must be > 1.
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining feature column representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example. If it is a string, it is
        used as a key to fetch weight tensor from the `features`. If it is a
        `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
        then weight_column.normalizer_fn is applied on it to get weight tensor.
      label_vocabulary: A list of strings represents possible label values. If
        given, labels must be string type and have any value in
        `label_vocabulary`. If it is not given, that means labels are
        already encoded as integer or float within [0, 1] for `n_classes=2` and
        encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .
        Also there will be errors if vocabulary is not provided and labels are
        string.
      input_layer_partitioner: Partitioner for input layer. Defaults to
        `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
      config: RunConfig object to configure the runtime settings.
      warm_start_from: A string filepath to a checkpoint to warm-start from, or
        a `WarmStartSettings` object to fully configure warm-starting.  If the
        string filepath is provided instead of a `WarmStartSettings`, then all
        weights are warm-started, and it is assumed that vocabularies and Tensor
        names are unchanged.
      loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
        to reduce training loss over batch. Defaults to `SUM`.

    Raises:
      ValueError: If both linear_feature_columns and dnn_features_columns are
        empty at the same time.
    """
    linear_feature_columns = linear_feature_columns or []
    dnn_feature_columns = dnn_feature_columns or []
    # Kept for introspection; also used to validate that at least one of the
    # two column sets is non-empty.
    self._feature_columns = (
        list(linear_feature_columns) + list(dnn_feature_columns))
    if not self._feature_columns:
      raise ValueError('Either linear_feature_columns or dnn_feature_columns '
                       'must be defined.')
    # Binary classification uses the sigmoid cross-entropy head; multi-class
    # uses the softmax cross-entropy head.
    if n_classes == 2:
      head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(  # pylint: disable=protected-access
          weight_column=weight_column,
          label_vocabulary=label_vocabulary,
          loss_reduction=loss_reduction)
    else:
      head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(  # pylint: disable=protected-access
          n_classes,
          weight_column=weight_column,
          label_vocabulary=label_vocabulary,
          loss_reduction=loss_reduction)

    def _model_fn(features, labels, mode, config):
      """Call the _dnn_linear_combined_model_fn."""
      return _dnn_linear_combined_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          linear_feature_columns=linear_feature_columns,
          linear_optimizer=linear_optimizer,
          dnn_feature_columns=dnn_feature_columns,
          dnn_optimizer=dnn_optimizer,
          dnn_hidden_units=dnn_hidden_units,
          dnn_activation_fn=dnn_activation_fn,
          dnn_dropout=dnn_dropout,
          input_layer_partitioner=input_layer_partitioner,
          config=config)

    super(DNNLinearCombinedClassifier, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config,
        warm_start_from=warm_start_from)
@tf_export('estimator.DNNLinearCombinedRegressor')
class DNNLinearCombinedRegressor(estimator.Estimator):
  """An estimator for TensorFlow Linear and DNN joined models for regression.

  Note: This estimator is also known as wide-n-deep.

  Example:

  ```python
  numeric_feature = numeric_column(...)
  categorical_column_a = categorical_column_with_hash_bucket(...)
  categorical_column_b = categorical_column_with_hash_bucket(...)

  categorical_feature_a_x_categorical_feature_b = crossed_column(...)
  categorical_feature_a_emb = embedding_column(
      categorical_column=categorical_column_a, ...)
  categorical_feature_b_emb = embedding_column(
      categorical_column=categorical_column_b, ...)

  estimator = DNNLinearCombinedRegressor(
      # wide settings
      linear_feature_columns=[categorical_feature_a_x_categorical_feature_b],
      linear_optimizer=tf.train.FtrlOptimizer(...),
      # deep settings
      dnn_feature_columns=[
          categorical_feature_a_emb, categorical_feature_b_emb,
          numeric_feature],
      dnn_hidden_units=[1000, 500, 100],
      dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),
      # warm-start settings
      warm_start_from="/path/to/checkpoint/dir")

  # To apply L1 and L2 regularization, you can set optimizers as follows:
  tf.train.ProximalAdagradOptimizer(
      learning_rate=0.1,
      l1_regularization_strength=0.001,
      l2_regularization_strength=0.001)
  # It is same for FtrlOptimizer.

  # Input builders
  def input_fn_train: # returns x, y
    pass
  estimator.train(input_fn=input_fn_train, steps=100)

  def input_fn_eval: # returns x, y
    pass
  metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
  def input_fn_predict: # returns x, None
    pass
  predictions = estimator.predict(input_fn=input_fn_predict)
  ```

  Input of `train` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:

  * for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
    - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `_WeightedCategoricalColumn`, two features: the first
      with `key` the id column name, the second with `key` the weight column
      name. Both features' `value` must be a `SparseTensor`.
    - if `column` is a `_DenseColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.

  Loss is calculated by using mean squared error.

  @compatibility(eager)
  Estimators are not compatible with eager execution.
  @end_compatibility
  """

  def __init__(self,
               model_dir=None,
               linear_feature_columns=None,
               linear_optimizer='Ftrl',
               dnn_feature_columns=None,
               dnn_optimizer='Adagrad',
               dnn_hidden_units=None,
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               label_dimension=1,
               weight_column=None,
               input_layer_partitioner=None,
               config=None,
               warm_start_from=None,
               loss_reduction=losses.Reduction.SUM):
    """Initializes a DNNLinearCombinedRegressor instance.

    Args:
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      linear_feature_columns: An iterable containing all the feature columns
        used by linear part of the model. All items in the set must be
        instances of classes derived from `FeatureColumn`.
      linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the linear part of the model. Defaults to FTRL optimizer.
      dnn_feature_columns: An iterable containing all the feature columns used
        by deep part of the model. All items in the set must be instances of
        classes derived from `FeatureColumn`.
      dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the deep part of the model. Defaults to Adagrad optimizer.
      dnn_hidden_units: List of hidden units per layer. All layers are fully
        connected.
      dnn_activation_fn: Activation function applied to each layer. If None,
        will use `tf.nn.relu`.
      dnn_dropout: When not None, the probability we will drop out
        a given coordinate.
      label_dimension: Number of regression targets per example. This is the
        size of the last dimension of the labels and logits `Tensor` objects
        (typically, these have shape `[batch_size, label_dimension]`).
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining feature column representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example. If it is a string, it is
        used as a key to fetch weight tensor from the `features`. If it is a
        `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
        then weight_column.normalizer_fn is applied on it to get weight tensor.
      input_layer_partitioner: Partitioner for input layer. Defaults to
        `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
      config: RunConfig object to configure the runtime settings.
      warm_start_from: A string filepath to a checkpoint to warm-start from, or
        a `WarmStartSettings` object to fully configure warm-starting. If the
        string filepath is provided instead of a `WarmStartSettings`, then all
        weights are warm-started, and it is assumed that vocabularies and Tensor
        names are unchanged.
      loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
        to reduce training loss over batch. Defaults to `SUM`.

    Raises:
      ValueError: If both linear_feature_columns and dnn_feature_columns are
        empty at the same time.
    """
    # Treat `None` as "no columns for this tower"; at least one tower must
    # have columns, otherwise the model would take no input at all.
    linear_feature_columns = linear_feature_columns or []
    dnn_feature_columns = dnn_feature_columns or []
    self._feature_columns = (
        list(linear_feature_columns) + list(dnn_feature_columns))
    if not self._feature_columns:
      raise ValueError('Either linear_feature_columns or dnn_feature_columns '
                       'must be defined.')

    def _model_fn(features, labels, mode, config):
      """Call the _dnn_linear_combined_model_fn."""
      # NOTE: the regression head is constructed inside the model_fn (per
      # call), unlike the classifier above which builds its head eagerly.
      # The parameter names (features/labels/mode/config) are part of the
      # model_fn contract and must not be renamed.
      return _dnn_linear_combined_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head_lib.  # pylint: disable=protected-access
          _regression_head_with_mean_squared_error_loss(
              label_dimension=label_dimension, weight_column=weight_column,
              loss_reduction=loss_reduction),
          linear_feature_columns=linear_feature_columns,
          linear_optimizer=linear_optimizer,
          dnn_feature_columns=dnn_feature_columns,
          dnn_optimizer=dnn_optimizer,
          dnn_hidden_units=dnn_hidden_units,
          dnn_activation_fn=dnn_activation_fn,
          dnn_dropout=dnn_dropout,
          input_layer_partitioner=input_layer_partitioner,
          config=config)

    super(DNNLinearCombinedRegressor, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config,
        warm_start_from=warm_start_from)
| |
#!/usr/bin/env python
"""
Communicate with an Amazon Fire TV device via ADB over a network.
ADB Debugging must be enabled.
"""
import logging
import re
from socket import error as socket_error
import sys
import threading
# Install adb shell if we can, then try the others
# Prefer the `adb_shell` backend if it is installed, then try the others.
USE_ADB_SHELL = False
try:
    from adb_shell.adb_device import AdbDevice, AdbDeviceTcp
    from adb_shell.auth.sign_pythonrsa import PythonRSASigner
    from adb_shell.exceptions import InvalidChecksumError
    USE_ADB_SHELL = True
except ImportError:
    # `adb_shell` is optional; fall back to python-adb / pure-python-adb.
    pass

if not USE_ADB_SHELL:
    from adb import adb_commands
    from adb.sign_pythonrsa import PythonRSASigner
    from adb.adb_protocol import InvalidChecksumError
    from adb_messenger.client import Client as AdbClient

# Factory for an RSA signer from the path of an "adbkey" file.
Signer = PythonRSASigner.FromRSAKeyPath

# `threading.Lock.acquire` only gained its `timeout` parameter in Python 3.2;
# compare the full version tuple so Python 4.x would also take the timeout.
if sys.version_info >= (3, 2):
    LOCK_KWARGS = {'timeout': 3}
else:
    LOCK_KWARGS = {}

# Matches window windows output for app & activity name gathering
WINDOW_REGEX = re.compile(r"Window\{(?P<id>.+?) (?P<user>.+) (?P<package>.+?)(?:\/(?P<activity>.+?))?\}$", re.MULTILINE)

# ADB shell commands for getting the `screen_on`, `awake`, `wake_lock`,
# `wake_lock_size`, `current_app`, and `running_apps` properties
SCREEN_ON_CMD = "dumpsys power | grep 'Display Power' | grep -q 'state=ON'"
AWAKE_CMD = "dumpsys power | grep mWakefulness | grep -q Awake"
WAKE_LOCK_CMD = "dumpsys power | grep Locks | grep -q 'size=0'"
WAKE_LOCK_SIZE_CMD = "dumpsys power | grep Locks | grep 'size='"
CURRENT_APP_CMD = "dumpsys window windows | grep mCurrentFocus"
RUNNING_APPS_CMD = "ps | grep u0_a"

# echo '1' if the previous shell command was successful
SUCCESS1 = r" && echo -e '1\c'"

# echo '1' if the previous shell command was successful, echo '0' if it was not
SUCCESS1_FAILURE0 = r" && echo -e '1\c' || echo -e '0\c'"

# ADB key event codes.
HOME = 3
CENTER = 23
VOLUME_UP = 24
VOLUME_DOWN = 25
POWER = 26
SLEEP = 223
PLAY_PAUSE = 85
NEXT = 87
PREVIOUS = 88
PLAY = 126
PAUSE = 127
UP = 19
DOWN = 20
LEFT = 21
RIGHT = 22
ENTER = 66
SPACE = 62
BACK = 4
MENU = 1
KEY_0 = 7
KEY_1 = 8
KEY_2 = 9
KEY_3 = 10
KEY_4 = 11
KEY_5 = 12
KEY_6 = 13
KEY_7 = 14
KEY_8 = 15
KEY_9 = 16
KEY_A = 29
KEY_B = 30
KEY_C = 31
KEY_D = 32
KEY_E = 33
KEY_F = 34
KEY_G = 35
KEY_H = 36
KEY_I = 37
KEY_J = 38
KEY_K = 39
KEY_L = 40
KEY_M = 41
KEY_N = 42
KEY_O = 43
KEY_P = 44
KEY_Q = 45
KEY_R = 46
KEY_S = 47
KEY_T = 48
KEY_U = 49
KEY_V = 50
KEY_W = 51
KEY_X = 52
KEY_Y = 53
KEY_Z = 54

# Select key codes for use by a Home Assistant service.
KEYS = {'POWER': POWER,
        'SLEEP': SLEEP,
        'HOME': HOME,
        'CENTER': CENTER,
        'BACK': BACK,
        'MENU': MENU,
        'UP': UP,
        'DOWN': DOWN,
        'LEFT': LEFT,
        'RIGHT': RIGHT}

# Fire TV states.
STATE_ON = 'on'
STATE_IDLE = 'idle'
STATE_OFF = 'off'
STATE_PLAYING = 'playing'
STATE_PAUSED = 'paused'
STATE_STANDBY = 'standby'
STATE_UNKNOWN = 'unknown'

# Apps.
PACKAGE_LAUNCHER = "com.amazon.tv.launcher"
PACKAGE_SETTINGS = "com.amazon.tv.settings"
AMAZON_VIDEO = 'com.amazon.avod'
KODI = 'org.xbmc.kodi'
NETFLIX = 'com.netflix.ninja'
APPS = {AMAZON_VIDEO: 'Amazon Video',
        KODI: 'Kodi',
        NETFLIX: 'Netflix'}

# Intents.
INTENT_LAUNCH = "android.intent.category.LAUNCHER"
INTENT_HOME = "android.intent.category.HOME"
class FireTV:
    """Represents an Amazon Fire TV device."""

    def __init__(self, host, adbkey='', adb_server_ip='', adb_server_port=5037):
        """Initialize FireTV object.

        :param host: Host in format <address>:port.
        :param adbkey: The path to the "adbkey" file
        :param adb_server_ip: the IP address for the ADB server
        :param adb_server_port: the port for the ADB server
        """
        self.host = host
        self.adbkey = adbkey
        self.adb_server_ip = adb_server_ip
        self.adb_server_port = adb_server_port

        # keep track of whether the ADB connection is intact
        self._available = False

        # use a lock to make sure that ADB commands don't overlap
        self._adb_lock = threading.Lock()

        # the attributes used for sending ADB commands; filled in in `self.connect()`
        self._adb = None          # python-adb
        self._adb_client = None   # pure-python-adb
        self._adb_device = None   # pure-python-adb && adb_shell

        # the methods used for sending ADB commands
        if USE_ADB_SHELL:
            # adb_shell
            self.adb_shell = self._adb_shell_adb_shell
            # FIX: this was previously bound to `_adb_shell_adb_shell`, which
            # left `_adb_streaming_shell_adb_shell` dead and made the
            # streaming variant return `None` (not an iterable) when the
            # device is unavailable.
            self.adb_streaming_shell = self._adb_streaming_shell_adb_shell
        elif not self.adb_server_ip:
            # python-adb
            self.adb_shell = self._adb_shell_python_adb
            self.adb_streaming_shell = self._adb_streaming_shell_python_adb
        else:
            # pure-python-adb
            self.adb_shell = self._adb_shell_pure_python_adb
            self.adb_streaming_shell = self._adb_streaming_shell_pure_python_adb

        # establish the ADB connection
        self.connect()

    # ======================================================================= #
    #                                                                         #
    #                               ADB methods                               #
    #                                                                         #
    # ======================================================================= #
    def _adb_shell_adb_shell(self, cmd):
        """Send a shell command via the `adb_shell` backend."""
        if not self.available:
            return None

        # Returns `None` without running `cmd` if the lock times out.
        if self._adb_lock.acquire(**LOCK_KWARGS):
            try:
                return self._adb_device.shell(cmd)
            finally:
                self._adb_lock.release()

    def _adb_shell_python_adb(self, cmd):
        """Send a shell command via the `python-adb` backend."""
        if not self.available:
            return None

        if self._adb_lock.acquire(**LOCK_KWARGS):
            try:
                return self._adb.Shell(cmd)
            finally:
                self._adb_lock.release()

    def _adb_shell_pure_python_adb(self, cmd):
        """Send a shell command via the `pure-python-adb` backend.

        Checks the cached `_available` flag (not the `available` property)
        to avoid a round trip to the ADB server on every command.
        """
        if not self._available:
            return None

        if self._adb_lock.acquire(**LOCK_KWARGS):
            try:
                return self._adb_device.shell(cmd)
            finally:
                self._adb_lock.release()

    def _adb_streaming_shell_adb_shell(self, cmd):
        """Send a streaming shell command via the `adb_shell` backend."""
        if not self.available:
            return []

        if self._adb_lock.acquire(**LOCK_KWARGS):
            try:
                return self._adb_device.shell(cmd)
            finally:
                self._adb_lock.release()

    def _adb_streaming_shell_python_adb(self, cmd):
        """Send a streaming shell command via the `python-adb` backend."""
        if not self.available:
            return []

        if self._adb_lock.acquire(**LOCK_KWARGS):
            try:
                return self._adb.StreamingShell(cmd)
            finally:
                self._adb_lock.release()

    def _adb_streaming_shell_pure_python_adb(self, cmd):
        """Streaming shell for the `pure-python-adb` backend (not implemented)."""
        if not self._available:
            return None

        # this is not yet implemented
        if self._adb_lock.acquire(**LOCK_KWARGS):
            try:
                return []
            finally:
                self._adb_lock.release()

    def _dump(self, service, grep=None):
        """Perform a service dump.

        :param service: Service to dump.
        :param grep: Grep for this string.
        :returns: Dump, optionally grepped.
        """
        if grep:
            return self.adb_shell('dumpsys {0} | grep "{1}"'.format(service, grep))
        return self.adb_shell('dumpsys {0}'.format(service))

    def _dump_has(self, service, grep, search):
        """Check if a dump has particular content.

        :param service: Service to dump.
        :param grep: Grep for this string.
        :param search: Check for this substring.
        :returns: Found or not.
        """
        dump_grep = self._dump(service, grep=grep)

        if not dump_grep:
            return False

        return dump_grep.strip().find(search) > -1

    def _key(self, key):
        """Send a key event to device.

        :param key: Key constant.
        """
        self.adb_shell('input keyevent {0}'.format(key))

    def _ps(self, search=''):
        """Perform a ps command with optional filtering.

        :param search: Check for this substring.
        :returns: List of matching fields
        """
        if not self.available:
            # FIX: previously returned `None` implicitly, despite the
            # documented list return type.
            return []
        result = []
        ps = self.adb_streaming_shell('ps')
        try:
            for bad_line in ps:
                # The splitting of the StreamingShell doesn't always work
                # this is to ensure that we get only one line
                for line in bad_line.splitlines():
                    if search in line:
                        result.append(line.strip().rsplit(' ', 1)[-1])
            return result
        except InvalidChecksumError as e:
            print(e)
            self.connect()
            raise IOError

    def _send_intent(self, pkg, intent, count=1):
        """Send an intent to the device via `monkey`.

        :param pkg: Package to send the intent to.
        :param intent: Intent category string.
        :param count: Number of events for `monkey` to send.
        :returns: Dict with "retcode" and "output" keys, or {} on failure.
        """
        cmd = 'monkey -p {} -c {} {}; echo $?'.format(pkg, intent, count)
        logging.debug("Sending an intent %s to %s (count: %s)", intent, pkg, count)

        # adb shell outputs in weird format, so we cut it into lines,
        # separate the retcode and return info to the user
        res = self.adb_shell(cmd)
        if res is None:
            return {}

        res = res.strip().split("\r\n")
        retcode = res[-1]
        output = "\n".join(res[:-1])

        return {"retcode": retcode, "output": output}

    def connect(self, always_log_errors=True):
        """Connect to an Amazon Fire TV device.

        Will attempt to establish ADB connection to the given host.
        Failure sets state to UNKNOWN and disables sending actions.

        :returns: True if successful, False otherwise
        """
        self._adb_lock.acquire(**LOCK_KWARGS)

        signer = None
        if self.adbkey:
            signer = Signer(self.adbkey)

        try:
            if USE_ADB_SHELL:
                # adb_shell
                # NOTE(review): `port` is a string here (and '' if the host
                # has no ':'); presumably AdbDeviceTcp accepts it -- confirm.
                host, _, port = self.host.partition(':')
                self._adb_device = AdbDeviceTcp(host=host, port=port)

                # Connect to the device
                connected = False
                from adb_shell.exceptions import DeviceAuthError
                try:
                    if signer:
                        connected = self._adb_device.connect(rsa_keys=[signer])
                    else:
                        connected = self._adb_device.connect()
                except DeviceAuthError as err:
                    print("DeviceAuthError:", err)

                self._available = connected
                # FIX: this branch previously fell through without a return,
                # so `connect()` returned `None` instead of the documented
                # True/False for the adb_shell backend.
                return self._available

            elif not self.adb_server_ip:
                # python-adb
                from adb.usb_exceptions import DeviceAuthError
                try:
                    # `signer` was already built from `self.adbkey` above.
                    if signer:
                        # Connect to the device
                        self._adb = adb_commands.AdbCommands().ConnectDevice(serial=self.host, rsa_keys=[signer], default_timeout_ms=9000)
                    else:
                        self._adb = adb_commands.AdbCommands().ConnectDevice(serial=self.host, default_timeout_ms=9000)

                    # ADB connection successfully established
                    self._available = True

                except socket_error as serr:
                    if self._available or always_log_errors:
                        if serr.strerror is None:
                            serr.strerror = "Timed out trying to connect to ADB device."
                        logging.warning("Couldn't connect to host: %s, error: %s", self.host, serr.strerror)

                    # ADB connection attempt failed
                    self._adb = None
                    self._available = False

                except DeviceAuthError as err:
                    print("DeviceAuthError:", err)

                finally:
                    # NOTE: `return` in a `finally` block also swallows any
                    # exception not caught above; kept for backwards
                    # compatibility with the original behavior.
                    return self._available

            else:
                # pure-python-adb
                try:
                    self._adb_client = AdbClient(host=self.adb_server_ip, port=self.adb_server_port)
                    self._adb_device = self._adb_client.device(self.host)
                    self._available = bool(self._adb_device)
                except Exception:
                    # Best effort: any failure just marks the device as
                    # unavailable (previously a bare `except:`).
                    self._available = False
                finally:
                    return self._available

        finally:
            self._adb_lock.release()

    # ======================================================================= #
    #                                                                         #
    #                          Home Assistant Update                          #
    #                                                                         #
    # ======================================================================= #
    def update(self, get_running_apps=True):
        """Get the state of the device, the current app, and the running apps.

        :param get_running_apps: whether or not to get the ``running_apps`` property
        :return state: the state of the device
        :return current_app: the current app
        :return running_apps: the running apps
        """
        # The `screen_on`, `awake`, `wake_lock_size`, `current_app`, and `running_apps` properties.
        screen_on, awake, wake_lock_size, _current_app, running_apps = self.get_properties(get_running_apps=get_running_apps, lazy=True)

        # Check if device is off.
        if not screen_on:
            state = STATE_OFF
            current_app = None
            running_apps = None

        # Check if screen saver is on.
        elif not awake:
            state = STATE_IDLE
            current_app = None
            running_apps = None

        else:
            # Get the current app.
            if isinstance(_current_app, dict) and 'package' in _current_app:
                current_app = _current_app['package']
            else:
                current_app = None

            # Get the running apps.
            if running_apps is None and current_app:
                running_apps = [current_app]

            # Get the state.
            # TODO: determine the state differently based on the `current_app`.
            if current_app in [PACKAGE_LAUNCHER, PACKAGE_SETTINGS]:
                state = STATE_STANDBY

            # Amazon Video
            elif current_app == AMAZON_VIDEO:
                if wake_lock_size == 5:
                    state = STATE_PLAYING
                else:
                    # wake_lock_size == 2
                    state = STATE_PAUSED

            # Netflix
            elif current_app == NETFLIX:
                if wake_lock_size > 3:
                    state = STATE_PLAYING
                else:
                    state = STATE_PAUSED

            # Check if `wake_lock_size` is 1 (device is playing).
            elif wake_lock_size == 1:
                state = STATE_PLAYING

            # Otherwise, device is paused.
            else:
                state = STATE_PAUSED

        return state, current_app, running_apps

    # ======================================================================= #
    #                                                                         #
    #                              App methods                                #
    #                                                                         #
    # ======================================================================= #
    def app_state(self, app):
        """Informs if application is running."""
        if not self.available or not self.screen_on:
            return STATE_OFF
        # FIX: `current_app` may be `None` (unparseable window dump); the
        # original subscripted it unconditionally and could raise TypeError.
        current_app = self.current_app
        if current_app and current_app["package"] == app:
            return STATE_ON
        return STATE_OFF

    def launch_app(self, app):
        """Launch an app."""
        return self._send_intent(app, INTENT_LAUNCH)

    def stop_app(self, app):
        """Stop an app."""
        return self.adb_shell("am force-stop {0}".format(app))

    # ======================================================================= #
    #                                                                         #
    #                               properties                                #
    #                                                                         #
    # ======================================================================= #
    @property
    def state(self):
        """Compute and return the device state.

        :returns: Device state.
        """
        # Check if device is disconnected.
        if not self.available:
            return STATE_UNKNOWN
        # Check if device is off.
        if not self.screen_on:
            return STATE_OFF
        # Check if screen saver is on.
        if not self.awake:
            return STATE_IDLE
        # Check if the launcher is active.
        if self.launcher or self.settings:
            return STATE_STANDBY
        # Check for a wake lock (device is playing).
        if self.wake_lock:
            return STATE_PLAYING
        # Otherwise, device is paused.
        return STATE_PAUSED

    @property
    def available(self):
        """Check whether the ADB connection is intact."""
        if USE_ADB_SHELL:
            # adb_shell
            if not self._adb_device:
                return False
            return self._adb_device.available

        if not self.adb_server_ip:
            # python-adb
            return bool(self._adb)

        # pure-python-adb
        # FIX: `_adb_client` may still be `None` if `connect()` failed;
        # guard before calling methods on it.
        if not self._adb_client:
            return False
        try:
            # make sure the server is available
            adb_devices = self._adb_client.devices()

            # make sure the device is available
            try:
                # case 1: the device is currently available
                if any([self.host in dev.get_serial_no() for dev in adb_devices]):
                    if not self._available:
                        self._available = True
                    return True

                # case 2: the device is not currently available
                if self._available:
                    logging.error('ADB server is not connected to the device.')
                    self._available = False
                return False

            except RuntimeError:
                if self._available:
                    logging.error('ADB device is unavailable; encountered an error when searching for device.')
                    self._available = False
                return False

        except RuntimeError:
            if self._available:
                logging.error('ADB server is unavailable.')
                self._available = False
            return False

    @property
    def running_apps(self):
        """Return a list of running user applications."""
        ps = self.adb_shell(RUNNING_APPS_CMD)
        if ps:
            return [line.strip().rsplit(' ', 1)[-1] for line in ps.splitlines() if line.strip()]
        return []

    @property
    def current_app(self):
        """Return the current app."""
        current_focus = self.adb_shell(CURRENT_APP_CMD)
        if current_focus is None:
            return None

        current_focus = current_focus.replace("\r", "")
        matches = WINDOW_REGEX.search(current_focus)

        # case 1: current app was successfully found
        if matches:
            (pkg, activity) = matches.group("package", "activity")
            return {"package": pkg, "activity": activity}

        # case 2: current app could not be found
        logging.warning("Couldn't get current app, reply was %s", current_focus)
        return None

    @property
    def screen_on(self):
        """Check if the screen is on."""
        return self.adb_shell(SCREEN_ON_CMD + SUCCESS1_FAILURE0) == '1'

    @property
    def awake(self):
        """Check if the device is awake (screensaver is not running)."""
        return self.adb_shell(AWAKE_CMD + SUCCESS1_FAILURE0) == '1'

    @property
    def wake_lock(self):
        """Check for wake locks (device is playing)."""
        return self.adb_shell(WAKE_LOCK_CMD + SUCCESS1_FAILURE0) == '1'

    @property
    def wake_lock_size(self):
        """Get the size of the current wake lock."""
        output = self.adb_shell(WAKE_LOCK_SIZE_CMD)
        if not output:
            return None
        return int(output.split("=")[1].strip())

    @property
    def launcher(self):
        """Check if the active application is the Amazon TV launcher."""
        # FIX: guard against `current_app` being `None` (previously raised
        # TypeError when the focused window could not be parsed).
        current_app = self.current_app
        return bool(current_app) and current_app["package"] == PACKAGE_LAUNCHER

    @property
    def settings(self):
        """Check if the active application is the Amazon menu."""
        # FIX: same `None` guard as `launcher`.
        current_app = self.current_app
        return bool(current_app) and current_app["package"] == PACKAGE_SETTINGS

    def get_properties(self, get_running_apps=True, lazy=False):
        """Get the ``screen_on``, ``awake``, ``wake_lock_size``, ``current_app``, and ``running_apps`` properties."""
        # The commands are chained with `&&`, so with `lazy=True` ("SUCCESS1")
        # the output stops at the first failing check.
        if get_running_apps:
            output = self.adb_shell(SCREEN_ON_CMD + (SUCCESS1 if lazy else SUCCESS1_FAILURE0) + " && " +
                                    AWAKE_CMD + (SUCCESS1 if lazy else SUCCESS1_FAILURE0) + " && " +
                                    WAKE_LOCK_SIZE_CMD + " && " +
                                    CURRENT_APP_CMD + " && " +
                                    RUNNING_APPS_CMD)
        else:
            output = self.adb_shell(SCREEN_ON_CMD + (SUCCESS1 if lazy else SUCCESS1_FAILURE0) + " && " +
                                    AWAKE_CMD + (SUCCESS1 if lazy else SUCCESS1_FAILURE0) + " && " +
                                    WAKE_LOCK_SIZE_CMD + " && " +
                                    CURRENT_APP_CMD)

        # ADB command was unsuccessful
        if output is None:
            return None, None, None, None, None

        # `screen_on` property: first character is the '1'/'0' echoed by
        # SCREEN_ON_CMD.
        if not output:
            return False, False, -1, None, None
        screen_on = output[0] == '1'

        # `awake` property: second character is the '1'/'0' echoed by
        # AWAKE_CMD.
        if len(output) < 2:
            return screen_on, False, -1, None, None
        awake = output[1] == '1'

        # The two flag characters share the first line with the wake lock
        # output (the echoes use '\c', which suppresses the newline).
        lines = output.strip().splitlines()

        # `wake_lock_size` property
        if len(lines[0]) < 3:
            return screen_on, awake, -1, None, None
        wake_lock_size = int(lines[0].split("=")[1].strip())

        # `current_app` property
        if len(lines) < 2:
            return screen_on, awake, wake_lock_size, None, None

        matches = WINDOW_REGEX.search(lines[1])
        if matches:
            # case 1: current app was successfully found
            (pkg, activity) = matches.group("package", "activity")
            current_app = {"package": pkg, "activity": activity}
        else:
            # case 2: current app could not be found
            current_app = None

        # `running_apps` property
        if not get_running_apps or len(lines) < 3:
            return screen_on, awake, wake_lock_size, current_app, None

        running_apps = [line.strip().rsplit(' ', 1)[-1] for line in lines[2:] if line.strip()]

        return screen_on, awake, wake_lock_size, current_app, running_apps

    # ======================================================================= #
    #                                                                         #
    #                           turn on/off methods                           #
    #                                                                         #
    # ======================================================================= #
    def turn_on(self):
        """Send power action if device is off."""
        self.adb_shell(SCREEN_ON_CMD + " || (input keyevent {0} && input keyevent {1})".format(POWER, HOME))

    def turn_off(self):
        """Send power action if device is not off."""
        self.adb_shell(SCREEN_ON_CMD + " && input keyevent {0}".format(SLEEP))

    # ======================================================================= #
    #                                                                         #
    #                      "key" methods: basic commands                      #
    #                                                                         #
    # ======================================================================= #
    def power(self):
        """Send power action."""
        self._key(POWER)

    def sleep(self):
        """Send sleep action."""
        self._key(SLEEP)

    def home(self):
        """Send home action."""
        self._key(HOME)

    def up(self):
        """Send up action."""
        self._key(UP)

    def down(self):
        """Send down action."""
        self._key(DOWN)

    def left(self):
        """Send left action."""
        self._key(LEFT)

    def right(self):
        """Send right action."""
        self._key(RIGHT)

    def enter(self):
        """Send enter action."""
        self._key(ENTER)

    def back(self):
        """Send back action."""
        self._key(BACK)

    def space(self):
        """Send space keypress."""
        self._key(SPACE)

    def menu(self):
        """Send menu action."""
        self._key(MENU)

    def volume_up(self):
        """Send volume up action."""
        self._key(VOLUME_UP)

    def volume_down(self):
        """Send volume down action."""
        self._key(VOLUME_DOWN)

    # ======================================================================= #
    #                                                                         #
    #                      "key" methods: media commands                      #
    #                                                                         #
    # ======================================================================= #
    def media_play_pause(self):
        """Send media play/pause action."""
        self._key(PLAY_PAUSE)

    def media_play(self):
        """Send media play action."""
        self._key(PLAY)

    def media_pause(self):
        """Send media pause action."""
        self._key(PAUSE)

    def media_next(self):
        """Send media next action (results in fast-forward)."""
        self._key(NEXT)

    def media_previous(self):
        """Send media previous action (results in rewind)."""
        self._key(PREVIOUS)

    # ======================================================================= #
    #                                                                         #
    #                       "key" methods: key commands                       #
    #                                                                         #
    # ======================================================================= #
    def key_0(self):
        """Send 0 keypress."""
        self._key(KEY_0)

    def key_1(self):
        """Send 1 keypress."""
        self._key(KEY_1)

    def key_2(self):
        """Send 2 keypress."""
        self._key(KEY_2)

    def key_3(self):
        """Send 3 keypress."""
        self._key(KEY_3)

    def key_4(self):
        """Send 4 keypress."""
        self._key(KEY_4)

    def key_5(self):
        """Send 5 keypress."""
        self._key(KEY_5)

    def key_6(self):
        """Send 6 keypress."""
        self._key(KEY_6)

    def key_7(self):
        """Send 7 keypress."""
        self._key(KEY_7)

    def key_8(self):
        """Send 8 keypress."""
        self._key(KEY_8)

    def key_9(self):
        """Send 9 keypress."""
        self._key(KEY_9)

    def key_a(self):
        """Send a keypress."""
        self._key(KEY_A)

    def key_b(self):
        """Send b keypress."""
        self._key(KEY_B)

    def key_c(self):
        """Send c keypress."""
        self._key(KEY_C)

    def key_d(self):
        """Send d keypress."""
        self._key(KEY_D)

    def key_e(self):
        """Send e keypress."""
        self._key(KEY_E)

    def key_f(self):
        """Send f keypress."""
        self._key(KEY_F)

    def key_g(self):
        """Send g keypress."""
        self._key(KEY_G)

    def key_h(self):
        """Send h keypress."""
        self._key(KEY_H)

    def key_i(self):
        """Send i keypress."""
        self._key(KEY_I)

    def key_j(self):
        """Send j keypress."""
        self._key(KEY_J)

    def key_k(self):
        """Send k keypress."""
        self._key(KEY_K)

    def key_l(self):
        """Send l keypress."""
        self._key(KEY_L)

    def key_m(self):
        """Send m keypress."""
        self._key(KEY_M)

    def key_n(self):
        """Send n keypress."""
        self._key(KEY_N)

    def key_o(self):
        """Send o keypress."""
        self._key(KEY_O)

    def key_p(self):
        """Send p keypress."""
        self._key(KEY_P)

    def key_q(self):
        """Send q keypress."""
        self._key(KEY_Q)

    def key_r(self):
        """Send r keypress."""
        self._key(KEY_R)

    def key_s(self):
        """Send s keypress."""
        self._key(KEY_S)

    def key_t(self):
        """Send t keypress."""
        self._key(KEY_T)

    def key_u(self):
        """Send u keypress."""
        self._key(KEY_U)

    def key_v(self):
        """Send v keypress."""
        self._key(KEY_V)

    def key_w(self):
        """Send w keypress."""
        self._key(KEY_W)

    def key_x(self):
        """Send x keypress."""
        self._key(KEY_X)

    def key_y(self):
        """Send y keypress."""
        self._key(KEY_Y)

    def key_z(self):
        """Send z keypress."""
        self._key(KEY_Z)
| |
# No shebang line, this module is meant to be imported
#
# Copyright 2013 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Resource
--------
Base resources which can be used to build top level
documents, pages, or other types of data for the web.
"""
from json import loads
try:
from httplib import (
responses, NOT_FOUND, BAD_REQUEST, UNSUPPORTED_MEDIA_TYPE,
METHOD_NOT_ALLOWED, INTERNAL_SERVER_ERROR, OK, NOT_ACCEPTABLE)
except ImportError: # pragma: no cover
from http.client import (
responses, NOT_FOUND, BAD_REQUEST, UNSUPPORTED_MEDIA_TYPE,
METHOD_NOT_ALLOWED, INTERNAL_SERVER_ERROR, OK, NOT_ACCEPTABLE)
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.web.server import NOT_DONE_YET
from twisted.web.resource import Resource as _Resource
from twisted.web.static import File
from twisted.web.http import Request
from voluptuous import Invalid, Schema
from pyfarm.core.enums import STRING_TYPES
from pyfarm.agent.http.core import template
from pyfarm.agent.logger import getLogger
from pyfarm.agent.utility import dumps
logger = getLogger("agent.http.resource")
class Resource(_Resource):
"""
Basic subclass of :class:`._Resource` for passing requests to
specific methods. Unlike :class:`._Resource` however this will
will also handle:
* Templates
* Content type discovery and validation
* Handling of deferred responses
* Validation of POST/PUT data against a schema
:cvar string TEMPLATE:
The name of the template this class will use when rendering
an html view.
:type SCHEMAS: dict
:cvar SCHEMAS:
A dictionary of schemas to validate the data of an incoming request
against. The structure of this dictionary is::
{http method: <instance of voluptuous.Schema>}
If the schema validation fails the request will be rejected with
``400 BAD REQUEST``.
:type ALLOWED_CONTENT_TYPE: frozenset
:cvar ALLOWED_CONTENT_TYPE:
An instance of :class:`frozenset` which describes what this resource
is going to allow in the ``Content-Type`` header. The request
and this instance must share at least on entry in common. If not,
the request will be rejected with ``415 UNSUPPORTED MEDIA TYPE``.
**This must be defined in subclass**
:type ALLOWED_ACCEPT: frozenset
:cvar ALLOWED_ACCEPT:
An instance of :class:`frozenset` which describes what this
resource is going to allow in the ``Accept`` header. The request
and this instance must share at least one entry in common. If not,
the request will be rejected with ``406 NOT ACCEPTABLE``.
**This must be defined in subclass**
:type DEFAULT_ACCEPT: frozenset
:cvar DEFAULT_ACCEPT:
If ``Accept`` header is not present in the request, use this as the
value instead. This defaults to ``frozenset(["*/*"])``
:type DEFAULT_CONTENT_TYPE: frozenset
:cvar DEFAULT_CONTENT_TYPE:
If ``Content-Type`` header is not present in the request, use this as
the value instead. This defaults to ``frozenset([""])``
"""
TEMPLATE = NotImplemented
SCHEMAS = {}
# These must be set in a subclass and
# should contain the full range of headers
# allowed for Accept and Content-Type.
ALLOWED_ACCEPT = NotImplemented
ALLOWED_CONTENT_TYPE = NotImplemented
# Default values if certain headers
# are not present.
DEFAULT_ACCEPT = frozenset(["*/*"])
DEFAULT_CONTENT_TYPE = frozenset([None])
@property
def template(self):
    """
    The template object loaded from the partial path set in ``TEMPLATE``
    on the class.

    :raises NotImplementedError:
        If the subclass never overrode ``TEMPLATE``.
    """
    partial_path = self.TEMPLATE
    if partial_path is NotImplemented:
        raise NotImplementedError("You must set `TEMPLATE` first")
    return template.load(partial_path)
def methods(self):
    """
    Return a tuple naming the http methods this instance implements.

    A method is considered implemented when the instance has a callable
    attribute of that name.
    """
    return tuple(
        name for name in ("get", "put", "post", "delete", "head")
        if callable(getattr(self, name, None)))
def get_content_type(self, request):
    """
    Return the media types named by the request's ``Content-Type``
    header(s), or ``DEFAULT_CONTENT_TYPE`` if the header is not set.

    :param request: The incoming twisted request object.
    :rtype: frozenset
    """
    header = request.requestHeaders.getRawHeaders("Content-Type")
    if not header:
        return self.DEFAULT_CONTENT_TYPE
    content_type = set()
    for value in header:
        # Split out the various parts of the header and return them. We
        # ignore the q parameter here for the moment.  Each entry is
        # stripped because list-valued headers commonly carry a space
        # after the comma ("a, b"), which would otherwise defeat the
        # set intersection against ALLOWED_CONTENT_TYPE.
        content_type.update(
            entry.split(";")[0].strip() for entry in value.split(","))
    # frozenset for consistency with get_accept()
    return frozenset(content_type)
def get_accept(self, request):
    """
    Return the media types named by the request's ``Accept`` header(s),
    or ``DEFAULT_ACCEPT`` if the header is not set.

    :param request: The incoming twisted request object.
    :rtype: frozenset
    """
    header = request.requestHeaders.getRawHeaders("Accept")
    if not header:
        return self.DEFAULT_ACCEPT
    accept = set()
    for value in header:
        # Split out the various parts of the header and return them. We
        # ignore the q parameter here for the moment.  Each entry is
        # stripped because Accept lists commonly carry a space after
        # the comma ("text/html, application/json"), which would
        # otherwise defeat the intersection against ALLOWED_ACCEPT.
        accept.update(
            entry.split(";")[0].strip() for entry in value.split(","))
    return frozenset(accept)
def putChild(self, path, child):
    """
    Overrides the builtin putChild() so we can return the results for
    each call and use them externally.

    :param path: The url path segment under which ``child`` is mounted.
    :param child: The resource to attach; returned unchanged so callers
        can keep a reference to what was attached.
    """
    # STRING_TYPES, Resource and File are module-level names defined
    # elsewhere in this file (not visible in this chunk).
    assert isinstance(path, STRING_TYPES)
    assert isinstance(child, (Resource, File))
    _Resource.putChild(self, path, child)
    return child
def error(self, request, code, message):
    """
    Log ``message`` and write an error response onto ``request``,
    formatted according to the content types the client accepts.
    """
    accepted = self.get_accept(request)
    logger.error(message)

    if "text/html" in accepted:
        request.setResponseCode(code)
        error_page = template.load("error.html")
        rendered = error_page.render(
            code=code, code_msg=responses[code], message=message)
        request.write(rendered.encode())
    elif "application/json" in accepted:
        request.setResponseCode(code)
        request.write(dumps({"error": message}))
    else:
        # The client accepts neither html nor json; answer with a json
        # body anyway and flag the media type as unsupported.
        request.setResponseCode(UNSUPPORTED_MEDIA_TYPE)
        body = dumps(
            {"error":
                "Can only handle one of %s here" % self.ALLOWED_ACCEPT})
        request.write(body)

    request.finish()
def set_response_code_if_not_set(self, request, code):
    """Sets the response code if one has not already been set"""
    # OK is a module-level constant defined elsewhere in this file;
    # a request whose code still equals it is presumed untouched by
    # any handler, so the supplied code is applied.
    if request.code == OK:
        request.setResponseCode(code)
def render_tuple(self, request, response):
    """
    Takes a response tuple of ``(body, code, headers)`` or
    ``(body, code)`` and renders the resulting data onto
    the request.

    The previous implementation duplicated the Content-Type /
    response-code / write / finish logic in both tuple branches; the
    common tail is now shared.
    """
    assert isinstance(response, (list, tuple)), type(response)
    if len(response) == 3:
        body, code, headers = response
        # Headers are only applied when given as a dict; any other
        # type is silently ignored (matching the original behavior).
        if isinstance(headers, dict):
            for header, value in headers.items():
                if isinstance(value, STRING_TYPES):
                    value = [value]
                request.responseHeaders.setRawHeaders(header, value)
    elif len(response) == 2:
        body, code = response
    else:
        # error() writes the response and finishes the request itself.
        self.error(
            request, INTERNAL_SERVER_ERROR,
            "Expected two or three length tuple for response"
        )
        return

    # Set Content-Type if the handler (or the headers dict above)
    # has not already done so.
    if not request.responseHeaders.hasHeader("Content-Type"):
        request.responseHeaders.setRawHeaders(
            "Content-Type",
            list(self.DEFAULT_CONTENT_TYPE)
        )

    # Don't use set_response_code_if_not_set, always honor the return
    # value from the function.
    request.setResponseCode(code)

    # Cast to str, otherwise Twisted responds
    # TypeError: Data must not be unicode
    request.write(str(body))
    request.finish()
@inlineCallbacks
def render_deferred(self, request, deferred):
    """
    An inline callback used to unpack a deferred
    response object.

    Waits for ``deferred`` to fire, then renders the resulting
    ``(body, code[, headers])`` tuple onto ``request`` via
    render_tuple().
    """
    assert isinstance(deferred, Deferred)
    response = yield deferred
    self.render_tuple(request, response)
def render(self, request):
    """
    Twisted entry point for rendering ``request``.

    Validates the ``Content-Type`` and ``Accept`` headers against the
    class ALLOWED_* sets, json-decodes and schema-validates any request
    body, then dispatches to the lower-cased http method handler on
    this instance (``get``/``put``/``post``/...).  The handler may
    return NOT_DONE_YET, a Flask-style ``(body, code[, headers])``
    tuple, a Deferred resolving to such a tuple, or a plain string.
    """
    # A missing attribute for the lower-cased verb means the verb is
    # not supported by this resource.
    try:
        handler_method = getattr(self, request.method.lower())
    except AttributeError:
        self.error(
            request, METHOD_NOT_ALLOWED,
            "Method %s is not supported" % request.method)
        return NOT_DONE_YET

    assert isinstance(self.ALLOWED_CONTENT_TYPE, (set, frozenset))
    content = request.content.read().strip()
    shared_content_types = \
        self.get_content_type(request) & self.ALLOWED_CONTENT_TYPE

    # Ensure we can handle the content of the request
    if content and not shared_content_types:
        self.error(
            request, UNSUPPORTED_MEDIA_TYPE,
            "Can only support content "
            "type(s) %s" % self.ALLOWED_CONTENT_TYPE)
        return NOT_DONE_YET

    # Determine if we'll be able to produce a response for the request
    assert isinstance(self.ALLOWED_ACCEPT, (set, frozenset))
    response_types = self.get_accept(request) & self.ALLOWED_ACCEPT
    if not response_types:
        self.error(
            request, NOT_ACCEPTABLE,
            "Can only respond with %s" % self.ALLOWED_ACCEPT)
        return NOT_DONE_YET

    # Keywords to pass into `handler_method` below
    kwargs = dict(request=request)

    # Attempt to load the data for the incoming request if appropriate
    if content and "application/json" in shared_content_types:
        try:
            data = loads(content)
        except ValueError as e:
            self.error(
                request, BAD_REQUEST,
                "Failed to decode json data: %r" % e)
            return NOT_DONE_YET

        # We have data, check to see if we have a schema
        # and if we do does it validate.
        # NOTE(review): SCHEMAS is keyed on request.method as-is, while
        # the handler lookup above lower-cases it -- confirm the schema
        # keys use the raw (upper-case) method names.
        schema = self.SCHEMAS.get(request.method)
        if isinstance(schema, Schema):
            try:
                schema(data)
            except Invalid as e:
                self.error(
                    request, BAD_REQUEST,
                    "Failed to validate the request data "
                    "against the schema: %s" % e)
                return NOT_DONE_YET

        kwargs.update(data=data)

    # Any exception escaping the handler is converted into a 500
    # response rather than propagating into twisted.
    try:
        response = handler_method(**kwargs)
    except Exception as error:
        self.error(
            request, INTERNAL_SERVER_ERROR,
            "Unhandled error while rendering response: %s" % error
        )
        return NOT_DONE_YET

    # The handler_method is going to handle everything
    if response == NOT_DONE_YET:
        return NOT_DONE_YET

    # Flask style response
    elif isinstance(response, tuple):
        self.render_tuple(request, response)
        return NOT_DONE_YET

    # handler_method() returned a Deferred which means
    # we have to handle writing the response ourselves
    elif isinstance(response, Deferred):
        self.render_deferred(request, response)
        return NOT_DONE_YET

    elif isinstance(response, STRING_TYPES):
        # Set Content-Type if it has not already been set
        if not request.responseHeaders.hasHeader("Content-Type"):
            request.responseHeaders.setRawHeaders(
                "Content-Type",
                list(self.DEFAULT_CONTENT_TYPE)
            )
        self.set_response_code_if_not_set(request, OK)
        request.write(response)
        request.finish()
        return NOT_DONE_YET

    else:
        self.error(
            request, INTERNAL_SERVER_ERROR,
            "Unhandled %r in response" % response
        )
        return NOT_DONE_YET
| |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Pre-configured remote application for enabling sign in/up with GitHub.
1. Ensure you have ``github3.py`` package installed:
.. code-block:: console
cdvirtualenv src/invenio-oauthclient
pip install -e .[github]
2. Edit your configuration and add:
.. code-block:: python
from invenio_oauthclient.contrib import github
OAUTHCLIENT_REMOTE_APPS = dict(
github=github.REMOTE_APP,
)
GITHUB_APP_CREDENTIALS = dict(
consumer_key='changeme',
consumer_secret='changeme',
)
3. Go to GitHub and register a new application:
https://github.com/settings/applications/new. When registering the
application ensure that the *Authorization callback URL* points to:
``CFG_SITE_SECURE_URL/oauth/authorized/github/`` (e.g.
``http://localhost:4000/oauth/authorized/github/`` for development).
4. Grab the *Client ID* and *Client Secret* after registering the application
and add them to your instance configuration (``invenio.cfg``):
.. code-block:: python
GITHUB_APP_CREDENTIALS = dict(
consumer_key='<CLIENT ID>',
consumer_secret='<CLIENT SECRET>',
)
5. Now go to ``CFG_SITE_SECURE_URL/oauth/login/github/`` (e.g.
http://localhost:4000/oauth/login/github/)
6. Also, you should see GitHub listed under Linked accounts:
http://localhost:4000/account/settings/linkedaccounts/
By default the GitHub module will first check whether a link already exists
between a GitHub account and a user. If no link is found, the module tries to
retrieve the user email address from GitHub to match it with a local user. If
this fails, the user is asked to provide an email address to sign-up.
In templates you can add a sign in/up link:
.. code-block:: jinja
<a href='{{url_for('invenio_oauthclient.login', remote_app='github')}}'>
Sign in with GitHub
</a>
For more details you can play with a :doc:`working example <examplesapp>`.
"""
import github3
from flask import current_app, redirect, url_for
from flask_login import current_user
from invenio_db import db
from invenio_oauthclient.errors import OAuthResponseError
from invenio_oauthclient.handlers import authorized_signup_handler, \
oauth_error_handler
from invenio_oauthclient.models import RemoteAccount
from invenio_oauthclient.utils import oauth_link_external_id, \
oauth_unlink_external_id
# Remote application configuration consumed by invenio-oauthclient.
# Handlers are given as ``module:callable`` import strings.
REMOTE_APP = dict(
    title='GitHub',
    description='Software collaboration platform.',
    icon='fa fa-github',
    # Invoked when GitHub redirects back after authorization.
    authorized_handler='invenio_oauthclient.handlers'
                       ':authorized_signup_handler',
    # Invoked when the user unlinks their account (defined below).
    disconnect_handler='invenio_oauthclient.contrib.github'
                       ':disconnect_handler',
    signup_handler=dict(
        info='invenio_oauthclient.contrib.github:account_info',
        setup='invenio_oauthclient.contrib.github:account_setup',
        view='invenio_oauthclient.handlers:signup_handler',
    ),
    # Raw oauth2 endpoint configuration; the actual credentials are
    # read from the GITHUB_APP_CREDENTIALS application config key.
    params=dict(
        request_token_params={'scope': 'user,user:email'},
        base_url='https://api.github.com/',
        request_token_url=None,
        access_token_url='https://github.com/login/oauth/access_token',
        access_token_method='POST',
        authorize_url='https://github.com/login/oauth/authorize',
        app_key='GITHUB_APP_CREDENTIALS',
    )
)
"""GitHub remote application configuration."""
def _extract_email(gh):
"""Get user email from github."""
return next(
(x.email for x in gh.emails() if x.verified and x.primary), None)
def account_info(remote, resp):
    """Retrieve remote account information used to find local user.

    It returns a dictionary with the following structure:

    .. code-block:: python

        {
            'user': {
                'email': '...',
                'profile': {
                    'username': '...',
                    'full_name': '...',
                }
            },
            'external_id': 'github-unique-identifier',
            'external_method': 'github',
        }

    Information inside the user dictionary are available for other modules.
    For example, they are used from the module invenio-userprofiles to fill
    the user profile.

    :param remote: The remote application.
    :param resp: The response.
    :returns: A dictionary with the user information.
    """
    gh = github3.login(token=resp['access_token'])
    me = gh.me()
    profile = dict(username=me.login, full_name=me.name)
    return dict(
        user=dict(email=_extract_email(gh), profile=profile),
        external_id=str(me.id),
        external_method='github'
    )
def account_setup(remote, token, resp):
    """Perform additional setup after user have been logged in.

    :param remote: The remote application.
    :param token: The token value.
    :param resp: The response.
    """
    gh = github3.login(token=resp['access_token'])
    with db.session.begin_nested():
        gh_user = gh.me()
        # Persist the GitHub identity on the remote account and link it
        # to the local user via the external id table.
        token.remote_account.extra_data = {
            'login': gh_user.login, 'id': gh_user.id}
        oauth_link_external_id(
            token.remote_account.user,
            dict(id=str(gh_user.id), method='github')
        )
@oauth_error_handler
def authorized(resp, remote):
    """Authorized callback handler for GitHub.

    :param resp: The response.
    :param remote: The remote application.
    """
    if resp and 'error' in resp:
        error = resp['error']
        if error == 'bad_verification_code':
            # GitHub recommends restarting the auth flow in this case:
            # https://developer.github.com/v3/oauth/#bad-verification-code
            return redirect(url_for('invenio_oauthclient.login',
                                    remote_app='github'))
        if error in ('incorrect_client_credentials',
                     'redirect_uri_mismatch'):
            raise OAuthResponseError(
                'Application mis-configuration in GitHub', remote, resp
            )
    return authorized_signup_handler(resp, remote)
def disconnect_handler(remote, *args, **kwargs):
    """Handle unlinking of remote account.

    :param remote: The remote application.
    :returns: The HTML response.
    """
    if not current_user.is_authenticated:
        return current_app.login_manager.unauthorized()

    account = RemoteAccount.get(user_id=current_user.get_id(),
                                client_id=remote.consumer_key)
    method = 'github'
    linked_ids = [identity.id
                  for identity in current_user.external_identifiers
                  if identity.method == method]

    # Drop the external-id link first, then the remote account itself.
    if linked_ids:
        oauth_unlink_external_id(dict(id=linked_ids[0], method=method))
    if account:
        with db.session.begin_nested():
            account.delete()

    return redirect(url_for('invenio_oauthclient_settings.index'))
| |
'''
All of these views are predicated on the user already being logged in to
a valid session.
django_ag/views.py
John Whelchel
Summer 2013
These are the views for the Acquisition Gateway section of the administration
site. They are all decorated with @authenticate to make sure that a user is
logged in; if not, they are redirected to the login page. Some are decorated
with precheck, a decorator that makes sure the passed g_id and passwords
are valid.
'''
import logging
import json
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import Context, loader, RequestContext
from django.views.decorators.csrf import csrf_exempt
from django.forms.formsets import formset_factory
from django_lib.auth import authenticate
from django_lib.decorators import precheck
from django_lib import gatewayforms
from django_lib import forms as libforms
from storage.storagetypes import transactional
import storage.storage as db
from MS.volume import Volume
from MS.user import SyndicateUser as User
from MS.gateway import AcquisitionGateway as AG
# This is the view to be redirected to when precheck fails; i.e.
# the given password or g_id is wrong.  Passed to @precheck on the
# handlers below.
PRECHECK_REDIRECT = 'django_ag.views.viewgateway'
@authenticate
def viewgateway(request, g_id=0):
    '''
    The view for viewing and changing any of the main settings on any AG. Passes
    forms for changing different settings, and the volumes attached to the gateway.
    '''
    session = request.session
    username = session['login_email']
    g_id = int(g_id)

    # Check for passed error messages or initial data from session-state.
    message = session.pop('message', "")
    ag_initial_data = session.get('ag_initial_data' + str(g_id), [])

    # Make sure this gateway actually exists.
    g = db.read_acquisition_gateway(g_id)
    if not g:
        logging.error("Error reading gateway %s : Does not exist." % (g_id))
        message = "No acquisition gateway with the ID %s exists." % g_id
        t = loader.get_template("gateway_templates/viewgateway_failure.html")
        c = Context({'message':message, 'username':username})
        return HttpResponse(t.render(c))

    # Create forms for changing location, adding volumes,
    # changing password, getting password, and changing config
    location_form = gatewayforms.ModifyGatewayLocation(initial={'host':g.host,
                                                                'port':g.port})
    add_form = gatewayforms.GatewayAddVolume()
    json_form = gatewayforms.ModifyGatewayConfig()
    password_form = libforms.Password()
    change_password_form = libforms.ChangePassword()

    # Get all attached volumes and their respective owners
    owners = []
    vols = []
    for v_id in g.volume_ids:
        vol = db.read_volume( v_id )
        if not vol:
            # BUG FIX: this message previously interpolated the undefined
            # name ``g_name``, raising a NameError whenever a stale
            # volume id was encountered.
            logging.error("Volume ID in gateways volume_ids does not map to volume. Gateway: %s" % g_id)
        else:
            vols.append(vol)
            attrs = {"SyndicateUser.owner_id ==":vol.owner_id}
            owners.append(db.get_user(attrs))
    vol_owners = zip(vols, owners)

    # Create formatted data based on vols for the formset, if not passed in state.
    if not ag_initial_data:
        for v in vols:
            ag_initial_data.append({'volume_name':v.name,
                                    'remove':False})
        session['ag_initial_data' + str(g_id)] = ag_initial_data

    VolumeFormSet = formset_factory(gatewayforms.GatewayRemoveVolume, extra=0)
    if ag_initial_data:
        formset = VolumeFormSet(initial=ag_initial_data)
    else:
        formset = None

    t = loader.get_template("gateway_templates/viewacquisitiongateway.html")
    c = RequestContext(request, {'username':username,
                                 'gateway':g,
                                 'message':message,
                                 'vol_owners':vol_owners,
                                 'location_form':location_form,
                                 'add_form':add_form,
                                 'json_form':json_form,
                                 'remove_forms':formset,
                                 'password_form':password_form,
                                 'change_password_form':change_password_form})
    return HttpResponse(t.render(c))
@authenticate
@precheck("AG", PRECHECK_REDIRECT)
def changejson(request, g_id):
    '''
    Handler for changing json config file.

    Expects a POST with an uploaded ``json_config`` file; on success the
    parsed config is written to the gateway and the user is redirected
    to the thanks page.
    '''
    session = request.session
    username = session['login_email']
    g_id = int(g_id)

    form = gatewayforms.ModifyGatewayConfig(request.POST)
    if form.is_valid():
        g = db.read_acquisition_gateway(g_id)
        if not g:
            session['message'] = "Gateway with ID {} does not exist.".format(g_id)
            return redirect('django_ag.views.viewgateway', g_id)

        # Verify upload success
        if 'json_config' not in request.FILES:
            session['message'] = "No uploaded file."
            return redirect('django_ag.views.viewgateway', g_id)
        # multiple_chunks() is true for files above Django's in-memory
        # upload threshold.
        if request.FILES['json_config'].multiple_chunks():
            session['message'] = "Uploaded file too large; please make smaller than 2.5M"
            return redirect('django_ag.views.viewgateway', g_id)
        config = request.FILES['json_config'].read()

        # Verify JSON format
        fields = {}
        try:
            fields['json_config'] = json.loads(config)
        except Exception as e:
            # File didn't read as JSON: try adding quotes on both sides just in case.
            # NOTE(review): this only rescues content that is a bare,
            # quote-free JSON string -- confirm that is the intent.
            logging.info("Possible JSON load error: %s" % e)
            try:
                fields['json_config'] = json.loads("\"" + config + "\"")
            except Exception as e:
                logging.error("Definite JSON load error %s" % e)
                session['message'] = "Error parsing given JSON text."
                return redirect('django_ag.views.viewgateway', g_id)

        # Update and redirect
        db.update_acquisition_gateway(g_id, **fields)
        session['new_change'] = "We've changed your gateways's JSON configuration."
        session['next_url'] = '/syn/AG/viewgateway/' + str(g_id)
        session['next_message'] = "Click here to go back to your gateway."
        return redirect('/syn/thanks')
    else:
        session['message'] = "Invalid form. Did you upload a file?"
        return redirect('django_ag.views.viewgateway', g_id)
# Doesn't use precheck() because doesn't use Password() form, just ChangePassword() form.
@authenticate
def changepassword(request, g_id):
    '''
    Handler for changing gateway password. Since it can't use precheck because of password reasons,
    must verify POST-ness itself.
    '''
    session = request.session
    username = session['login_email']
    g_id = int(g_id)

    # Precheck: only POST requests are processed here.
    if request.method != "POST":
        return redirect('/syn/AG/viewgateway/' + str(g_id))

    try:
        g = db.read_acquisition_gateway(g_id)
        if not g:
            raise Exception("No gateway exists.")
    except Exception as e:
        logging.error("Error reading gateway with ID %d : Exception: %s" % (g_id, e))
        message = "No acquisition gateway with the ID %d exists." % g_id
        t = loader.get_template("gateway_templates/viewgateway_failure.html")
        c = Context({'message':message, 'username':username})
        return HttpResponse(t.render(c))

    form = libforms.ChangePassword(request.POST)
    if not form.is_valid():
        session['message'] = "You must fill out all password fields."
        return redirect('django_ag.views.viewgateway', g_id)

    # Check password hash before allowing any change.
    if not AG.authenticate(g, form.cleaned_data['oldpassword']):
        session['message'] = "Incorrect password."
        return redirect('django_ag.views.viewgateway', g_id)
    if form.cleaned_data['newpassword_1'] != form.cleaned_data['newpassword_2']:
        session['message'] = "Your new passwords did not match each other."
        return redirect('django_ag.views.viewgateway', g_id)

    # Ok to change password, then redirect.
    new_hash = AG.generate_password_hash(form.cleaned_data['newpassword_1'])
    fields = {'ms_password_hash':new_hash}
    try:
        db.update_acquisition_gateway(g_id, **fields)
    except Exception as e:
        logging.error("Unable to update acquisition gateway %d. Exception %s" % (g_id, e))
        session['message'] = "Unable to update gateway."
        return redirect('django_ag.views.viewgateway', g_id)

    session['new_change'] = "We've changed your gateways's password."
    session['next_url'] = '/syn/AG/viewgateway/' + str(g_id)
    session['next_message'] = "Click here to go back to your gateway."
    return redirect('/syn/thanks')
@authenticate
@precheck("AG", PRECHECK_REDIRECT)
def addvolume(request, g_id):
    '''
    Handler for adding a volume to the gateway.
    '''
    # This is a helper method that isolates the @transactional decorator, speeding
    # up the code when it doesn't reach update() in this view and allowing for
    # errors that would break in GAE if the decorator was applied to the entire view.
    # NOTE: it closes over ``session``, assigned just below; safe because the
    # closure is only called after that assignment.
    @transactional(xg=True)
    def update(v_id, g_id, vfields, gfields):
        # Update both sides of the volume <-> gateway relationship and
        # drop the cached formset data so viewgateway() rebuilds it.
        db.update_volume(v_id, **vfields)
        db.update_acquisition_gateway(g_id, **gfields)
        session.pop('ag_initial_data' + str(g_id))

    session = request.session
    username = session['login_email']
    g_id = int(g_id)

    form = gatewayforms.GatewayAddVolume(request.POST)
    if form.is_valid():
        # Volume names are stored with underscores in place of spaces.
        attrs = {"Volume.name ==":form.cleaned_data['volume_name'].strip().replace(" ", "_")}
        vols = db.list_volumes(attrs, limit=1)
        if vols:
            volume = vols[0]
            logging.info(volume)
        else:
            session['message'] = "The volume %s doesn't exist." % form.cleaned_data['volume_name']
            return redirect('django_ag.views.viewgateway', g_id)
        gateway = db.read_acquisition_gateway(g_id)

        # Prepare upcoming volume state: append this gateway's id to the
        # volume's AG list.
        if volume.ag_ids:
            new_ags = volume.ag_ids
            new_ags.append(gateway.g_id)
        else:
            new_ags = [gateway.g_id]
        vfields = {'ag_ids':new_ags}

        # Prepare upcoming AG state: append the volume id, rejecting
        # duplicates.
        old_vids = gateway.volume_ids
        new_vid = volume.volume_id
        if new_vid in old_vids:
            session['message'] = "That volume is already attached to this gateway!"
            return redirect('django_ag.views.viewgateway', g_id)
        if old_vids:
            old_vids.append(new_vid)
            new_vids = old_vids
        else:
            new_vids = [new_vid]

        # Update and redirect
        try:
            gfields={'volume_ids':new_vids}
            update(volume.volume_id, g_id, vfields, gfields)
        except Exception as e:
            logging.error("Unable to update acquisition gateway %s or volume %s. Exception %s" % (gateway.ms_username, form.cleaned_data['volume_name'], e))
            session['message'] = "Unable to update gateway."
            return redirect('django_ag.views.viewgateway', g_id)

        session['new_change'] = "We've updated your AG's volumes."
        session['next_url'] = '/syn/AG/viewgateway/' + str(g_id)
        session['next_message'] = "Click here to go back to your gateway."
        return redirect('/syn/thanks')
    else:
        session['message'] = "Invalid entries for adding volumes."
        return redirect('django_ag.views.viewgateway', g_id)
@authenticate
@precheck("AG", PRECHECK_REDIRECT)
def removevolumes(request, g_id):
    '''
    This handler allows removal of one or many volumes from an Acquisition
    Gateway. It calls multi_update() as a helper method to allow transactional
    updates to the database.
    '''
    # This is a helper method that isolates the @transactional decorator, speeding
    # up the code when it doesn't reach update() in this view and allowing for
    # errors that would break in GAE if the decorator was applied to the entire view.
    # It updates multiple volumes at once.  Closes over ``session``,
    # assigned just below; safe because it is only called afterwards.
    @transactional(xg=True)
    def multi_update(v_ids, g_id, vfields, gfields):
        for v_id, vfield in zip(v_ids, vfields):
            db.update_volume(v_id, **vfield)
        db.update_acquisition_gateway(g_id, **gfields)
        # Invalidate the cached formset data so viewgateway() rebuilds it.
        session.pop('ag_initial_data' + str(g_id))

    session = request.session
    username = session['login_email']
    g_id = int(g_id)

    VolumeFormSet = formset_factory(gatewayforms.GatewayRemoveVolume, extra=0)
    formset = VolumeFormSet(request.POST)
    # This call is not checked because the formset will always be valid
    # (readonly widgets); it is needed to populate cleaned_data.
    formset.is_valid()

    volume_ids_to_be_removed = []
    new_ags_set = []
    # Pair each form with the session-cached initial data it was built from.
    initial_and_forms = zip(session.get('ag_initial_data' + str(g_id), []), formset.forms)
    for i, f in initial_and_forms:
        if f.cleaned_data['remove']:
            attrs = {"Volume.name ==":i['volume_name']}
            vols = db.list_volumes(attrs, limit=1)
            # NOTE(review): assumes the named volume still exists;
            # ``vols[0]`` raises IndexError if it was deleted meanwhile.
            vol = vols[0]
            # update each volume's new AG list
            new_ags = vol.ag_ids
            new_ags.remove(int(g_id))
            new_ags_set.append({'ag_ids':new_ags})
            # update info for AG update
            volume_ids_to_be_removed.append(vol.volume_id)

    if not volume_ids_to_be_removed:
        session['message'] = "You must select at least one volume to remove."
        return redirect('django_ag.views.viewgateway', g_id)

    old_vids = set(db.read_acquisition_gateway(g_id).volume_ids)
    new_vids = list(old_vids - set(volume_ids_to_be_removed))
    gfields = {'volume_ids':new_vids}
    try:
        multi_update(volume_ids_to_be_removed, g_id, new_ags_set, gfields)
    except Exception as e:
        logging.error("Unable to update acquisition gateway %s. Exception %s" % (g_id, e))
        session['message'] = "Unable to update gateway."
        return redirect('django_ag.views.viewgateway', g_id)

    session['new_change'] = "We've updated your AG's volumes."
    session['next_url'] = '/syn/AG/viewgateway/' + str(g_id)
    session['next_message'] = "Click here to go back to your gateway."
    return redirect('/syn/thanks')
@authenticate
@precheck("AG", PRECHECK_REDIRECT)
def changelocation(request, g_id):
    '''
    Handler for changing the host:port of the gateway.
    '''
    session = request.session
    username = session['login_email']
    g_id = int(g_id)

    form = gatewayforms.ModifyGatewayLocation(request.POST)
    if not form.is_valid():
        session['message'] = "Invalid form entries for gateway location."
        return redirect('django_ag.views.viewgateway', g_id)

    fields = {'host':form.cleaned_data['host'],
              'port':form.cleaned_data['port']}

    # Update and redirect
    try:
        db.update_acquisition_gateway(g_id, **fields)
    except Exception as e:
        logging.error("Unable to update AG: %d. Error was %s." % (g_id, e))
        session['message'] = "Error. Unable to change acquisition gateway."
        return redirect('django_ag.views.viewgateway', g_id)

    session['new_change'] = "We've updated your AG."
    session['next_url'] = '/syn/AG/viewgateway/' + str(g_id)
    session['next_message'] = "Click here to go back to your gateway."
    return redirect('/syn/thanks')
@authenticate
def allgateways(request):
    '''
    View to look at all AG's in a tabular format, with owners and attached volumes.
    '''
    session = request.session
    username = session['login_email']

    # Get gateways.  A failing datastore query is treated as "no
    # gateways" rather than an error page.
    try:
        qry = db.list_acquisition_gateways()
    except Exception:
        # BUG FIX: previously a bare ``except:`` which would also have
        # swallowed SystemExit/KeyboardInterrupt.
        qry = []
    gateways = list(qry)

    # Get volumes and owners for each gateway.
    vols = []
    g_owners = []
    for g in gateways:
        volset = [db.read_volume(v) for v in g.volume_ids]
        vols.append(volset)
        attrs = {"SyndicateUser.owner_id ==":g.owner_id}
        g_owners.append(db.get_user(attrs))
    gateway_vols_owners = zip(gateways, vols, g_owners)

    t = loader.get_template('gateway_templates/allacquisitiongateways.html')
    c = RequestContext(request, {'username':username, 'gateway_vols_owners':gateway_vols_owners})
    return HttpResponse(t.render(c))
@authenticate
def create(request):
    '''
    View to handle creation of AG's.

    GET renders a blank creation form; POST validates the form, parses
    any JSON config (uploaded file takes precedence over the text box)
    and creates the gateway.
    '''
    session = request.session
    username = session['login_email']
    user = db.read_user( username )

    # Helper method used to simplify error-handling. When fields are entered incorrectly,
    # a session message is set and this method is called.
    def give_create_form(username, session):
        message = session.pop('message', "")
        form = gatewayforms.CreateAG()
        t = loader.get_template('gateway_templates/create_acquisition_gateway.html')
        c = RequestContext(request, {'username':username,'form':form, 'message':message})
        return HttpResponse(t.render(c))

    if request.POST:
        # Validate input forms
        form = gatewayforms.CreateAG(request.POST, request.FILES)
        if form.is_valid():
            kwargs = {}

            # Try and load JSON config file/data, if present. First check uploaded files, then
            # the text box.
            if "json_config" in request.FILES:
                if request.FILES['json_config'].multiple_chunks():
                    session['message'] = "Uploaded file too large; please make smaller than 2.5M"
                    return give_create_form(username, session)
                config = request.FILES['json_config'].read()
                try:
                    kwargs['json_config'] = json.loads(config)
                except Exception as e:
                    # Retry with quotes added, in case the content is a
                    # bare JSON string.
                    logging.info("Possible JSON load error: %s" % e)
                    try:
                        kwargs['json_config'] = json.loads("\"" + config + "\"")
                    except Exception as e:
                        logging.error("Definite JSON load error %s" % e)
                        session['message'] = "Error parsing given JSON text."
                        return give_create_form(username, session)
            # No upload, check text box.
            elif "json_config_text" in form.cleaned_data:
                try:
                    kwargs['json_config'] = json.loads(form.cleaned_data['json_config_text'])
                except Exception as e:
                    logging.info("Possible JSON load error: %s" % e)
                    try:
                        kwargs['json_config'] = json.loads("\"" + str(form.cleaned_data['json_config_text']) + "\"")
                    except Exception as e:
                        logging.error("Definite JSON load error %s" % e)
                        session['message'] = "Error parsing given JSON text."
                        return give_create_form(username, session)

            try:
                kwargs['ms_username'] = form.cleaned_data['g_name']
                kwargs['port'] = form.cleaned_data['port']
                kwargs['host'] = form.cleaned_data['host']
                kwargs['ms_password'] = form.cleaned_data['g_password']
                new_ag = db.create_acquisition_gateway(user, **kwargs)
            except Exception as E:
                session['message'] = "AG creation error: %s" % E
                return give_create_form(username, session)

            session['new_change'] = "Your new gateway is ready."
            session['next_url'] = '/syn/AG/allgateways'
            session['next_message'] = "Click here to see your acquisition gateways."
            return redirect('/syn/thanks/')
        else:
            # Prep returned form values (so they don't have to re-enter stuff)
            if 'g_name' in form.errors:
                oldname = ""
            else:
                oldname = request.POST['g_name']
            if 'host' in form.errors:
                oldhost = ""
            else:
                oldhost = request.POST['host']
            if 'port' in form.errors:
                oldport = ""
            else:
                oldport = request.POST['port']
            # NOTE(review): unlike the fields above this is read
            # unconditionally -- confirm json_config_text is always
            # present in the POST data.
            oldjson = request.POST['json_config_text']

            # Prep error message
            message = "Invalid form entry: "
            for k, v in form.errors.items():
                message = message + "\"" + k + "\"" + " -> "
                for m in v:
                    message = message + m + " "

            # Give them the form again
            form = gatewayforms.CreateAG(initial={'g_name': oldname,
                                                  'host': oldhost,
                                                  'port': oldport,
                                                  'json_config_text':oldjson,
                                                  })
            t = loader.get_template('gateway_templates/create_acquisition_gateway.html')
            c = RequestContext(request, {'username':username,'form':form, 'message':message})
            return HttpResponse(t.render(c))
    else:
        # Not a POST, give them blank form
        return give_create_form(username, session)
@authenticate
def delete(request, g_id):
    '''
    View for deleting AG.

    GET renders the confirmation form; POST checks the gateway password
    and the confirmation tick box, then deletes the gateway and detaches
    it from every volume it was attached to.
    '''
    # Helper method used to simplify error-handling. When fields are entered incorrectly,
    # a session message is set and this method is called.
    def give_delete_form(username, g, session):
        message = session.pop('message', "")
        form = gatewayforms.DeleteGateway()
        t = loader.get_template('gateway_templates/delete_acquisition_gateway.html')
        c = RequestContext(request, {'username':username, 'g':g, 'form':form, 'message':message})
        return HttpResponse(t.render(c))

    # Once again isolating transactional for views that update multiple entities.
    # Closes over ``session``, assigned just below; safe because it is
    # only called afterwards.
    @transactional(xg=True)
    def delete_and_update(ag_id, attached_volume_ids):
        db.delete_acquisition_gateway(ag_id)
        # Remove this gateway's id from every volume that referenced it.
        for v in attached_volume_ids:
            vol = db.read_volume(v)
            if not vol:
                continue
            new_ag_ids = vol.ag_ids
            new_ag_ids.remove(ag_id)
            attrs = {"ag_ids":new_ag_ids}
            db.update_volume(v, **attrs)
        # NOTE(review): other views cache under 'ag_initial_data' + g_id;
        # this pops the un-suffixed key -- confirm which is intended.
        session.pop("ag_initial_data", None)

    session = request.session
    username = session['login_email']
    g_id = int(g_id)

    ag = db.read_acquisition_gateway(g_id)
    if not ag:
        t = loader.get_template('gateway_templates/delete_acquisition_gateway_failure.html')
        c = RequestContext(request, {'username':username})
        return HttpResponse(t.render(c))

    if request.POST:
        # Validate input forms
        form = gatewayforms.DeleteGateway(request.POST)
        if form.is_valid():
            if not AG.authenticate(ag, form.cleaned_data['g_password']):
                session['message'] = "Incorrect Acquisition Gateway password"
                return give_delete_form(username, ag, session)
            if not form.cleaned_data['confirm_delete']:
                session['message'] = "You must tick the delete confirmation box."
                return give_delete_form(username, ag, session)

            delete_and_update(g_id, ag.volume_ids)

            session['new_change'] = "Your gateway has been deleted."
            session['next_url'] = '/syn/AG/allgateways'
            session['next_message'] = "Click here to see all acquisition gateways."
            return redirect('/syn/thanks/')
        # Invalid forms
        else:
            # Prep error message
            session['message'] = "Invalid form entry: "
            for k, v in form.errors.items():
                session['message'] = session['message'] + "\"" + k + "\"" + " -> "
                for m in v:
                    session['message'] = session['message'] + m + " "
            return give_delete_form(username, ag, session)
    else:
        # Not a POST, give them blank form
        return give_delete_form(username, ag, session)
@csrf_exempt
@authenticate
def urlcreate(request, g_name, g_password, host, port, volume_name="",):
    '''
    For debugging purposes only, allows creation of AG via pure URL

    Builds creation kwargs from the URL components; when volume_name is
    given the new AG is attached to that volume (caller needs read rights).

    NOTE(review): ``user`` is referenced below but never assigned in this
    function; presumably the @authenticate decorator or module-level code
    supplies it -- verify, otherwise this raises NameError at runtime.
    '''
    session = request.session
    username = session['login_email']
    kwargs = {}
    kwargs['port'] = int(port)
    kwargs['host'] = host
    kwargs['ms_username'] = g_name
    kwargs['ms_password'] = g_password
    if volume_name:
        vol = db.get_volume_by_name(volume_name)
        if not vol:
            return HttpResponse("No volume %s exists." % volume_name)
        if (vol.volume_id not in user.volumes_r) and (vol.volume_id not in user.volumes_rw):
            return HttpResponse("Must have read rights to volume %s to create AG for it." % volume_name)
        kwargs['volume_ids'] = [vol.volume_id]
    try:
        new_ag = db.create_acquisition_gateway(user, **kwargs)
    except Exception as E:
        return HttpResponse("AG creation error: %s" % E)
    # NOTE(review): "succesfully" typo in the user-facing string below --
    # runtime text, left as-is in this documentation pass.
    return HttpResponse("AG succesfully created: " + str(new_ag))
@csrf_exempt
@authenticate
def urldelete(request, g_name, g_password):
    '''
    For debugging purposes only, allows deletion of AG via pure URL

    Looks the gateway up by its ms_username, verifies the gateway
    password, then deletes it.
    '''
    session = request.session
    username = session['login_email']  # NOTE(review): unused in this view
    attrs = {"AcquisitionGateway.ms_username ==": g_name}
    ags = db.list_acquisition_gateways(attrs, limit=1)
    if ags:
        ag = ags[0]
    else:
        return HttpResponse("AG %s does not exist." % g_name)
    if not AG.authenticate(ag, g_password):
        return HttpResponse("Incorrect AG password.")
    # NOTE(review): the form-based delete path above passes a numeric
    # gateway id to db.delete_acquisition_gateway, but this passes the
    # name -- confirm which key the db layer expects.
    db.delete_acquisition_gateway(g_name)
    return HttpResponse("Gateway succesfully deleted.")
| |
"""Fans on Zigbee Home Automation networks."""
from __future__ import annotations
from abc import abstractmethod
import functools
import math
from zigpy.exceptions import ZigbeeException
import zigpy.zcl.clusters.hvac as hvac
from homeassistant.components.fan import (
ATTR_PERCENTAGE,
ATTR_PRESET_MODE,
DOMAIN,
SUPPORT_SET_SPEED,
FanEntity,
NotValidPresetModeError,
)
from homeassistant.const import STATE_UNAVAILABLE
from homeassistant.core import State, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.percentage import (
int_states_in_range,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from .core import discovery
from .core.const import (
CHANNEL_FAN,
DATA_ZHA,
DATA_ZHA_DISPATCHERS,
SIGNAL_ADD_ENTITIES,
SIGNAL_ATTR_UPDATED,
)
from .core.registries import ZHA_ENTITIES
from .entity import ZhaEntity, ZhaGroupEntity
# Additional speeds in zigbee's ZCL
# Spec is unclear as to what this value means. On King Of Fans HBUniversal
# receiver, this means Very High.
PRESET_MODE_ON = "on"
# The fan speed is self-regulated
PRESET_MODE_AUTO = "auto"
# When the heated/cooled space is occupied, the fan is always on
PRESET_MODE_SMART = "smart"

# ZCL fan_mode values 1-3 map to speed percentages; 0 (off) is excluded.
SPEED_RANGE = (1, 3)  # off is not included
# ZCL fan_mode values 4-6 are exposed as named preset modes, not speeds.
PRESET_MODES_TO_NAME = {4: PRESET_MODE_ON, 5: PRESET_MODE_AUTO, 6: PRESET_MODE_SMART}
NAME_TO_PRESET_MODE = {v: k for k, v in PRESET_MODES_TO_NAME.items()}
PRESET_MODES = list(NAME_TO_PRESET_MODE)

# Percentage used when turn_on is called without an explicit percentage.
DEFAULT_ON_PERCENTAGE = 50

# Registry decorators used to match entities for this platform.
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, DOMAIN)
GROUP_MATCH = functools.partial(ZHA_ENTITIES.group_match, DOMAIN)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Zigbee Home Automation fan from config entry."""
    entities_to_create = hass.data[DATA_ZHA][DOMAIN]

    # Defer entity creation to the discovery helper; it is invoked each
    # time the add-entities signal fires.
    add_discovered = functools.partial(
        discovery.async_add_entities,
        async_add_entities,
        entities_to_create,
        update_before_add=False,
    )
    unsubscribe = async_dispatcher_connect(hass, SIGNAL_ADD_ENTITIES, add_discovered)
    hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsubscribe)
class BaseFan(FanEntity):
    """Base representation of a ZHA fan."""

    @property
    def preset_modes(self) -> list[str]:
        """Return the available preset modes."""
        return PRESET_MODES

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return SUPPORT_SET_SPEED

    @property
    def speed_count(self) -> int:
        """Return the number of speeds the fan supports."""
        return int_states_in_range(SPEED_RANGE)

    async def async_turn_on(
        self, speed=None, percentage=None, preset_mode=None, **kwargs
    ) -> None:
        """Turn the entity on.

        FIX: an explicit ``preset_mode`` was previously ignored; honor it
        before falling back to a percentage-based speed.
        """
        if preset_mode is not None:
            await self.async_set_preset_mode(preset_mode)
            return
        if percentage is None:
            percentage = DEFAULT_ON_PERCENTAGE
        await self.async_set_percentage(percentage)

    async def async_turn_off(self, **kwargs) -> None:
        """Turn the entity off."""
        await self.async_set_percentage(0)

    async def async_set_percentage(self, percentage: int | None) -> None:
        """Set the speed percentage of the fan.

        FIX: the signature accepts None, but the math below raised
        TypeError on it -- treat None as 0 (off).
        """
        if percentage is None:
            percentage = 0
        fan_mode = math.ceil(percentage_to_ranged_value(SPEED_RANGE, percentage))
        await self._async_set_fan_mode(fan_mode)

    async def async_set_preset_mode(self, preset_mode: str) -> None:
        """Set the preset mode for the fan; raise on unknown modes."""
        if preset_mode not in self.preset_modes:
            raise NotValidPresetModeError(
                f"The preset_mode {preset_mode} is not a valid preset_mode: {self.preset_modes}"
            )
        await self._async_set_fan_mode(NAME_TO_PRESET_MODE[preset_mode])

    @abstractmethod
    async def _async_set_fan_mode(self, fan_mode: int) -> None:
        """Set the fan mode for the fan."""

    @callback
    def async_set_state(self, attr_id, attr_name, value):
        """Handle state update from channel."""
@STRICT_MATCH(channel_names=CHANNEL_FAN)
class ZhaFan(BaseFan, ZhaEntity):
    """Representation of a ZHA fan."""

    def __init__(self, unique_id, zha_device, channels, **kwargs):
        """Init this sensor and cache the device's fan channel."""
        super().__init__(unique_id, zha_device, channels, **kwargs)
        self._fan_channel = self.cluster_channels.get(CHANNEL_FAN)

    async def async_added_to_hass(self):
        """Run when about to be added to hass."""
        await super().async_added_to_hass()
        # Re-render state whenever the channel reports an attribute change.
        self.async_accept_signal(
            self._fan_channel, SIGNAL_ATTR_UPDATED, self.async_set_state
        )

    @property
    def percentage(self) -> int | None:
        """Return the current speed percentage."""
        mode = self._fan_channel.fan_mode
        if mode is None or mode > SPEED_RANGE[1]:
            # Unknown, or a preset mode (4-6) rather than a speed.
            return None
        if mode == 0:
            return 0
        return ranged_value_to_percentage(SPEED_RANGE, mode)

    @property
    def preset_mode(self) -> str | None:
        """Return the current preset mode."""
        mode = self._fan_channel.fan_mode
        return PRESET_MODES_TO_NAME.get(mode)

    @callback
    def async_set_state(self, attr_id, attr_name, value):
        """Handle state update from channel."""
        self.async_write_ha_state()

    async def _async_set_fan_mode(self, fan_mode: int) -> None:
        """Write the fan mode to the device and refresh HA state."""
        await self._fan_channel.async_set_speed(fan_mode)
        self.async_set_state(0, "fan_mode", fan_mode)
@GROUP_MATCH()
class FanGroup(BaseFan, ZhaGroupEntity):
    """Representation of a fan group.

    Commands are written to the zigpy group endpoint so one Zigbee command
    reaches every member; state is aggregated from the member entities.
    """

    def __init__(
        self, entity_ids: list[str], unique_id: str, group_id: int, zha_device, **kwargs
    ) -> None:
        """Initialize a fan group."""
        super().__init__(entity_ids, unique_id, group_id, zha_device, **kwargs)
        self._available: bool = False
        group = self.zha_device.gateway.get_group(self._group_id)
        # Group endpoint for the HVAC Fan cluster; a single write fans out
        # to all members.
        self._fan_channel = group.endpoint[hvac.Fan.cluster_id]
        self._percentage = None
        self._preset_mode = None

    @property
    def percentage(self) -> int | None:
        """Return the current speed percentage (aggregated in async_update)."""
        return self._percentage

    @property
    def preset_mode(self) -> str | None:
        """Return the current preset mode (aggregated in async_update)."""
        return self._preset_mode

    async def _async_set_fan_mode(self, fan_mode: int) -> None:
        """Set the fan mode for the group."""
        try:
            await self._fan_channel.write_attributes({"fan_mode": fan_mode})
        except ZigbeeException as ex:
            self.error("Could not set fan mode: %s", ex)
        # State is updated optimistically even when the write above failed
        # (the error is only logged).
        self.async_set_state(0, "fan_mode", fan_mode)

    async def async_update(self):
        """Attempt to retrieve on off state from the fan."""
        all_states = [self.hass.states.get(x) for x in self._entity_ids]
        states: list[State] = list(filter(None, all_states))
        # NOTE(review): attributes.get(...) is falsy for a 0% member, so a
        # fan at 0% is excluded from percentage aggregation -- confirm this
        # is intentional.
        percentage_states: list[State] = [
            state for state in states if state.attributes.get(ATTR_PERCENTAGE)
        ]
        preset_mode_states: list[State] = [
            state for state in states if state.attributes.get(ATTR_PRESET_MODE)
        ]
        # The group is available while any member is.
        self._available = any(state.state != STATE_UNAVAILABLE for state in states)
        # Prefer a percentage if any member reports one; otherwise fall
        # back to the first reported preset mode.
        if percentage_states:
            self._percentage = percentage_states[0].attributes[ATTR_PERCENTAGE]
            self._preset_mode = None
        elif preset_mode_states:
            self._preset_mode = preset_mode_states[0].attributes[ATTR_PRESET_MODE]
            self._percentage = None
        else:
            self._percentage = None
            self._preset_mode = None

    async def async_added_to_hass(self) -> None:
        """Run when about to be added to hass."""
        await self.async_update()
        await super().async_added_to_hass()
| |
import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
    ("origin", True, [], True),
    ("openshift-enterprise", True, [], True),
    ("enterprise", True, [], False),
    ("online", True, [], False),
    ("invalid", True, [], False),
    ("", True, [], False),
    ("origin", False, [], False),
    ("openshift-enterprise", False, [], False),
    ("origin", False, ["nodes", "masters"], True),
    ("openshift-enterprise", False, ["etcd"], False),
])
def test_is_active(deployment_type, is_containerized, group_names, expect_active):
    """The check activates per deployment type, containerization and groups."""
    task_vars = {
        'openshift': {'common': {'is_containerized': is_containerized}},
        'openshift_deployment_type': deployment_type,
        'group_names': group_names,
    }
    check = DockerImageAvailability(None, task_vars)
    assert check.is_active() == expect_active
@pytest.mark.parametrize("is_containerized,is_atomic", [
    (True, True),
    (False, False),
    (True, False),
    (False, True),
])
def test_all_images_available_locally(is_containerized, is_atomic):
    """The check passes when docker_image_facts reports every image present."""
    def execute_module(module_name, module_args, *_):
        if module_name == "yum":
            return {"changed": True}
        assert module_name == "docker_image_facts"
        assert 'name' in module_args
        assert module_args['name']
        # Echo the queried image name back as locally present.
        return {
            'images': [module_args['name']],
        }

    result = DockerImageAvailability(execute_module, task_vars=dict(
        openshift=dict(
            common=dict(
                service_type='origin',
                is_containerized=is_containerized,
                is_atomic=is_atomic,
            ),
            docker=dict(additional_registries=["docker.io"]),
        ),
        openshift_deployment_type='origin',
        openshift_image_tag='3.4',
        group_names=['nodes', 'masters'],
    )).run()

    assert not result.get('failed', False)
@pytest.mark.parametrize("available_locally", [
    False,
    True,
])
def test_all_images_available_remotely(available_locally):
    """The check passes when images are reachable via the registries."""
    def execute_module(module_name, *_):
        if module_name == 'docker_image_facts':
            # No images found locally; 'failed' mirrors the parameter.
            return {'images': [], 'failed': available_locally}
        return {'changed': False}

    result = DockerImageAvailability(execute_module, task_vars=dict(
        openshift=dict(
            common=dict(
                service_type='origin',
                is_containerized=False,
                is_atomic=False,
            ),
            docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
        ),
        openshift_deployment_type='origin',
        openshift_image_tag='v3.4',
        group_names=['nodes', 'masters'],
    )).run()

    assert not result.get('failed', False)
def test_all_images_unavailable():
    """The check fails when registry probes (the 'command' module) fail."""
    def execute_module(module_name=None, *_):
        if module_name == "command":
            # Simulate every skopeo/registry probe failing.
            return {
                'failed': True,
            }
        return {
            'changed': False,
        }

    actual = DockerImageAvailability(execute_module, task_vars=dict(
        openshift=dict(
            common=dict(
                service_type='origin',
                is_containerized=False,
                is_atomic=False,
            ),
            docker=dict(additional_registries=["docker.io"]),
        ),
        openshift_deployment_type="openshift-enterprise",
        openshift_image_tag='latest',
        group_names=['nodes', 'masters'],
    )).run()

    assert actual['failed']
    assert "required Docker images are not available" in actual['msg']
@pytest.mark.parametrize("message,extra_words", [
    (
        "docker image update failure",
        ["docker image update failure"],
    ),
    (
        "No package matching 'skopeo' found available, installed or updated",
        ["dependencies can be installed via `yum`"]
    ),
])
def test_skopeo_update_failure(message, extra_words):
    """A failing yum (skopeo install) surfaces its message in the result."""
    def execute_module(module_name=None, *_):
        if module_name == "yum":
            return {
                "failed": True,
                "msg": message,
                "changed": False,
            }
        return {'changed': False}

    actual = DockerImageAvailability(execute_module, task_vars=dict(
        openshift=dict(
            common=dict(
                service_type='origin',
                is_containerized=False,
                is_atomic=False,
            ),
            docker=dict(additional_registries=["unknown.io"]),
        ),
        openshift_deployment_type="openshift-enterprise",
        openshift_image_tag='',
        group_names=['nodes', 'masters'],
    )).run()

    assert actual["failed"]
    for word in extra_words:
        assert word in actual["msg"]
@pytest.mark.parametrize("deployment_type,registries", [
    ("origin", ["unknown.io"]),
    ("openshift-enterprise", ["registry.access.redhat.com"]),
    ("openshift-enterprise", []),
])
def test_registry_availability(deployment_type, registries):
    """The check succeeds when module calls report no failures."""
    def execute_module(module_name=None, *_):
        # Every module invocation succeeds without changes.
        return {
            'changed': False,
        }

    actual = DockerImageAvailability(execute_module, task_vars=dict(
        openshift=dict(
            common=dict(
                service_type='origin',
                is_containerized=False,
                is_atomic=False,
            ),
            docker=dict(additional_registries=registries),
        ),
        openshift_deployment_type=deployment_type,
        openshift_image_tag='',
        group_names=['nodes', 'masters'],
    )).run()

    assert not actual.get("failed", False)
@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
    ( # standard set of stuff required on nodes
        "origin", False, ['nodes'], None,
        set([
            'openshift/origin-pod:vtest',
            'openshift/origin-deployer:vtest',
            'openshift/origin-docker-registry:vtest',
            'openshift/origin-haproxy-router:vtest',
            'cockpit/kubernetes', # origin version of registry-console
        ])
    ),
    ( # set a different URL for images
        "origin", False, ['nodes'], 'foo.io/openshift/origin-${component}:${version}',
        set([
            'foo.io/openshift/origin-pod:vtest',
            'foo.io/openshift/origin-deployer:vtest',
            'foo.io/openshift/origin-docker-registry:vtest',
            'foo.io/openshift/origin-haproxy-router:vtest',
            'cockpit/kubernetes', # AFAICS this is not built from the URL
        ])
    ),
    (
        "origin", True, ['nodes', 'masters', 'etcd'], None,
        set([
            # images running on top of openshift
            'openshift/origin-pod:vtest',
            'openshift/origin-deployer:vtest',
            'openshift/origin-docker-registry:vtest',
            'openshift/origin-haproxy-router:vtest',
            'cockpit/kubernetes',
            # containerized component images
            'openshift/origin:vtest',
            'openshift/node:vtest',
            'openshift/openvswitch:vtest',
            'registry.access.redhat.com/rhel7/etcd',
        ])
    ),
    ( # enterprise images
        "openshift-enterprise", True, ['nodes'], 'foo.io/openshift3/ose-${component}:f13ac45',
        set([
            'foo.io/openshift3/ose-pod:f13ac45',
            'foo.io/openshift3/ose-deployer:f13ac45',
            'foo.io/openshift3/ose-docker-registry:f13ac45',
            'foo.io/openshift3/ose-haproxy-router:f13ac45',
            # registry-console is not constructed/versioned the same as the others.
            'registry.access.redhat.com/openshift3/registry-console',
            # containerized images aren't built from oreg_url
            'openshift3/node:vtest',
            'openshift3/openvswitch:vtest',
        ])
    ),
    (
        "openshift-enterprise", True, ['etcd', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
        set([
            'registry.access.redhat.com/rhel7/etcd',
            # lb does not yet come in a containerized version
        ])
    ),
])
def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected):
    """required_images() yields exactly the image set for each host profile."""
    task_vars = dict(
        openshift=dict(
            common=dict(
                is_containerized=is_containerized,
                is_atomic=False,
            ),
        ),
        openshift_deployment_type=deployment_type,
        group_names=groups,
        oreg_url=oreg_url,
        openshift_image_tag='vtest',
    )

    assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
def test_containerized_etcd():
    """A containerized etcd host requires only the etcd image."""
    task_vars = {
        'openshift': {'common': {'is_containerized': True}},
        'openshift_deployment_type': "origin",
        'group_names': ['etcd'],
    }
    expected = {'registry.access.redhat.com/rhel7/etcd'}
    assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
| |
"""Custom Django User models for Stormpath.
Any application that uses django_stormpath must provide a user model with a
href field. The href is used in the authentication backend to keep track which
remote Stormpath user the local user represents. It is meant to be used in an
application that modifies user data on Stormpath. If needing to add more
fields please extend the StormpathUser class from this module.
"""
from django.conf import settings
from django.db import models, IntegrityError, transaction
from django.contrib.auth.models import (BaseUserManager,
AbstractBaseUser, PermissionsMixin)
from django.forms import model_to_dict
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import pre_save, pre_delete
from django.contrib.auth.models import Group
from django.dispatch import receiver
from django import VERSION as django_version
from stormpath.client import Client
from stormpath.error import Error as StormpathError
from stormpath.resources import AccountCreationPolicy
from django_stormpath import __version__
from django_stormpath.helpers import validate_settings
# Ensure all user settings have been properly initialized, otherwise we'll
# throw useful error messages to the user so they know what to fix.
validate_settings(settings)

# Initialize our Stormpath Client / Application objects -- this way we have
# singletons that can be used throughout our Django sessions.
USER_AGENT = 'stormpath-django/%s django/%s' % (__version__, django_version)
CLIENT = Client(
    id = settings.STORMPATH_ID,
    secret = settings.STORMPATH_SECRET,
    user_agent = USER_AGENT,
    # Optional cache backend configuration; None disables custom caching.
    cache_options = getattr(settings, 'STORMPATH_CACHE_OPTIONS', None)
)
# The Stormpath application every account operation in this module targets.
APPLICATION = CLIENT.applications.get(settings.STORMPATH_APPLICATION)
def get_default_is_active():
    """
    Stormpath user is active by default if e-mail verification is
    disabled.
    """
    account_store = APPLICATION.default_account_store_mapping.account_store
    verification_status = (
        account_store.account_creation_policy.verification_email_status
    )
    return verification_status == AccountCreationPolicy.EMAIL_STATUS_DISABLED
class StormpathUserManager(BaseUserManager):
    """Manager that mirrors user CRUD operations to Stormpath.

    All creation paths funnel through ``_create_user`` so the raw password
    is pushed to Stormpath exactly once and then discarded locally.
    """

    def get(self, *args, **kwargs):
        """Return a single user, optionally verifying a password.

        A ``password`` kwarg is not used in the DB lookup; the matched
        user is authenticated against Stormpath instead, and
        ``DoesNotExist`` is raised when authentication fails.
        """
        password = kwargs.pop('password', None)
        user = super(StormpathUserManager, self).get(*args, **kwargs)
        if password:
            try:
                APPLICATION.authenticate_account(
                    getattr(user, user.USERNAME_FIELD), password)
            except StormpathError:
                raise self.model.DoesNotExist
        return user

    def create(self, *args, **kwargs):
        """Alias for ``create_user`` so generic code also hits Stormpath."""
        return self.create_user(*args, **kwargs)

    def get_or_create(self, **kwargs):
        """Return (user, created); creation goes through Stormpath."""
        try:
            return self.get(**kwargs), False
        except self.model.DoesNotExist:
            return self.create(**kwargs), True

    def update_or_create(self, defaults=None, **kwargs):
        """Update an existing user with *defaults*, or create a new one."""
        defaults = defaults or {}
        try:
            user = self.get(**kwargs)
        except self.model.DoesNotExist:
            kwargs.update(defaults)
            return self.create(**kwargs), True
        # Password is handled separately so it is sent raw to Stormpath.
        if 'password' in defaults:
            user.set_password(defaults.pop('password'))
        for k, v in defaults.items():
            setattr(user, k, v)
        user.save(using=self._db)
        user._remove_raw_password()
        return user, False

    def _create_user(self, email, given_name, surname, password):
        """Validate required fields and persist a new user (DB + Stormpath)."""
        if not email:
            raise ValueError("Users must have an email address")
        if not given_name or not surname:
            raise ValueError("Users must provide a given name and a surname")

        user = self.model(email=StormpathUserManager.normalize_email(email),
                          given_name=given_name, surname=surname)
        user.set_password(password)
        user.save(using=self._db)
        user._remove_raw_password()
        return user

    def create_user(self, email, given_name=None, surname=None, password=None,
                    first_name=None, last_name=None):
        """Create a user, accepting Django-style first/last name aliases."""
        if first_name and not given_name:
            given_name = first_name
        if last_name and not surname:
            surname = last_name
        return self._create_user(email=email, given_name=given_name,
                                 surname=surname, password=password)

    def create_superuser(self, **kwargs):
        """Create a user and grant admin/staff/superuser flags."""
        user = self.create_user(**kwargs)
        user.is_admin = True
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        user._remove_raw_password()
        return user

    def delete(self, *args, **kwargs):
        """Delete every user in the queryset (locally and on Stormpath)."""
        for user in self.get_queryset():
            user.delete(*args, **kwargs)
        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None

    def sync_accounts_from_stormpath(self, sync_groups=True):
        """ :arg sync_groups: WARNING!!! Groups will be deleted from stormpath
        if not present locally when user logs in!

        Sync accounts from stormpath -> local database.

        This may take a long time, depending on how many users you have in your
        Stormpath application. It also makes numerous database queries.

        This method updates local users from stormpath or creates new ones
        where the user does not exist locally. This is an additive operation,
        meaning it should delete no data from the local database OR stormpath.
        """
        if sync_groups:
            # Create locally any group that only exists on Stormpath.
            sp_groups = [g.name for g in APPLICATION.groups]
            db_groups = set(Group.objects.all().values_list('name', flat=True))
            missing_from_db = set(sp_groups).difference(db_groups)
            if missing_from_db:
                groups_to_create = [Group(name=g_name)
                                    for g_name in missing_from_db]
                Group.objects.bulk_create(groups_to_create)

        for account in APPLICATION.accounts:
            try:
                user = StormpathUser.objects.get(email=account.email)
            except StormpathUser.DoesNotExist:
                user = StormpathUser()
            user._mirror_data_from_stormpath_account(account)
            user.set_unusable_password()
            # BUG FIX: the previous code set ``created = True`` in BOTH
            # branches above, making the flag meaningless. Mirrored data
            # must be persisted for new and existing users alike, so save
            # unconditionally.
            user._save_db_only()
            if sync_groups:
                users_sp_groups = [g.name for g in account.groups]
                user.groups = Group.objects.filter(name__in=users_sp_groups)
                user._save_db_only()

    delete.alters_data = True
    delete.queryset_only = True
class StormpathBaseUser(AbstractBaseUser, PermissionsMixin):
    """Abstract user model mirrored to a Stormpath account.

    Every save/delete is propagated to Stormpath inside a database
    transaction; ``href`` links the local row to the remote account.
    """

    class Meta:
        abstract = True

    # URL of the remote Stormpath account this row mirrors.
    href = models.CharField(max_length=255, null=True, blank=True)
    username = models.CharField(max_length=255, unique=True)
    given_name = models.CharField(max_length=255)
    surname = models.CharField(max_length=255)
    middle_name = models.CharField(max_length=255, null=True, blank=True)
    email = models.EmailField(verbose_name='email address',
                              max_length=255,
                              unique=True,
                              db_index=True)

    # Fields stored natively on a Stormpath account; everything else is
    # written to the account's custom_data under DJANGO_PREFIX.
    STORMPATH_BASE_FIELDS = ['href', 'username', 'given_name', 'surname', 'middle_name', 'email', 'password']
    # Local bookkeeping fields that must never be pushed to Stormpath.
    EXCLUDE_FIELDS = ['href', 'last_login', 'groups', 'id', 'stormpathpermissionsmixin_ptr', 'user_permissions']

    PASSWORD_FIELD = 'password'
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['given_name', 'surname']

    is_active = models.BooleanField(default=get_default_is_active)
    is_verified = models.BooleanField(default=False)
    is_admin = models.BooleanField(default=False)
    is_staff = models.BooleanField(default=False)

    objects = StormpathUserManager()

    # Prefix for Django-only attributes stored in Stormpath custom_data.
    DJANGO_PREFIX = 'spDjango_'

    @property
    def first_name(self):
        """This property is added to make Stormpath user compatible
        with Django user (first_name is used instead of given_name).
        """
        return self.given_name

    @first_name.setter
    def first_name(self, value):
        self.given_name = value

    @property
    def last_name(self):
        """This property is added to make Stormpath user compatible
        with Django user (last_name is used instead of surname).
        """
        return self.surname

    @last_name.setter
    def last_name(self, value):
        self.surname = value

    def _mirror_data_from_db_user(self, account, data):
        """Copy local field values from *data* onto the remote *account*.

        Base fields map directly; any other field is stored in the
        account's custom_data under DJANGO_PREFIX.
        """
        for field in self.EXCLUDE_FIELDS:
            if field in data:
                del data[field]

        # Map local flags to the single remote account status.
        if data['is_active']:
            account.status = account.STATUS_ENABLED
        elif data['is_verified']:
            account.status = account.STATUS_DISABLED
        else:
            account.status = account.STATUS_UNVERIFIED
        if 'is_active' in data:
            del data['is_active']

        for key in data:
            if key in self.STORMPATH_BASE_FIELDS:
                account[key] = data[key]
            else:
                account.custom_data[self.DJANGO_PREFIX + key] = data[key]
        return account

    def _mirror_data_from_stormpath_account(self, account):
        """Copy remote account data onto this local user instance."""
        for field in self.STORMPATH_BASE_FIELDS:
            # The password is not sent via the API
            # so we take care here to not try and
            # mirror it because it's not there
            if field != 'password':
                self.__setattr__(field, account[field])
        for key in account.custom_data.keys():
            # BUG FIX: ``split(...)[0]`` yielded '' for prefixed keys
            # ('spDjango_foo'.split('spDjango_') == ['', 'foo']), so
            # Django-only attributes were never restored to the right
            # name. [-1] strips the prefix and leaves unprefixed keys
            # intact.
            self.__setattr__(key.split(self.DJANGO_PREFIX)[-1],
                             account.custom_data[key])

        # Translate the remote status back to the local flags.
        if account.status == account.STATUS_ENABLED:
            self.is_active = True
            self.is_verified = not get_default_is_active()
        else:
            self.is_active = False
        if account.status == account.STATUS_UNVERIFIED:
            self.is_verified = False

    def _save_sp_group_memberships(self, account):
        """Make the remote account's group memberships match the local ones."""
        try:
            db_groups = self.groups.values_list('name', flat=True)
            for g in db_groups:
                if not account.has_group(g):
                    account.add_group(g)
            account.save()
            # Remove remote memberships that no longer exist locally.
            for gm in account.group_memberships:
                if gm.group.name not in db_groups:
                    gm.delete()
        except Exception:
            raise IntegrityError("Unable to save group memberships.")

    def _create_stormpath_user(self, data, raw_password):
        """Create the remote account with the raw password and sync groups."""
        data['password'] = raw_password
        account = APPLICATION.accounts.create(data)
        self._save_sp_group_memberships(account)
        return account

    def _update_stormpath_user(self, data, raw_password):
        """Push local changes to the remote account.

        Raises DoesNotExist when the remote account is gone (404).
        """
        # if password has changed
        if raw_password:
            data['password'] = raw_password
        else:
            # don't set the password if it hasn't changed
            del data['password']
        try:
            acc = APPLICATION.accounts.get(data.get('href'))
            # materialize it
            acc.email
            acc = self._mirror_data_from_db_user(acc, data)
            acc.save()
            self._save_sp_group_memberships(acc)
            return acc
        except StormpathError as e:
            if e.status == 404:
                raise self.DoesNotExist('Could not find Stormpath User.')
            raise
        finally:
            self._remove_raw_password()

    def get_full_name(self):
        return "%s %s" % (self.given_name, self.surname)

    def get_short_name(self):
        return self.email

    def __unicode__(self):
        return self.get_full_name()

    def __str__(self):
        # Added so Python 3 / modern Django render a readable name
        # instead of the default object repr.
        return self.get_full_name()

    def _update_for_db_and_stormpath(self, *args, **kwargs):
        """Save locally and remotely; delete the local row if the remote
        account no longer exists."""
        try:
            with transaction.atomic():
                super(StormpathBaseUser, self).save(*args, **kwargs)
                self._update_stormpath_user(model_to_dict(self),
                                            self._get_raw_password())
        except ObjectDoesNotExist:
            # Remote account vanished; drop the orphaned local row.
            self.delete()
            raise

    def _create_for_db_and_stormpath(self, *args, **kwargs):
        """Create locally and remotely, rolling back the remote account on
        failure."""
        try:
            with transaction.atomic():
                super(StormpathBaseUser, self).save(*args, **kwargs)
                account = self._create_stormpath_user(
                    model_to_dict(self), self._get_raw_password())
                self.href = account.href
                self.username = account.username
                self.save(*args, **kwargs)
        except StormpathError:
            raise
        except Exception:
            # we're not sure if we have a href yet, hence we
            # filter by email
            accounts = APPLICATION.accounts.search({'email': self.email})
            if accounts:
                accounts[0].delete()
            raise

    def _save_db_only(self, *args, **kwargs):
        """Save locally without touching Stormpath (used by sync)."""
        super(StormpathBaseUser, self).save(*args, **kwargs)

    def _remove_raw_password(self):
        """We need to send a raw password to Stormpath. After an Account is saved on Stormpath
        we need to remove the raw password field from the local object"""
        try:
            del self.raw_password
        except AttributeError:
            pass

    def _get_raw_password(self):
        try:
            return self.raw_password
        except AttributeError:
            return None

    def set_password(self, raw_password):
        """We don't want to keep passwords locally"""
        self.set_unusable_password()
        self.raw_password = raw_password

    def check_password(self, raw_password):
        """Authenticate against Stormpath; only error code 7100 means a
        wrong password, anything else is re-raised."""
        try:
            acc = APPLICATION.authenticate_account(self.username, raw_password)
            return acc is not None
        except StormpathError as e:
            # explicity check to see if password is incorrect
            if e.code == 7100:
                return False
            raise e

    def save(self, *args, **kwargs):
        # Keep username in lockstep with the USERNAME_FIELD (email).
        self.username = getattr(self, self.USERNAME_FIELD)
        # Are we updating an existing User?
        if self.id:
            self._update_for_db_and_stormpath(*args, **kwargs)
        # Or are we creating a new user?
        else:
            self._create_for_db_and_stormpath(*args, **kwargs)

    def delete(self, *args, **kwargs):
        with transaction.atomic():
            href = self.href
            super(StormpathBaseUser, self).delete(*args, **kwargs)
            account = APPLICATION.accounts.get(href)
            account.delete()
class StormpathUser(StormpathBaseUser):
    """Concrete user model; subclass StormpathBaseUser to add fields."""
    pass
@receiver(pre_save, sender=Group)
def save_group_to_stormpath(sender, instance, **kwargs):
    """Mirror a local Django group save (create or rename) to Stormpath.

    Raises IntegrityError if the remote operation fails, aborting the
    local save.
    """
    try:
        if instance.pk is None:
            # creating a new group
            APPLICATION.groups.create({'name': instance.name})
        else:
            # updating an existing group
            old_group = Group.objects.get(pk=instance.pk)
            remote_groups = APPLICATION.groups.search({'name': old_group.name})
            # FIX: was `len(remote_groups) is 0` -- identity comparison
            # with an int literal is implementation-defined and a
            # SyntaxWarning on modern Python.
            if len(remote_groups) == 0:
                # group existed locally but not on Stormpath, create it
                APPLICATION.groups.create({'name': instance.name})
                return
            remote_group = remote_groups[0]
            if remote_group.name == instance.name:
                return  # nothing changed
            remote_group.name = instance.name
            remote_group.save()
    except StormpathError as e:
        raise IntegrityError(e)
@receiver(pre_delete, sender=Group)
def delete_group_from_stormpath(sender, instance, **kwargs):
    """Mirror a local Django group delete to Stormpath.

    FIX: the previous code indexed ``search(...)[0]`` unconditionally and
    raised IndexError when the group did not exist remotely; a missing
    remote group now simply means there is nothing to delete.
    """
    try:
        remote_groups = APPLICATION.groups.search({'name': instance.name})
        if len(remote_groups) == 0:
            return
        remote_groups[0].delete()
    except StormpathError as e:
        raise IntegrityError(e)
| |
import inspect
import logging
import os
import os.path
import sys
from imp import find_module, load_module
from traceback import format_exc
from ..compat import IS_PYTHON3
logger = logging.getLogger(__name__)
# Bind the frequently-used logging helpers once. FIX: use the
# non-deprecated Logger.warning instead of Logger.warn (deprecated alias,
# removed in Python 3.13).
debug, info, warn = (logger.debug, logger.info, logger.warning,)
class RedirectStream(object):
    """File-like adapter that forwards writes to a handler callable.

    Used to route sys.stdout/sys.stderr into Neovim's message output.
    """

    def __init__(self, redirect_handler):
        self.redirect_handler = redirect_handler

    def write(self, data):
        """Forward a single chunk to the handler."""
        self.redirect_handler(data)

    def writelines(self, seq):
        """Forward a sequence of lines as one newline-joined chunk."""
        joined = '\n'.join(seq)
        self.redirect_handler(joined)
class PluginHost(object):
"""
Class that transforms the python interpreter into a plugin host for
Neovim. It takes care of discovering plugins and routing events/calls
sent by Neovim to the appropriate handlers(registered by plugins)
"""
def __init__(self, nvim, preloaded=[]):
self.nvim = nvim
self.method_handlers = {}
self.event_handlers = {}
self.discovered_plugins = list(preloaded)
self.installed_plugins = []
def __enter__(self):
nvim = self.nvim
info('install import hook/path')
self.hook = path_hook(nvim)
sys.path_hooks.append(self.hook)
nvim.VIM_SPECIAL_PATH = '_vim_path_'
sys.path.append(nvim.VIM_SPECIAL_PATH)
info('redirect sys.stdout and sys.stderr')
self.saved_stdout = sys.stdout
self.saved_stderr = sys.stderr
sys.stdout = RedirectStream(lambda data: nvim.out_write(data))
sys.stderr = RedirectStream(lambda data: nvim.err_write(data))
debug('installing plugins')
self.install_plugins()
return self
def __exit__(self, type, value, traceback):
for plugin in self.installed_plugins:
if hasattr(plugin, 'on_teardown'):
plugin.teardown()
nvim = self.nvim
info('uninstall import hook/path')
sys.path.remove(nvim.VIM_SPECIAL_PATH)
sys.path_hooks.remove(self.hook)
info('restore sys.stdout and sys.stderr')
sys.stdout = self.saved_stdout
sys.stderr = self.saved_stderr
def discover_plugins(self):
loaded = set()
for directory in discover_runtime_directories(self.nvim):
for name in os.listdir(directory):
if not name.startswith(b'nvim_'):
continue
name = os.path.splitext(name)[0]
if name in loaded:
continue
loaded.add(name)
try:
discovered = find_module(name, [directory])
except Exception:
err_str = format_exc(5)
warn('error while searching module %s: %s', name, err_str)
continue
debug('discovered %s', name)
try:
file, pathname, description = discovered
module = load_module(name, file, pathname, description)
for name, value in inspect.getmembers(module,
inspect.isclass):
if name.startswith('Nvim'):
self.discovered_plugins.append(value)
debug('loaded %s', name)
except Exception:
err_str = format_exc(5)
warn('error while loading module %s: %s', name, err_str)
continue
finally:
file.close()
def install_plugins(self):
self.discover_plugins()
nvim = self.nvim
features = nvim.metadata['features']
registered = set()
for plugin_class in self.discovered_plugins:
cls_name = plugin_class.__name__
debug('inspecting class %s', plugin_class.__name__)
try:
plugin = plugin_class(self.nvim)
except Exception:
err_str = format_exc(5)
warn('constructor for %s failed: %s', cls_name, err_str)
continue
methods = inspect.getmembers(plugin, inspect.ismethod)
debug('registering event handlers for %s', plugin_class.__name__)
for method_name, method in methods:
if not method_name.startswith('on_'):
continue
# event handler
# Store all handlers with bytestring keys, since thats how
# msgpack will deserialize method names
event_name = method_name[3:].encode('utf-8')
debug('registering %s event handler', event_name)
if event_name not in self.event_handlers:
self.event_handlers[event_name] = [method]
else:
self.event_handlers[event_name].append(
method.__get__(plugin, plugin_class))
if hasattr(plugin, 'provides') and plugin.provides:
for feature_name in plugin.provides:
if feature_name in registered:
raise Exception('A plugin already provides %s' %
feature_name)
for method_name in features[feature_name]:
# encode for the same reason as above
enc_name = method_name.encode('utf-8')
self.method_handlers[enc_name] = getattr(
# Python 3 attributes need to be unicode instances
# so use `method_name` here
plugin, method_name)
debug('registered %s as a %s provider',
plugin_class.__name__,
feature_name)
nvim.register_provider(feature_name)
registered.add(feature_name)
self.installed_plugins.append(plugin)
def search_handler_for(self, name):
    """Return the first installed-plugin method named `name`, else None."""
    for installed in self.installed_plugins:
        for member_name, member in inspect.getmembers(installed,
                                                      inspect.ismethod):
            if member_name == name:
                return member
    return None
def on_request(self, name, args):
    """Dispatch a msgpack-rpc request to the matching method handler.

    Handlers found by scanning installed plugins are cached in
    self.method_handlers for subsequent calls; an unknown method name
    raises.
    """
    handler = self.method_handlers.get(name, None)
    if not handler:
        handler = self.search_handler_for(name)
        if not handler:
            msg = 'no method handlers for "%s" were found' % name
            debug(msg)
            raise Exception(msg)
        # Cache the discovered handler for the next request.
        self.method_handlers[name] = handler
    debug("running method handler for '%s %s'", name, args)
    rv = handler(*args)
    debug("method handler for '%s %s' returns: %s", name, args, rv)
    return rv
def on_notification(self, name, args):
    """Invoke every event handler registered for `name`, if any."""
    handlers = self.event_handlers.get(name, None)
    if handlers:
        debug('running event handlers for %s', name)
        for handler in handlers:
            handler(*args)
    else:
        debug("no event handlers registered for %s", name)
def run(self):
    """Run the session loop, dispatching msgpack-rpc requests and
    notifications from Nvim to this host's on_request/on_notification."""
    self.nvim.session.run(self.on_request, self.on_notification)
# This was copied/adapted from nvim-python help
def path_hook(nvim):
    """Build a sys.path_hooks-style hook for Nvim's special path entry.

    The returned hook recognizes ``nvim.VIM_SPECIAL_PATH`` and resolves
    imports against the runtime directories reported by Nvim, so modules
    shipped inside Vim plugins become importable.
    """
    def _get_paths():
        # Recomputed per lookup: the runtimepath may change at runtime.
        return discover_runtime_directories(nvim)

    def _find_module(fullname, oldtail, path):
        # Resolve a dotted name one component at a time, loading each
        # package so its __path__ can seed the next component's search.
        idx = oldtail.find('.')
        if idx > 0:
            name = oldtail[:idx]
            tail = oldtail[idx+1:]
            fmr = find_module(name, path)
            module = load_module(fullname[:-len(oldtail)] + name, *fmr)
            return _find_module(fullname, tail, module.__path__)
        else:
            fmr = find_module(fullname, path)
            return load_module(fullname, *fmr)

    class VimModuleLoader(object):
        # PEP 302 loader that hands back the already-loaded module.
        def __init__(self, module):
            self.module = module

        def load_module(self, fullname, path=None):
            return self.module

    class VimPathFinder(object):
        # PEP 302 finder used for nvim.VIM_SPECIAL_PATH entries.
        @classmethod
        def find_module(cls, fullname, path=None):
            try:
                return VimModuleLoader(
                    _find_module(fullname, fullname, path or _get_paths()))
            except ImportError:
                return None

        @classmethod
        def load_module(cls, fullname, path=None):
            return _find_module(fullname, fullname, path or _get_paths())

    def hook(path):
        if path == nvim.VIM_SPECIAL_PATH:
            return VimPathFinder
        else:
            # Signal that this hook does not handle `path`.
            raise ImportError

    return hook
def discover_runtime_directories(nvim):
    """Return the existing Python plugin directories on nvim's runtimepath.

    For each runtime path that exists on disk, the version-neutral
    ``pythonx`` directory and the interpreter-specific ``python2`` /
    ``python3`` directory are collected when present.
    """
    found = []
    versioned = b'python3' if IS_PYTHON3 else b'python2'
    for runtime in nvim.list_runtime_paths():
        if not os.path.exists(runtime):
            continue
        for candidate in (os.path.join(runtime, b'pythonx'),
                          os.path.join(runtime, versioned)):
            if os.path.exists(candidate):
                found.append(candidate)
    return found
| |
import unohelper
from com.sun.star.util import XModifyListener
from com.sun.star.table.CellContentType import FORMULA as CCT_FORMULA
class Rows(object):
    """Container of watched-cell rows, mirrored into a grid data model.

    `data_model` is the grid data model the rows are displayed in
    (addRow/removeRow/updateRow* API); `res` maps the keys Sheet, Cell,
    Value and Formula to their localized labels, used to build the
    per-row tooltips.
    """

    def __init__(self, data_model, res):
        self._rows = []
        self.data_model = data_model
        # Tooltip template: four "label: value" lines.
        self._tooltip = "%s: %%s\n%s: %%s\n%s: %%s\n%s: %%s" % (
            res["Sheet"], res["Cell"], res["Value"], res["Formula"])
        # Rows queued by reserve_watch(), committed by add_reserved().
        self._reserved = []

    def get_data_model(self):
        """Return the grid data model backing this container."""
        return self.data_model

    def get_row_count(self):
        """Return the number of rows held by the grid data model."""
        return self.data_model.RowCount

    def get_row_names(self):
        """Return the list of row header (cell address) strings."""
        return [row.get_header() for row in self._rows]

    def clear(self):
        """Detach and drop all rows from this container."""
        for row in self._rows:
            row.removed()
        self._rows = []

    def enable_update(self, state):
        """Switch cell-modification watching on/off for every row."""
        for row in self._rows:
            row.enable_watching(state)

    def get(self, index):
        """Get row by index (raises IndexError when out of range)."""
        return self._rows[index]

    def get_row_header(self, i):
        """Get row heading by index; empty string when out of range."""
        if 0 <= i < len(self._rows):
            return self._rows[i].get_header()
        return ""

    def _tooltip_from_data(self, data):
        """Format the tooltip text from a row's data tuple."""
        return self._tooltip % data

    def _broadcast_added(self, index, row):
        """Append one row (and its tooltip) to the grid model."""
        data_model = self.data_model
        data = row.get_data()
        data_model.addRow(row.get_header(), data)
        data_model.updateRowToolTip(
            self.get_row_count() - 1, self._tooltip_from_data(data))

    def _broadcast_removed(self, index):
        """Drop one row from the grid model."""
        self.data_model.removeRow(index)

    def _broadcast_reserved_added(self, index, rows):
        """Push a batch of new rows (and tooltips) to the grid model,
        starting at `index`."""
        data_model = self.data_model
        data = tuple(row.get_data() for row in rows)
        data_model.addRows(
            tuple(row.get_header() for row in rows),
            data
        )
        for offset, row_data in enumerate(data):
            data_model.updateRowToolTip(
                index + offset, self._tooltip_from_data(row_data))

    def reserve_watch(self, cell):
        """Reserve to add watch. Reserved cells are added by add_reserved method."""
        self._reserved.append(GridRow(cell, self))

    def add_reserved(self):
        """Commit rows queued by reserve_watch() to the list and the grid."""
        if self._reserved:
            start = len(self._rows)
            self._rows.extend(self._reserved)
            self._broadcast_reserved_added(start, self._reserved)
            self._reserved[:] = []

    def add_watch(self, cell):
        """Add a new cell to watch; returns the new row's index."""
        row = GridRow(cell, self)
        self._rows.append(row)
        i = len(self._rows) - 1
        self._broadcast_added(i, row)
        return i

    def remove_watch(self, index):
        """Remove watch by index (silently ignores bad indexes)."""
        if 0 <= index < len(self._rows):
            try:
                self._rows.pop(index).removed()
                self._broadcast_removed(index)
            except Exception as e:
                print("remove_watch: %s" % str(e))

    def remove_all_watch(self):
        """Remove all watches from the container and the grid."""
        for row in self._rows:
            row.removed()
        self._rows[:] = []
        self.data_model.removeAllRows()

    def update_watch(self, row):
        """Force to update specific row. Called when a watched cell changes."""
        try:
            i = self._rows.index(row)  # ToDo make this faster
            self._broadcast_changed(i, row)
            # ToDo update input line if selected in the view
        except Exception as e:
            print("update_watch: %s" % str(e))

    def update_all_watch(self):
        """Update all rows (cell addresses may have changed).

        BUG FIX: the original also accumulated the headers into a local
        `names` list that was never used; that dead code is removed.
        """
        try:
            for i, row in enumerate(self._rows):
                self._broadcast_changed(i, row)
        except Exception as e:
            print("update_all_watch: %s" % str(e))

    def _broadcast_changed(self, index, row):
        """Refresh one grid row's data, heading and tooltip."""
        data_model = self.data_model
        data = row.get_data()
        data_model.updateRowData((0, 1, 2, 3), index, data)
        data_model.updateRowHeading(index, row.get_header())
        data_model.updateRowToolTip(index, self._tooltip_from_data(data))

    def exchange_watches(self, index_a, index_b):
        """Exchange two rows specified by indexes (no-op on bad indexes)."""
        n = len(self._rows)
        if 0 <= index_a < n and 0 <= index_b < n:
            self._rows[index_a], self._rows[index_b] = \
                self._rows[index_b], self._rows[index_a]
            self._broadcast_changed(index_a, self._rows[index_a])
            self._broadcast_changed(index_b, self._rows[index_b])
class GridRow(unohelper.Base, XModifyListener):
    """Row data of the grid, which keeps the watched cell reference.

    An instance registers itself as XModifyListener on the cell so the
    owning Rows container (passed as `data_model`) can refresh the grid
    whenever the cell changes.
    """

    def __init__(self, cell, data_model):
        # NOTE: `data_model` is the owning Rows container (it exposes
        # update_watch()), not the grid data model itself.
        self._removed = False
        self.watching = False
        self.cell = cell
        self.data_model = data_model
        self.enable_watching(True)

    def __eq__(self, other):
        """Compare by cell address; `other` may be a GridRow or a raw
        cell object exposing getCellAddress()."""
        if isinstance(other, GridRow):
            return self.cell == other.get_cell()
        try:
            addr2 = other.getCellAddress()
            addr1 = self.cell.getCellAddress()
            return addr1.Sheet == addr2.Sheet and \
                addr1.Row == addr2.Row and \
                addr1.Column == addr2.Column
        except Exception:
            # `other` is not an addressable cell; treat as unequal.
            pass
        return False

    def get_cell(self):
        """Get the cell which is kept by the row."""
        return self.cell

    def add_modify_listener(self):
        """Start listening for modifications of the cell."""
        self.cell.addModifyListener(self)

    def remove_modify_listener(self):
        """Stop listening for modifications of the cell."""
        self.cell.removeModifyListener(self)

    def enable_watching(self, state):
        """Attach/detach the modify listener according to `state`."""
        if self.watching and not state:
            self.remove_modify_listener()
        elif not self.watching and state:
            self.add_modify_listener()
        self.watching = state

    def removed(self):
        """Detach from the cell once the row has been removed.

        Safe to call multiple times.
        """
        if not self._removed:
            self.remove_modify_listener()
            # BUG FIX: this flag was assigned False in the original, so
            # a second removed() call would try to detach a listener
            # from self.cell after it had been cleared to None.
            self._removed = True
            self.watching = False
            self.cell = None
            self.data_model = None

    def get_header(self):
        """Get the cell's absolute address as the header string."""
        return self.cell.AbsoluteName

    def get_data(self):
        """Return (sheet_name, range_name, display_string, formula).

        Formula is the empty string for non-formula cells.
        """
        if self.cell:
            addr = self.cell.AbsoluteName
            n = addr.rfind(".")
            sheet_name = addr[1:n]  # ignore first $
            if sheet_name.startswith("'"):
                # Unquote and unescape doubled single-quotes.
                sheet_name = sheet_name[1:len(sheet_name)-1].replace("''", "'")
            return (
                sheet_name,
                addr[n + 1:].replace("$", ""),
                self.cell.getString(),
                self.cell.FormulaLocal if self.cell.getType() == CCT_FORMULA else ""
            )
        else:
            return ("", "", "internal", "error")

    def get_sheet_name(self):
        """Get name of the sheet, unquoted and unescaped."""
        ret = self.cell.AbsoluteName
        sheet_name = ret[0:ret.rfind('.')]
        if sheet_name.startswith("'"):
            ret = sheet_name[1:len(sheet_name)-1].replace("''", "'")
        else:
            ret = sheet_name
        return ret

    def get_range_name(self):
        """Get range name of the cell (without $ markers)."""
        ret = self.cell.AbsoluteName
        return ret[ret.rfind('.') + 1:].replace("$", "")

    def get_content_type(self):
        """Get cell content type."""
        return self.cell.getType()

    def get_formula(self):
        """Get formula of the cell."""
        return self.cell.FormulaLocal

    def set_formula(self, text):
        """Set formula to the cell."""
        self.cell.FormulaLocal = text

    # XEventListener
    def disposing(self, ev):
        pass

    # XModifyListener
    def modified(self, ev):
        # Forward the change to the owning Rows container, unless
        # watching has been switched off.
        if self.watching:
            self.data_model.update_watch(self)
| |
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Rasperry-Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import numpy as np
import picamera
import picamera.array
import picamera.bcm_host as bcm_host
import picamera.mmal as mmal
import pytest
import mock
@pytest.fixture()
def fake_cam(request):
    """Provide a mock camera with a fixed 10x10 resolution."""
    mocked = mock.Mock()
    mocked.resolution = (10, 10)
    return mocked
def test_rgb_array1(camera, mode):
    """An unresized RGB capture fills the array at full resolution."""
    resolution, framerate = mode
    with picamera.array.PiRGBArray(camera) as output:
        camera.capture(output, 'rgb')
        assert output.array.dtype == np.uint8
        assert output.array.shape == (resolution[1], resolution[0], 3)
def test_rgb_array2(fake_cam):
    """Raw RGB bytes written to the stream decode into per-channel planes."""
    with picamera.array.PiRGBArray(fake_cam) as output:
        output.write(b'\x01\x02\x03' * 256)
        output.write(b'\x01\x02\x03' * 256)
        output.flush()
        for channel, expected in enumerate((1, 2, 3)):
            assert (output.array[:, :, channel] == expected).all()
        # An under-sized write must be rejected on flush.
        output.truncate(0)
        with pytest.raises(picamera.PiCameraValueError):
            output.write(b'\x00' * 10)
            output.flush()
def test_rgb_array3(camera, mode):
    """A resized RGB capture fills the array at the requested size."""
    resolution, framerate = mode
    half = (resolution[0] // 2, resolution[1] // 2)
    with picamera.array.PiRGBArray(camera, size=half) as output:
        camera.capture(output, 'rgb', resize=half)
        assert output.array.dtype == np.uint8
        assert output.array.shape == (half[1], half[0], 3)
def test_yuv_array1(camera, mode):
    """A YUV capture exposes both YUV and RGB-converted arrays."""
    resolution, framerate = mode
    if resolution == (2592, 1944):
        pytest.xfail('Pi runs out of memory during RGB conversion at this resolution')
    with picamera.array.PiYUVArray(camera) as output:
        camera.capture(output, 'yuv')
        expected_shape = (resolution[1], resolution[0], 3)
        assert output.array.dtype == np.uint8
        assert output.array.shape == expected_shape
        assert output.rgb_array.dtype == np.uint8
        assert output.rgb_array.shape == expected_shape
def test_yuv_array2(fake_cam):
    """Raw Y/U/V planes written to the stream decode per channel."""
    with picamera.array.PiYUVArray(fake_cam) as output:
        output.write(b'\x01' * 32 * 16)
        output.write(b'\x02' * 16 * 8)
        output.write(b'\x03' * 16 * 8)
        output.flush()
        for channel, expected in enumerate((1, 2, 3)):
            assert (output.array[:, :, channel] == expected).all()
        # XXX What about rgb_array?
        output.truncate(0)
        with pytest.raises(picamera.PiCameraValueError):
            output.write(b'\x00' * 10)
            output.flush()
def test_yuv_array3(camera, mode):
    """A resized YUV capture fills both arrays at the requested size."""
    resolution, framerate = mode
    half = (resolution[0] // 2, resolution[1] // 2)
    with picamera.array.PiYUVArray(camera, size=half) as output:
        camera.capture(output, 'yuv', resize=half)
        expected_shape = (half[1], half[0], 3)
        assert output.array.dtype == np.uint8
        assert output.array.shape == expected_shape
        assert output.rgb_array.dtype == np.uint8
        assert output.rgb_array.shape == expected_shape
def test_yuv_buffer(camera, mode):
    """Capturing YUV into a pre-allocated numpy buffer succeeds."""
    resolution, framerate = mode
    width, height = resolution
    # Pad to macroblock alignment: big enough even if 16x16 rounding.
    padded_w = (width + 31) // 32 * 32
    padded_h = (height + 15) // 16 * 16
    target = np.empty((int(padded_w * padded_h * 1.5),), dtype=np.uint8)
    camera.capture(target, 'yuv')
def test_rgb_buffer(camera, mode):
    """Capturing RGB into a pre-allocated numpy buffer succeeds."""
    resolution, framerate = mode
    width, height = resolution
    # Pad to macroblock alignment: big enough even if 16x16 rounding.
    padded_w = (width + 31) // 32 * 32
    padded_h = (height + 15) // 16 * 16
    target = np.empty((padded_w * padded_h * 3,), dtype=np.uint8)
    camera.capture(target, 'rgb')
def test_bayer_array(camera, mode):
    """Bayer capture is always at the sensor's full resolution."""
    with picamera.array.PiBayerArray(camera) as output:
        camera.capture(output, 'jpeg', bayer=True)
        if camera.exif_tags['IFD0.Model'].upper() == 'RP_OV5647':
            sensor_shape = (1944, 2592, 3)
        else:
            sensor_shape = (2464, 3280, 3)
        assert output.array.shape == sensor_shape
        assert output.demosaic().shape == sensor_shape
def test_motion_array1(camera, mode):
    """Motion vectors recorded at full size have the expected shape."""
    resolution, framerate = mode
    if resolution == (2592, 1944):
        pytest.xfail('Cannot encode video at max resolution')
    elif framerate == 5 and camera.exif_tags['IFD0.Model'].upper() == 'RP_IMX219':
        pytest.xfail('Motion vectors fail at low framerate on V2 camera module')
    with picamera.array.PiMotionArray(camera) as output:
        camera.start_recording('/dev/null', 'h264', motion_output=output)
        camera.wait_recording(1)
        camera.stop_recording()
        # One extra column of vectors beyond the macroblock count.
        cols = ((resolution[0] + 15) // 16) + 1
        rows = (resolution[1] + 15) // 16
        assert output.array.shape[1:] == (rows, cols)
        # Frame count is inexact: start-up costs in recent firmwares make
        # a tight lower bound hard, so require more than one frame and no
        # more than one second's worth.
        assert 1 < output.array.shape[0] <= framerate
def test_motion_array2(camera, mode):
    """Motion vectors recorded with resize have the expected shape."""
    resolution, framerate = mode
    if framerate == 5 and camera.exif_tags['IFD0.Model'].upper() == 'RP_IMX219':
        pytest.xfail('Motion vectors fail at low framerate on V2 camera module')
    if resolution == (2592, 1944):
        resize = (640, 480)
    else:
        resize = (resolution[0] // 2, resolution[1] // 2)
    with picamera.array.PiMotionArray(camera, size=resize) as output:
        camera.start_recording(
            '/dev/null', 'h264', motion_output=output, resize=resize)
        camera.wait_recording(1)
        camera.stop_recording()
        # One extra column of vectors beyond the macroblock count.
        cols = ((resize[0] + 15) // 16) + 1
        rows = (resize[1] + 15) // 16
        assert output.array.shape[1:] == (rows, cols)
        # Frame count is inexact: start-up costs in recent firmwares make
        # a tight lower bound hard, so require more than one frame and no
        # more than one second's worth.
        assert 1 < output.array.shape[0] <= framerate
def test_yuv_analysis1(camera, mode):
    """PiYUVAnalysis.analyze receives full-resolution frames."""
    resolution, framerate = mode
    if resolution == (2592, 1944):
        pytest.xfail('Cannot encode video at max resolution')

    class YUVTest(picamera.array.PiYUVAnalysis):
        def __init__(self, camera):
            super(YUVTest, self).__init__(camera)
            self.write_called = False

        def analyze(self, a):
            self.write_called = True
            assert a.shape == (resolution[1], resolution[0], 3)

    with YUVTest(camera) as output:
        camera.start_recording(output, 'yuv')
        camera.wait_recording(1)
        camera.stop_recording()
        assert output.write_called
def test_yuv_analysis2(fake_cam):
    """Synthetic YUV planes are decoded per channel before analyze()."""
    class YUVTest(picamera.array.PiYUVAnalysis):
        def analyze(self, a):
            for channel, expected in enumerate((1, 2, 3)):
                assert (a[..., channel] == expected).all()

    with YUVTest(fake_cam) as output:
        output.write((b'\x01' * 32 * 16) + (b'\x02' * 16 * 8) + (b'\x03' * 16 * 8))
        with pytest.raises(picamera.PiCameraValueError):
            output.write(b'\x00' * 10)
def test_rgb_analysis1(camera, mode):
    """PiRGBAnalysis.analyze receives full-resolution frames."""
    resolution, framerate = mode
    if resolution == (2592, 1944):
        pytest.xfail('Cannot encode video at max resolution')

    class RGBTest(picamera.array.PiRGBAnalysis):
        def __init__(self, camera):
            super(RGBTest, self).__init__(camera)
            self.write_called = False

        def analyze(self, a):
            self.write_called = True
            assert a.shape == (resolution[1], resolution[0], 3)

    with RGBTest(camera) as output:
        camera.start_recording(output, 'rgb')
        camera.wait_recording(1)
        camera.stop_recording()
        assert output.write_called
def test_rgb_analysis2(fake_cam):
    """Synthetic RGB bytes are decoded per channel before analyze()."""
    class RGBTest(picamera.array.PiRGBAnalysis):
        def analyze(self, a):
            for channel, expected in enumerate((1, 2, 3)):
                assert (a[..., channel] == expected).all()

    with RGBTest(fake_cam) as output:
        output.write(b'\x01\x02\x03' * 512)
        with pytest.raises(picamera.PiCameraValueError):
            output.write(b'\x00' * 10)
def test_motion_analysis1(camera, mode):
    """PiMotionAnalysis.analyze receives full-size vector arrays."""
    resolution, framerate = mode
    if resolution == (2592, 1944):
        pytest.xfail('Cannot encode video at max resolution')
    # One extra column of vectors beyond the macroblock count.
    cols = ((resolution[0] + 15) // 16) + 1
    rows = (resolution[1] + 15) // 16

    class MATest(picamera.array.PiMotionAnalysis):
        def __init__(self, camera):
            super(MATest, self).__init__(camera)
            self.write_called = False

        def analyze(self, a):
            self.write_called = True
            assert a.shape == (rows, cols)

    with MATest(camera) as output:
        camera.start_recording('/dev/null', 'h264', motion_output=output)
        camera.wait_recording(1)
        camera.stop_recording()
        assert output.write_called
def test_motion_analysis2(camera, mode):
    """PiMotionAnalysis.analyze receives resized vector arrays."""
    resolution, framerate = mode
    if resolution == (2592, 1944):
        resize = (640, 480)
    else:
        resize = (resolution[0] // 2, resolution[1] // 2)
    # One extra column of vectors beyond the macroblock count.
    cols = ((resize[0] + 15) // 16) + 1
    rows = (resize[1] + 15) // 16

    class MATest(picamera.array.PiMotionAnalysis):
        def __init__(self, camera, size):
            super(MATest, self).__init__(camera, size)
            self.write_called = False

        def analyze(self, a):
            self.write_called = True
            assert a.shape == (rows, cols)

    with MATest(camera, size=resize) as output:
        camera.start_recording(
            '/dev/null', 'h264', motion_output=output, resize=resize)
        camera.wait_recording(1)
        camera.stop_recording()
        assert output.write_called
def test_overlay_array1(camera, mode):
    """Adding/removing a full-screen overlay updates camera.overlays."""
    resolution, framerate = mode
    # Draw a cross overlay on a macroblock-aligned canvas.
    width, height = resolution
    padded_w = bcm_host.VCOS_ALIGN_UP(width, 32)
    padded_h = bcm_host.VCOS_ALIGN_UP(height, 16)
    cross = np.zeros((padded_h, padded_w, 3), dtype=np.uint8)
    cross[resolution[1] // 2, :, :] = 0xff
    cross[:, resolution[0] // 2, :] = 0xff
    overlay = camera.add_overlay(cross, resolution, alpha=128)
    assert len(camera.overlays) == 1
    assert camera.overlays[0].alpha == 128
    camera.remove_overlay(overlay)
    assert not camera.overlays
def test_overlay_array2(camera, mode):
    """A windowed, non-fullscreen overlay keeps its layer and window."""
    resolution, framerate = mode
    # Construct an array 25x25 big and display it at 10x10 on the screen.
    canvas = np.zeros((32, 32, 3), dtype=np.uint8)
    canvas[:25, :25, :] = 0xff
    overlay = camera.add_overlay(
        canvas, (25, 25), layer=3, fullscreen=False, window=(10, 10, 25, 25))
    assert len(camera.overlays) == 1
    assert not camera.overlays[0].fullscreen
    assert camera.overlays[0].window == (10, 10, 25, 25)
    assert camera.overlays[0].layer == 3
    camera.remove_overlay(overlay)
    assert not camera.overlays
def test_overlay_array3(camera, mode):
    """Overlay pixel formats are auto-detected from the array's depth."""
    resolution, framerate = mode

    def added_format(source, *fmt):
        # Add `source` as a 32x32 overlay, return its input port format,
        # and always remove the overlay again.
        overlay = camera.add_overlay(source, (32, 32), *fmt)
        try:
            return overlay.renderer.inputs[0].format
        finally:
            camera.remove_overlay(overlay)

    # A 32x32x3 array is auto-detected as RGB...
    rgb = np.zeros((32, 32, 3), dtype=np.uint8)
    assert added_format(rgb) == mmal.MMAL_ENCODING_RGB24
    # ...and explicit RGB/BGR specifications are honoured.
    assert added_format(rgb, 'rgb') == mmal.MMAL_ENCODING_RGB24
    assert added_format(rgb, 'bgr') == mmal.MMAL_ENCODING_BGR24
    # A 32x32x4 array is auto-detected as RGBA.
    rgba = np.zeros((32, 32, 4), dtype=np.uint8)
    assert added_format(rgba) == mmal.MMAL_ENCODING_RGBA
    # Explicit RGBA works too (BGRA is not tested as old firmwares
    # don't support it on renderers).
    assert added_format(rgba, 'rgba') == mmal.MMAL_ENCODING_RGBA
    # Three-channel formats must be rejected for a four-channel array.
    with pytest.raises(picamera.PiCameraError):
        camera.add_overlay(rgba, (32, 32), 'rgb')
    with pytest.raises(picamera.PiCameraError):
        camera.add_overlay(rgba, (32, 32), 'bgr')
def test_bayer_bad(camera):
    """Flushing undersized bayer data raises PiCameraValueError."""
    output = picamera.array.PiBayerArray(camera)
    output.write(b'\x00' * 12000000)
    with pytest.raises(picamera.PiCameraValueError):
        output.flush()
def test_array_writable(camera):
    """PiRGBArray streams report themselves as writable."""
    output = picamera.array.PiRGBArray(camera)
    assert output.writable()
def test_array_no_analyze(camera):
    """Writing to a base analysis class without analyze() must fail."""
    output = picamera.array.PiRGBAnalysis(camera)
    padded = camera.resolution.pad()
    with pytest.raises(NotImplementedError):
        output.write(b'\x00' * (padded.width * padded.height * 3))
def test_analysis_writable(camera):
    """PiRGBAnalysis streams report themselves as writable."""
    output = picamera.array.PiRGBAnalysis(camera)
    assert output.writable()
| |
import unittest
from classtime.brain.scheduling import Schedule
class TestSchedule(unittest.TestCase): #pylint: disable=R0904
    """Unit tests for classtime.brain.scheduling.Schedule."""

    # Scenarios mapping a meeting time (day string, start, end) to the
    # number of timetable blocks it should fill.  test_section_add and
    # test_busy_time_add previously each kept an identical copy of
    # this table; it is shared here instead.
    TIMETABLE_TESTCASES = [
        {
            'day': 'TR',
            'startTime': '08:00 AM',
            'endTime': '08:50 AM',
            'expected': 4
        },
        {
            'day': 'MTWRF',
            'startTime': '08:00 AM',
            'endTime': '08:50 AM',
            'expected': 10
        },
        {
            'day': 'TR',
            'startTime': '08:00 AM',
            'endTime': '08:50 PM',
            'expected': 52
        },
        {
            'day': 'TR',
            'startTime': '08:00 AM',
            'endTime': '09:20 AM',
            'expected': 6
        },
        {
            'day': 'M',
            'startTime': '06:00 PM',
            'endTime': '08:50 PM',
            'expected': 6
        }
    ]

    @classmethod
    def setup_class(cls):
        pass

    @classmethod
    def teardown_class(cls):
        pass

    @staticmethod
    def _count_filled_blocks(schedule):
        """Count timetable blocks that are not Schedule.OPEN."""
        return sum(1
                   for day in schedule.timetable
                   for block in day
                   if block is not Schedule.OPEN)

    def test_section_add(self):
        """Adding a section fills the expected number of timetable blocks."""
        for section in self.TIMETABLE_TESTCASES:
            schedule = Schedule()
            schedule.add_section(section)
            assert (self._count_filled_blocks(schedule)
                    == section.get('expected'))

    def test_busy_time_add(self):
        """Adding a busy time fills the expected number of timetable blocks."""
        for busy_time in self.TIMETABLE_TESTCASES:
            schedule = Schedule()
            schedule.add_busy_time(busy_time)
            assert (self._count_filled_blocks(schedule)
                    == busy_time.get('expected'))

    def test_busy_time_overlap(self):
        """Adding the same busy_time more than once must be idempotent.

        See github.com/rosshamish/classtime/issues/96
        """
        testcases = [
            {
                'busy_times': [
                    {
                        'day': 'W',
                        'startTime': '08:00 AM',
                        'endTime': '08:20 AM',
                    },
                    {
                        'day': 'W',
                        'startTime': '08:00 AM',
                        'endTime': '08:20 AM',
                    }
                ],
                'expected': 1
            }
        ]
        for testcase in testcases:
            schedule = Schedule()
            bitmaps_seen = set()
            for busy_time in testcase.get('busy_times'):
                schedule.add_busy_time(busy_time)
                bitmaps_seen.add(''.join(
                    bin(day_bitmap)
                    for day_bitmap in schedule.timetable_bitmap))
            # Every repeated add must leave the bitmap unchanged...
            assert len(bitmaps_seen) == 1
            # ...and the filled-block count must not double up.
            assert (self._count_filled_blocks(schedule)
                    == testcase.get('expected'))

    def test_conflict_recognition(self): #pylint: disable=R0201
        """conflicts() must flag overlapping sections and only those."""
        testcases = [
            {
                'expected': True,
                'sections':
                [
                    {
                        'day': 'TR',
                        'startTime': '08:00 AM',
                        'endTime': '08:50 AM'
                    },
                    {
                        'day': 'MTWRF',
                        'startTime': '08:00 AM',
                        'endTime': '08:50 AM'
                    }
                ]
            },
            {
                'expected': False,
                'sections':
                [
                    {
                        'day': 'TR',
                        'startTime': '08:00 AM',
                        'endTime': '08:50 AM'
                    },
                    {
                        'day': 'TR',
                        'startTime': '09:00 AM',
                        'endTime': '09:50 AM'
                    }
                ]
            },
            {
                'expected': False,
                'sections':
                [
                    {
                        'day': 'TR',
                        'startTime': '07:00 PM',
                        'endTime': '07:50 PM'
                    },
                    {
                        'day': 'TR',
                        'startTime': '08:00 PM',
                        'endTime': '09:00 PM'
                    }
                ]
            },
            {
                'expected': True,
                'sections':
                [
                    {
                        'day': 'TR',
                        'startTime': '07:00 PM',
                        'endTime': '07:50 PM'
                    },
                    {
                        'day': 'TR',
                        'startTime': '08:00 PM',
                        'endTime': '09:00 PM'
                    },
                    {
                        'day': 'TR',
                        'startTime': '08:30 PM',
                        'endTime': '08:50 PM'
                    }
                ]
            }
        ]
        for scenario in testcases:
            schedule = Schedule()
            found_conflict = False
            for section in scenario.get('sections'):
                if schedule.conflicts(section):
                    found_conflict = True
                    break
                schedule.add_section(section)
            assert found_conflict == scenario.get('expected')

    def test_comparison_called_but_no_courses(self):
        """is_similar() must not crash on an empty schedule with prefs."""
        sched = Schedule(sections=[], preferences={'no-marathons': 1})
        sched.is_similar(Schedule())
def test_preferences_null_values():
    """A None preference value must not break overall_score()."""
    schedule = Schedule(preferences={ 'no-marathons': None })
    schedule.overall_score() # should not raise an exception
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
from py3compat import PY2
from optparse import make_option
from django.core.management.base import BaseCommand
from django.core.management import call_command
from django.utils import timezone
from dateutil.parser import parse
if PY2:
import unicodecsv as csv
else:
import csv
from health_ident.models import (Entity, AdministrativeEntity,
HealthEntity, EntityType,
EntityHistory, HealthEntityProperty)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-a',
help='CSV file to import Administrative Entities from',
action='store',
dest='input_admin_file'),
make_option('-f',
help='CSV file to import Health Entities from',
action='store',
dest='input_health_file'),
make_option('-p',
help='CSV file to import Health Entity Properties from',
action='store',
dest='input_health_properties_file'),
make_option('-c',
help='Delete all Entity fixtures first',
action='store_true',
dest='clear')
)
def handle(self, *args, **options):
admin_headers = ['IDENT_Code', 'IDENT_Name', 'IDENT_Type', 'IDENT_ParentCode',
'IDENT_ModifiedOn', 'IDENT_RegionName', 'IDENT_CercleName',
'IDENT_CommuneName',
'IDENT_HealthAreaCode', 'IDENT_HealthAreaName',
'IDENT_HealthAreaCenterDistance',
'IDENT_Latitude', 'IDENT_Longitude', 'IDENT_Geometry']
health_headers = ['IDENT_Code', 'IDENT_Name', 'IDENT_Type', 'IDENT_ParentCode',
'IDENT_ModifiedOn',
'IDENT_HealthRegionCode', 'IDENT_HealthDistrictCode',
'IDENT_HealthAreaCode', 'IDENT_MainEntityCode',
'IDENT_Latitude', 'IDENT_Longitude', 'IDENT_Geometry']
properties_headers = ['IDENT_Code', 'IDENT_PropertyName',
'IDENT_PropertyValue', 'IDENT_PropertyModifiedOn']
input_admin_file = open(options.get('input_admin_file'), 'r')
admin_csv_reader = csv.DictReader(input_admin_file, fieldnames=admin_headers)
input_health_file = open(options.get('input_health_file'), 'r')
health_csv_reader = csv.DictReader(input_health_file, fieldnames=health_headers)
input_health_properties_file = open(options.get('input_health_properties_file'), 'r')
health_properties_csv_reader = csv.DictReader(input_health_properties_file,
fieldnames=properties_headers)
if options.get('clear'):
print("Removing all entities...")
AdministrativeEntity.objects.all().delete()
HealthEntity.objects.all().delete()
Entity.objects.all().delete()
EntityHistory.objects.all().delete()
HealthEntityProperty.objects.all().delete()
print("Importing fixtures")
call_command("loaddata", "fixtures/EntityType.xml")
call_command("loaddata", "fixtures/Entity-root.xml")
def add_entity(entity_dict, is_admin):
cls = AdministrativeEntity if is_admin else HealthEntity
slug = entry.get('IDENT_Code')
name = entry.get('IDENT_Name')
type_slug = entry.get('IDENT_Type')
entity_type = EntityType.objects.get(slug=type_slug)
parent_slug = entry.get('IDENT_ParentCode')
latitude = entry.get('IDENT_Latitude')
longitude = entry.get('IDENT_Longitude')
geometry = entry.get('IDENT_Geometry')
health_area_slug = entry.get('IDENT_HealthAreaCode')
try:
health_area_center_distance = float(entry.get('IDENT_HealthAreaCenterDistance'))
except:
health_area_center_distance = None
if entity_type == 'vfq' and health_area_slug:
health_area = HealthEntity.objects.get(slug=health_area_slug)
else:
health_area = None
entity = cls.objects.create(slug=slug,
name=name,
type=entity_type,
latitude=latitude or None,
longitude=longitude or None,
geometry=geometry or None)
if parent_slug:
parentcls = Entity if parent_slug == 'mali' else cls
parent = parentcls.objects.get(slug=parent_slug)
entity.parent = parent
if cls == AdministrativeEntity and health_area:
entity.health_entity = health_area
if health_area_center_distance:
entity.main_entity_distance = health_area_center_distance
entity.save()
print(entity.name)
print("Importing Health Entities...")
for entry in health_csv_reader:
if health_csv_reader.line_num == 1:
continue
add_entity(entry, False)
print("Importing Admin Entities...")
for entry in admin_csv_reader:
if admin_csv_reader.line_num == 1:
continue
add_entity(entry, True)
print("Updating Health Entities with main center")
for entry in health_csv_reader:
if health_csv_reader.line_num == 1:
continue
if not entry.get('IDENT_MainEntityCode'):
continue
entity = HealthEntity.objects.get(entry.get('IDENT_Code'))
main_entity = HealthEntity.objects.get(entry.get('IDENT_MainEntityCode'))
entity.main_entity = main_entity
entity.save()
print("Setting EntityHistory...")
for entity in HealthEntity.objects.all():
EntityHistory.objects.create(entity=entity, active=True)
print("[MIGRATION] Setting HealthEntityProperty...")
HealthEntityProperty.objects.all().delete()
hc_only = ['Category', 'Facility Name', 'uuid', 'Distance',
'LL_Source', 'Facility Type Recoded', 'Start Date',
'Accessibility', 'Long', 'Facility_Type', 'Source', 'Lat']
ha_only = ['Village Number', 'Population']
both = ['Region', 'District', 'Map Admin2', 'Map Admin1']
def _create(entity, entry):
    # Create one HealthEntityProperty row for `entity` from one CSV row.
    # The modification timestamp is parsed from the CSV and tagged as UTC
    # (presumably the export is in UTC -- TODO confirm with the source data).
    modified_on = parse(entry.get('IDENT_PropertyModifiedOn')).replace(tzinfo=timezone.utc)
    HealthEntityProperty.objects.create(
        entity=entity,
        name=entry.get('IDENT_PropertyName'),
        value=entry.get('IDENT_PropertyValue'),
        modified_on=modified_on)
for entry in health_properties_csv_reader:
if health_properties_csv_reader.line_num == 1:
continue
hc = HealthEntity.objects.get(slug=entry.get('IDENT_Code'))
print(hc)
try:
ha = HealthEntity.objects.get(slug="Z{}".format(entry.get('IDENT_Code')))
except HealthEntity.DoesNotExist:
ha = None
if entry.get('IDENT_PropertyName') in hc_only:
_create(hc, entry)
elif entry.get('IDENT_PropertyName') in ha_only and ha:
_create(ha, entry)
elif entry.get('IDENT_PropertyName') in both:
if ha:
_create(ha, entry)
_create(hc, entry)
else:
_create(hc, entry)
# print("Importing Admin Entities...")
# for entry in admin_csv_reader:
# if admin_csv_reader.line_num == 1:
# continue
# if entry.get('IDENT_HealthAreaCenterDistance'):
# entity = AdministrativeEntity.objects.get(slug=entry.get('IDENT_Code'))
# try:
# health_area_center_distance = float(entry.get('IDENT_HealthAreaCenterDistance'))
# except:
# health_area_center_distance = None
# entity.main_entity_distance = health_area_center_distance
# entity.save()
# if entry.get('IDENT_HealthAreaCode'):
# entity = AdministrativeEntity.objects.get(slug=entry.get('IDENT_Code'))
# he = HealthEntity.objects.get(slug=entry.get('IDENT_HealthAreaCode'))
# entity.health_entity = he
# entity.save()
# def move_properties():
# from health_ident.models import HealthEntity, HealthEntityProperty
# def _try(hp, target_slug):
# target = HealthEntity.objects.get(slug=target_slug)
# try:
# HealthEntityProperty.objects.create(
# entity=target,
# name=hp.name,
# value=hp.value,
# modified_on=hp.modified_on)
# except:
# pass
# hc_only = ['Category', 'Facility Name', 'uuid', 'Distance',
# 'LL_Source', 'Facility Type Recoded', 'Start Date',
# 'Accessibility', 'Long', 'Facility_Type', 'Source', 'Lat']
# ha_only = ['Village Number', 'Population']
# both = ['Region', 'District', 'Map Admin2', 'Map Admin1']
# for hp in HealthEntityProperty.objects.filter(name__in=hc_only):
# if hp.entity.type.slug != 'health_center':
# _try(hp, hp.entity.slug.replace(r'^Z', ''))
# hp.delete()
# for hp in HealthEntityProperty.objects.filter(name__in=ha_only):
# if hp.entity.type.slug != 'health_area':
# _try(hp, 'Z{}'.format(hp.entity.slug))
# hp.delete()
# for hp in HealthEntityProperty.objects.filter(name__in=both):
# _try(hp, hp.entity.slug.replace(r'^Z', ''))
| |
"""Elliptical geometrical entities.
Contains
* Ellipse
* Circle
"""
from __future__ import print_function, division
from sympy.core import S, C, sympify, pi, Dummy
from sympy.core.logic import fuzzy_bool
from sympy.core.numbers import oo, zoo
from sympy.simplify import simplify, trigsimp
from sympy.functions.elementary.miscellaneous import sqrt, Max, Min
from sympy.functions.elementary.complexes import im
from sympy.geometry.exceptions import GeometryError
from sympy.polys import Poly, PolynomialError
from sympy.solvers import solve
from sympy.utilities.lambdify import lambdify
from sympy.utilities.iterables import uniq
from sympy.utilities.misc import filldedent
from .entity import GeometryEntity
from .point import Point
from .line import LinearEntity, Line
from .util import _symbol, idiff
from sympy.mpmath import findroot as nroot
import random
from sympy.utilities.decorator import doctest_depends_on
class Ellipse(GeometryEntity):
    """An elliptical GeometryEntity.

    Parameters
    ==========

    center : Point, optional
        Default value is Point(0, 0)
    hradius : number or SymPy expression, optional
    vradius : number or SymPy expression, optional
    eccentricity : number or SymPy expression, optional
        Two of `hradius`, `vradius` and `eccentricity` must be supplied to
        create an Ellipse. The third is derived from the two supplied.

    Attributes
    ==========

    center
    hradius
    vradius
    area
    circumference
    eccentricity
    periapsis
    apoapsis
    focus_distance
    foci

    Raises
    ======

    GeometryError
        When `hradius`, `vradius` and `eccentricity` are incorrectly supplied
        as parameters.
    TypeError
        When `center` is not a Point.

    See Also
    ========

    Circle

    Notes
    =====

    Constructed from a center and two radii, the first being the horizontal
    radius (along the x-axis) and the second being the vertical radius (along
    the y-axis).

    When symbolic value for hradius and vradius are used, any calculation that
    refers to the foci or the major or minor axis will assume that the ellipse
    has its major radius on the x-axis. If this is not true then a manual
    rotation is necessary.

    Examples
    ========

    >>> from sympy import Ellipse, Point, Rational
    >>> e1 = Ellipse(Point(0, 0), 5, 1)
    >>> e1.hradius, e1.vradius
    (5, 1)
    >>> e2 = Ellipse(Point(3, 1), hradius=3, eccentricity=Rational(4, 5))
    >>> e2
    Ellipse(Point(3, 1), 3, 9/5)

    Plotting:

    >>> from sympy.plotting.pygletplot import PygletPlot as Plot
    >>> from sympy import Circle, Segment
    >>> c1 = Circle(Point(0,0), 1)
    >>> Plot(c1) # doctest: +SKIP
    [0]: cos(t), sin(t), 'mode=parametric'
    >>> p = Plot() # doctest: +SKIP
    >>> p[0] = c1 # doctest: +SKIP
    >>> radius = Segment(c1.center, c1.random_point())
    >>> p[1] = radius # doctest: +SKIP
    >>> p # doctest: +SKIP
    [0]: cos(t), sin(t), 'mode=parametric'
    [1]: t*cos(1.546086215036205357975518382),
    t*sin(1.546086215036205357975518382), 'mode=parametric'
    """
def __new__(
cls, center=None, hradius=None, vradius=None, eccentricity=None,
**kwargs):
hradius = sympify(hradius)
vradius = sympify(vradius)
eccentricity = sympify(eccentricity)
if center is None:
center = Point(0, 0)
else:
center = Point(center)
if len(list(filter(None, (hradius, vradius, eccentricity)))) != 2:
raise ValueError('Exactly two arguments of "hradius", '
'"vradius", and "eccentricity" must not be None."')
if eccentricity is not None:
if hradius is None:
hradius = vradius / sqrt(1 - eccentricity**2)
elif vradius is None:
vradius = hradius * sqrt(1 - eccentricity**2)
if hradius == vradius:
return Circle(center, hradius, **kwargs)
return GeometryEntity.__new__(cls, center, hradius, vradius, **kwargs)
@property
def center(self):
"""The center of the ellipse.
Returns
=======
center : number
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.center
Point(0, 0)
"""
return self.args[0]
@property
def hradius(self):
"""The horizontal radius of the ellipse.
Returns
=======
hradius : number
See Also
========
vradius, major, minor
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.hradius
3
"""
return self.args[1]
@property
def vradius(self):
"""The vertical radius of the ellipse.
Returns
=======
vradius : number
See Also
========
hradius, major, minor
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.vradius
1
"""
return self.args[2]
@property
def minor(self):
"""Shorter axis of the ellipse (if it can be determined) else vradius.
Returns
=======
minor : number or expression
See Also
========
hradius, vradius, major
Examples
========
>>> from sympy import Point, Ellipse, Symbol
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.minor
1
>>> a = Symbol('a')
>>> b = Symbol('b')
>>> Ellipse(p1, a, b).minor
b
>>> Ellipse(p1, b, a).minor
a
>>> m = Symbol('m')
>>> M = m + 1
>>> Ellipse(p1, m, M).minor
m
"""
rv = Min(*self.args[1:3])
if rv.func is Min:
return self.vradius
return rv
@property
def major(self):
"""Longer axis of the ellipse (if it can be determined) else hradius.
Returns
=======
major : number or expression
See Also
========
hradius, vradius, minor
Examples
========
>>> from sympy import Point, Ellipse, Symbol
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.major
3
>>> a = Symbol('a')
>>> b = Symbol('b')
>>> Ellipse(p1, a, b).major
a
>>> Ellipse(p1, b, a).major
b
>>> m = Symbol('m')
>>> M = m + 1
>>> Ellipse(p1, m, M).major
m + 1
"""
rv = Max(*self.args[1:3])
if rv.func is Max:
return self.hradius
return rv
@property
def area(self):
"""The area of the ellipse.
Returns
=======
area : number
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.area
3*pi
"""
return simplify(S.Pi * self.hradius * self.vradius)
@property
def circumference(self):
"""The circumference of the ellipse.
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.circumference
12*Integral(sqrt((-8*_x**2/9 + 1)/(-_x**2 + 1)), (_x, 0, 1))
"""
if self.eccentricity == 1:
return 2*pi*self.hradius
else:
x = C.Dummy('x', real=True)
return 4*self.major*C.Integral(
sqrt((1 - (self.eccentricity*x)**2)/(1 - x**2)), (x, 0, 1))
@property
def eccentricity(self):
"""The eccentricity of the ellipse.
Returns
=======
eccentricity : number
Examples
========
>>> from sympy import Point, Ellipse, sqrt
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, sqrt(2))
>>> e1.eccentricity
sqrt(7)/3
"""
return self.focus_distance / self.major
@property
def periapsis(self):
"""The periapsis of the ellipse.
The shortest distance between the focus and the contour.
Returns
=======
periapsis : number
See Also
========
apoapsis : Returns greatest distance between focus and contour
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.periapsis
-2*sqrt(2) + 3
"""
return self.major * (1 - self.eccentricity)
@property
def apoapsis(self):
"""The apoapsis of the ellipse.
The greatest distance between the focus and the contour.
Returns
=======
apoapsis : number
See Also
========
periapsis : Returns shortest distance between foci and contour
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.apoapsis
2*sqrt(2) + 3
"""
return self.major * (1 + self.eccentricity)
@property
def focus_distance(self):
"""The focale distance of the ellipse.
The distance between the center and one focus.
Returns
=======
focus_distance : number
See Also
========
foci
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.focus_distance
2*sqrt(2)
"""
return Point.distance(self.center, self.foci[0])
@property
def foci(self):
"""The foci of the ellipse.
Notes
-----
The foci can only be calculated if the major/minor axes are known.
Raises
======
ValueError
When the major and minor axis cannot be determined.
See Also
========
sympy.geometry.point.Point
focus_distance : Returns the distance between focus and center
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.foci
(Point(-2*sqrt(2), 0), Point(2*sqrt(2), 0))
"""
c = self.center
hr, vr = self.hradius, self.vradius
if hr == vr:
return (c, c)
# calculate focus distance manually, since focus_distance calls this
# routine
fd = sqrt(self.major**2 - self.minor**2)
if hr == self.minor:
# foci on the y-axis
return (c + Point(0, -fd), c + Point(0, fd))
elif hr == self.major:
# foci on the x-axis
return (c + Point(-fd, 0), c + Point(fd, 0))
def rotate(self, angle=0, pt=None):
"""Rotate ``angle`` radians counterclockwise about Point ``pt``.
Note: since the general ellipse is not supported, the axes of
the ellipse will not be rotated. Only the center is rotated to
a new position.
Examples
========
>>> from sympy import Ellipse, pi
>>> Ellipse((1, 0), 2, 1).rotate(pi/2)
Ellipse(Point(0, 1), 2, 1)
"""
return super(Ellipse, self).rotate(angle, pt)
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since it is the major and minor
axes which must be scaled and they are not GeometryEntities.
Examples
========
>>> from sympy import Ellipse
>>> Ellipse((0, 0), 2, 1).scale(2, 4)
Circle(Point(0, 0), 4)
>>> Ellipse((0, 0), 2, 1).scale(2)
Ellipse(Point(0, 0), 4, 1)
"""
c = self.center
if pt:
pt = Point(pt)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
h = self.hradius
v = self.vradius
return self.func(c.scale(x, y), hradius=h*x, vradius=v*y)
    def reflect(self, line):
        """Override GeometryEntity.reflect since the radius
        is not a GeometryEntity.

        Examples
        ========

        >>> from sympy import Circle, Line
        >>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
        Circle(Point(1, 0), -1)
        >>> from sympy import Ellipse, Line, Point
        >>> Ellipse(Point(3, 4), 1, 3).reflect(Line(Point(0, -4), Point(5, 0)))
        Traceback (most recent call last):
        ...
        NotImplementedError:
        General Ellipse is not supported but the equation of the reflected
        Ellipse is given by the zeros of: f(x, y) = (9*x/41 + 40*y/41 +
        37/41)**2 + (40*x/123 - 3*y/41 - 364/123)**2 - 1

        Notes
        =====

        Until the general ellipse (with no axis parallel to the x-axis) is
        supported a NotImplemented error is raised and the equation whose
        zeros define the rotated ellipse is given.
        """
        def _uniquely_named_symbol(xname, *exprs):
            """Return a symbol which, when printed, will have a name unique
            from any other already in the expressions given. The name is made
            unique by prepending underscores.
            """
            prefix = '%s'
            x = prefix % xname
            syms = set.union(*[e.free_symbols for e in exprs])
            # keep prepending underscores until no free symbol of the given
            # expressions prints with the same name
            while any(x == str(s) for s in syms):
                prefix = '_' + prefix
                x = prefix % xname
            return _symbol(x)

        if line.slope in (0, oo):
            # horizontal or vertical mirror: the image stays axis-aligned,
            # so reflecting the center is sufficient (the sign flip on
            # hradius mirrors Circle.reflect's orientation convention)
            c = self.center
            c = c.reflect(line)
            return self.func(c, -self.hradius, self.vradius)
        else:
            # an oblique mirror would tilt the ellipse, which cannot be
            # represented; report the implicit equation of the image instead
            x, y = [_uniquely_named_symbol(name, self, line) for name in 'xy']
            expr = self.equation(x, y)
            p = Point(x, y).reflect(line)
            result = expr.subs(zip((x, y), p.args
                ), simultaneous=True)
            raise NotImplementedError(filldedent(
                'General Ellipse is not supported but the equation '
                'of the reflected Ellipse is given by the zeros of: ' +
                "f(%s, %s) = %s" % (str(x), str(y), str(result))))
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
-----
Being on the border of self is considered False.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Ellipse, S
>>> from sympy.abc import t
>>> e = Ellipse((0, 0), 3, 2)
>>> e.encloses_point((0, 0))
True
>>> e.encloses_point(e.arbitrary_point(t).subs(t, S.Half))
False
>>> e.encloses_point((4, 0))
False
"""
p = Point(p)
if p in self:
return False
if len(self.foci) == 2:
# if the combined distance from the foci to p (h1 + h2) is less
# than the combined distance from the foci to the minor axis
# (which is the same as the major axis length) then p is inside
# the ellipse
h1, h2 = [f.distance(p) for f in self.foci]
test = 2*self.major - (h1 + h2)
else:
test = self.radius - self.center.distance(p)
return fuzzy_bool(test.is_positive)
    @doctest_depends_on(modules=('pyglet',))
    def tangent_lines(self, p):
        """Tangent lines between `p` and the ellipse.

        If `p` is on the ellipse, returns the tangent line through point `p`.
        Otherwise, returns the tangent line(s) from `p` to the ellipse, or
        None if no tangent line is possible (e.g., `p` inside ellipse).

        Parameters
        ==========

        p : Point

        Returns
        =======

        tangent_lines : list with 1 or 2 Lines

        Raises
        ======

        NotImplementedError
            Can only find tangent lines for a point, `p`, on the ellipse.

        See Also
        ========

        sympy.geometry.point.Point, sympy.geometry.line.Line

        Examples
        ========

        >>> from sympy import Point, Ellipse
        >>> e1 = Ellipse(Point(0, 0), 3, 2)
        >>> e1.tangent_lines(Point(3, 0))
        [Line(Point(3, 0), Point(3, -12))]

        >>> # This will plot an ellipse together with a tangent line.
        >>> from sympy.plotting.pygletplot import PygletPlot as Plot
        >>> from sympy import Point, Ellipse
        >>> e = Ellipse(Point(0,0), 3, 2)
        >>> t = e.tangent_lines(e.random_point())
        >>> p = Plot()
        >>> p[0] = e # doctest: +SKIP
        >>> p[1] = t # doctest: +SKIP
        """
        p = Point(p)
        if self.encloses_point(p):
            # no tangent from a strictly interior point
            return []

        if p in self:
            # p lies on the ellipse: the tangent direction is the gradient
            # of the implicit equation rotated 90 degrees
            delta = self.center - p
            rise = (self.vradius ** 2)*delta.x
            run = -(self.hradius ** 2)*delta.y
            p2 = Point(simplify(p.x + run),
                       simplify(p.y + rise))
            return [Line(p, p2)]
        else:
            # decide (numerically, if possible) whether p is interior
            if len(self.foci) == 2:
                f1, f2 = self.foci
                # NOTE(review): this assumes hradius is the major axis
                # (consistent with the class docstring's convention);
                # using self.major would be safer -- confirm before changing
                maj = self.hradius
                test = (2*maj -
                        Point.distance(f1, p) -
                        Point.distance(f2, p))
            else:
                test = self.radius - Point.distance(self.center, p)
            if test.is_number and test.is_positive:
                return []
            # else p is outside the ellipse or we can't tell. In case of the
            # latter, the solutions returned will only be valid if
            # the point is not inside the ellipse; if it is, nan will result.
            x, y = Dummy('x'), Dummy('y')
            eq = self.equation(x, y)
            dydx = idiff(eq, y, x)
            # tangency points: where the line from p has the curve's slope
            slope = Line(p, Point(x, y)).slope
            tangent_points = solve([slope - dydx, eq], [x, y])

            # handle horizontal and vertical tangent lines
            if len(tangent_points) == 1:
                assert tangent_points[0][
                    0] == p.x or tangent_points[0][1] == p.y
                return [Line(p, p + Point(1, 0)), Line(p, p + Point(0, 1))]

            # others
            return [Line(p, tangent_points[0]), Line(p, tangent_points[1])]
def is_tangent(self, o):
"""Is `o` tangent to the ellipse?
Parameters
==========
o : GeometryEntity
An Ellipse, LinearEntity or Polygon
Raises
======
NotImplementedError
When the wrong type of argument is supplied.
Returns
=======
is_tangent: boolean
True if o is tangent to the ellipse, False otherwise.
See Also
========
tangent_lines
Examples
========
>>> from sympy import Point, Ellipse, Line
>>> p0, p1, p2 = Point(0, 0), Point(3, 0), Point(3, 3)
>>> e1 = Ellipse(p0, 3, 2)
>>> l1 = Line(p1, p2)
>>> e1.is_tangent(l1)
True
"""
inter = None
if isinstance(o, Ellipse):
inter = self.intersection(o)
if isinstance(inter, Ellipse):
return False
return (inter is not None and isinstance(inter[0], Point)
and len(inter) == 1)
elif isinstance(o, LinearEntity):
inter = self._do_line_intersection(o)
if inter is not None and len(inter) == 1:
return inter[0] in o
else:
return False
elif isinstance(o, Polygon):
c = 0
for seg in o.sides:
inter = self._do_line_intersection(seg)
c += len([True for point in inter if point in seg])
return c == 1
else:
raise NotImplementedError("Unknown argument type")
    def normal_lines(self, p, prec=None):
        """Normal lines between `p` and the ellipse.

        Parameters
        ==========

        p : Point
        prec : int, optional
            If given, return an approximate solution with `prec` digits
            instead of the (often enormous) exact one.

        Returns
        =======

        normal_lines : list with 1, 2 or 4 Lines

        Examples
        ========

        >>> from sympy import Line, Point, Ellipse
        >>> e = Ellipse((0, 0), 2, 3)
        >>> c = e.center
        >>> e.normal_lines(c + Point(1, 0))
        [Line(Point(0, 0), Point(1, 0))]
        >>> e.normal_lines(c)
        [Line(Point(0, 0), Point(0, 1)), Line(Point(0, 0), Point(1, 0))]

        Off-axis points require the solution of a quartic equation. This
        often leads to very large expressions that may be of little practical
        use. An approximate solution of `prec` digits can be obtained by
        passing in the desired value:

        >>> e.normal_lines((3, 3), prec=2)
        [Line(Point(-38/47, -85/31), Point(9/47, -21/17)),
        Line(Point(19/13, -43/21), Point(32/13, -8/3))]

        Whereas the above solution has an operation count of 12, the exact
        solution has an operation count of 2020.
        """
        p = Point(p)

        # XXX change True to something like self.angle == 0 if the arbitrarily
        # rotated ellipse is introduced.
        # https://github.com/sympy/sympy/issues/2815)
        if True:
            rv = []
            if p.x == self.center.x:
                # p on the vertical axis: that axis is itself a normal
                rv.append(Line(self.center, slope=oo))
            if p.y == self.center.y:
                # p on the horizontal axis: that axis is itself a normal
                rv.append(Line(self.center, slope=0))
            if rv:
                # at these special orientations of p either 1 or 2 normals
                # exist and we are done
                return rv

        # find the 4 normal points and construct lines through them with
        # the corresponding slope
        x, y = Dummy('x', real=True), Dummy('y', real=True)
        eq = self.equation(x, y)
        dydx = idiff(eq, y, x)
        norm = -1/dydx
        slope = Line(p, (x, y)).slope
        # a normal point is where the line from p matches the normal slope
        seq = slope - norm
        points = []
        if prec is not None:
            # numeric path: isolate y, reduce to a polynomial in x and
            # bracket its real roots
            yis = solve(seq, y)[0]
            xeq = eq.subs(y, yis).as_numer_denom()[0].expand()
            try:
                iv = list(zip(*Poly(xeq).intervals()))[0]
                # bisection is safest here since other methods may miss root
                xsol = [S(nroot(lambdify(x, xeq), i, solver="anderson"))
                        for i in iv]
                points = [Point(i, solve(eq.subs(x, i), y)[0]).n(prec)
                          for i in xsol]
            except PolynomialError:
                pass
        if not points:
            # exact (or fallback) symbolic solution of the coupled system
            points = solve((seq, eq), (x, y))
        # complicated expressions may not be decidably real so evaluate to
        # check whether they are real or not
        points = [Point(i).n(prec) if prec is not None else Point(i)
                  for i in points if all(j.n(2).is_real for j in i)]
        slopes = [norm.subs(zip((x, y), pt.args)) for pt in points]
        if prec is not None:
            # infinities must not be rounded; leave them symbolic
            slopes = [i.n(prec) if i not in (-oo, oo, zoo) else i
                      for i in slopes]
        return [Line(pt, slope=s) for pt,s in zip(points, slopes)]
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the ellipse.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
arbitrary_point : Point
Raises
======
ValueError
When `parameter` already appears in the functions.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.arbitrary_point()
Point(3*cos(t), 2*sin(t))
"""
t = _symbol(parameter)
if t.name in (f.name for f in self.free_symbols):
raise ValueError(filldedent('Symbol %s already appears in object '
'and cannot be used as a parameter.' % t.name))
return Point(self.center.x + self.hradius*C.cos(t),
self.center.y + self.vradius*C.sin(t))
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the Ellipse.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.plot_interval()
[t, -pi, pi]
"""
t = _symbol(parameter)
return [t, -S.Pi, S.Pi]
    def random_point(self, seed=None):
        """A random point on the ellipse.

        Parameters
        ==========

        seed : hashable, optional
            Seed for a private random.Random instance; omitted, the shared
            module-level generator is used.

        Returns
        =======

        point : Point

        See Also
        ========

        sympy.geometry.point.Point
        arbitrary_point : Returns parameterized point on ellipse

        Notes
        =====

        An arbitrary_point with a random value of t substituted into it may
        not test as being on the ellipse because the expression tested that
        a point is on the ellipse doesn't simplify to zero and doesn't
        evaluate exactly to zero. This routine therefore substitutes exact
        Rational values for cos(t) and sin(t) (not t itself) so the returned
        point tests True with ``in``; the substitution is retried (up to 10
        times) until a valid point is obtained.

        Examples
        ========

        >>> from sympy import Point, Ellipse, Segment
        >>> e1 = Ellipse(Point(0, 0), 3, 2)
        >>> e1.random_point() # gives some random point
        Point(...)
        >>> p1 = e1.random_point(seed=0); p1.n(2)
        Point(2.1, 1.4)
        >>> p1 in e1
        True
        """
        from sympy import sin, cos, Rational
        t = _symbol('t')
        x, y = self.arbitrary_point(t).args
        # get a random value in [-1, 1) corresponding to cos(t)
        # and confirm that it will test as being in the ellipse
        if seed is not None:
            rng = random.Random(seed)
        else:
            rng = random
        for i in range(10):  # should be enough?
            # simplify this now or else the Float will turn s into a Float
            c = 2*Rational(rng.random()) - 1
            s = sqrt(1 - c**2)
            p1 = Point(x.subs(cos(t), c), y.subs(sin(t), s))
            if p1 in self:
                return p1
        raise GeometryError(
            'Having problems generating a point in the ellipse.')
def equation(self, x='x', y='y'):
"""The equation of the ellipse.
Parameters
==========
x : str, optional
Label for the x-axis. Default value is 'x'.
y : str, optional
Label for the y-axis. Default value is 'y'.
Returns
=======
equation : sympy expression
See Also
========
arbitrary_point : Returns parameterized point on ellipse
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(1, 0), 3, 2)
>>> e1.equation()
y**2/4 + (x/3 - 1/3)**2 - 1
"""
x = _symbol(x)
y = _symbol(y)
t1 = ((x - self.center.x) / self.hradius)**2
t2 = ((y - self.center.y) / self.vradius)**2
return t1 + t2 - 1
    def _do_line_intersection(self, o):
        """
        Find the intersection of a LinearEntity and the ellipse.

        All LinearEntities are treated as a line and filtered at
        the end to see that they lie in o.

        The line is parameterized as lp[0] + t*(lp[1] - lp[0]); substituting
        into the ellipse equation yields the quadratic
        a*t**2 + 2*b*t + c = 0 whose coefficients are built below using
        axis-scaled dot products.
        """
        hr_sq = self.hradius ** 2
        vr_sq = self.vradius ** 2
        lp = o.points

        # direction of the line and offset of its first point from center
        ldir = lp[1] - lp[0]
        diff = lp[0] - self.center
        # the same vectors scaled by 1/hr^2, 1/vr^2 (the ellipse metric)
        mdir = Point(ldir.x/hr_sq, ldir.y/vr_sq)
        mdiff = Point(diff.x/hr_sq, diff.y/vr_sq)

        # quadratic coefficients (b is half the linear coefficient)
        a = ldir.dot(mdir)
        b = ldir.dot(mdiff)
        c = diff.dot(mdiff) - 1
        det = simplify(b*b - a*c)

        result = []
        if det == 0:
            # tangent: a single (double) root
            t = -b / a
            result.append(lp[0] + (lp[1] - lp[0]) * t)
        else:
            is_good = True
            try:
                is_good = (det > 0)
            except NotImplementedError:  # symbolic, allow
                is_good = True

            if is_good:
                # two distinct roots of the quadratic
                root = sqrt(det)
                t_a = (-b - root) / a
                t_b = (-b + root) / a
                result.append( lp[0] + (lp[1] - lp[0]) * t_a )
                result.append( lp[0] + (lp[1] - lp[0]) * t_b )

        # keep only the points that actually lie on the ray/segment o
        return [r for r in result if r in o]
def _do_ellipse_intersection(self, o):
"""The intersection of an ellipse with another ellipse or a circle.
Private helper method for `intersection`.
"""
x = Dummy('x', real=True)
y = Dummy('y', real=True)
seq = self.equation(x, y)
oeq = o.equation(x, y)
result = solve([seq, oeq], [x, y])
return [Point(*r) for r in list(uniq(result))]
    def intersection(self, o):
        """The intersection of this ellipse and another geometrical entity
        `o`.

        Parameters
        ==========

        o : GeometryEntity

        Returns
        =======

        intersection : list of GeometryEntity objects

        Notes
        -----
        Currently supports intersections with Point, Line, Segment, Ray,
        Circle and Ellipse types.

        See Also
        ========

        sympy.geometry.entity.GeometryEntity

        Examples
        ========

        >>> from sympy import Ellipse, Point, Line, sqrt
        >>> e = Ellipse(Point(0, 0), 5, 7)
        >>> e.intersection(Point(0, 0))
        []
        >>> e.intersection(Point(5, 0))
        [Point(5, 0)]
        >>> e.intersection(Line(Point(0,0), Point(0, 1)))
        [Point(0, -7), Point(0, 7)]
        >>> e.intersection(Line(Point(5,0), Point(5, 1)))
        [Point(5, 0)]
        >>> e.intersection(Line(Point(6,0), Point(6, 1)))
        []
        >>> e = Ellipse(Point(-1, 0), 4, 3)
        >>> e.intersection(Ellipse(Point(1, 0), 4, 3))
        [Point(0, -3*sqrt(15)/4), Point(0, 3*sqrt(15)/4)]
        >>> e.intersection(Ellipse(Point(5, 0), 4, 3))
        [Point(2, -3*sqrt(7)/4), Point(2, 3*sqrt(7)/4)]
        >>> e.intersection(Ellipse(Point(100500, 0), 4, 3))
        []
        >>> e.intersection(Ellipse(Point(0, 0), 3, 4))
        [Point(-363/175, -48*sqrt(111)/175), Point(-363/175, 48*sqrt(111)/175), Point(3, 0)]
        >>> e.intersection(Ellipse(Point(-1, 0), 3, 4))
        [Point(-17/5, -12/5), Point(-17/5, 12/5), Point(7/5, -12/5), Point(7/5, 12/5)]
        """
        if isinstance(o, Point):
            if o in self:
                return [o]
            else:
                return []
        elif isinstance(o, LinearEntity):
            # LinearEntity may be a ray/segment, so check the points
            # of intersection for coincidence first
            return self._do_line_intersection(o)
        elif isinstance(o, Circle):
            return self._do_ellipse_intersection(o)
        elif isinstance(o, Ellipse):
            if o == self:
                # identical ellipses: the intersection is the ellipse itself
                return self
            else:
                return self._do_ellipse_intersection(o)
        # anything else: delegate to the other entity's intersection logic
        return o.intersection(self)
def __eq__(self, o):
"""Is the other GeometryEntity the same as this ellipse?"""
return isinstance(o, GeometryEntity) and (self.center == o.center and
self.hradius == o.hradius and
self.vradius == o.vradius)
    def __hash__(self):
        # Defining __eq__ suppresses the inherited hash, so restore the
        # parent implementation explicitly to keep Ellipse hashable.
        return super(Ellipse, self).__hash__()
def __contains__(self, o):
if isinstance(o, Point):
x = C.Dummy('x', real=True)
y = C.Dummy('y', real=True)
res = self.equation(x, y).subs({x: o.x, y: o.y})
return trigsimp(simplify(res)) is S.Zero
elif isinstance(o, Ellipse):
return self == o
return False
class Circle(Ellipse):
    """A circle in space.

    Constructed simply from a center and a radius, or from three
    non-collinear points.

    Parameters
    ==========

    center : Point
    radius : number or sympy expression
    points : sequence of three Points

    Attributes
    ==========

    radius (synonymous with hradius, vradius, major and minor)
    circumference
    equation

    Raises
    ======

    GeometryError
        When trying to construct circle from three collinear points.
        When trying to construct circle from incorrect parameters.

    See Also
    ========

    Ellipse, sympy.geometry.point.Point

    Examples
    ========

    >>> from sympy.geometry import Point, Circle
    >>> # a circle constructed from a center and radius
    >>> c1 = Circle(Point(0, 0), 5)
    >>> c1.hradius, c1.vradius, c1.radius
    (5, 5, 5)
    >>> # a circle constructed from three points
    >>> c2 = Circle(Point(0, 0), Point(1, 1), Point(1, 0))
    >>> c2.hradius, c2.vradius, c2.radius, c2.center
    (sqrt(2)/2, sqrt(2)/2, sqrt(2)/2, Point(1/2, 1/2))
    """
def __new__(cls, *args, **kwargs):
c, r = None, None
if len(args) == 3:
args = [Point(a) for a in args]
if Point.is_collinear(*args):
raise GeometryError(
"Cannot construct a circle from three collinear points")
from .polygon import Triangle
t = Triangle(*args)
c = t.circumcenter
r = t.circumradius
elif len(args) == 2:
# Assume (center, radius) pair
c = Point(args[0])
r = sympify(args[1])
if not (c is None or r is None):
return GeometryEntity.__new__(cls, c, r, **kwargs)
raise GeometryError("Circle.__new__ received unknown arguments")
@property
def radius(self):
"""The radius of the circle.
Returns
=======
radius : number or sympy expression
See Also
========
Ellipse.major, Ellipse.minor, Ellipse.hradius, Ellipse.vradius
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.radius
6
"""
return self.args[1]
@property
def vradius(self):
"""
This Ellipse property is an alias for the Circle's radius.
Whereas hradius, major and minor can use Ellipse's conventions,
the vradius does not exist for a circle. It is always a positive
value in order that the Circle, like Polygons, will have an
area that can be positive or negative as determined by the sign
of the hradius.
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.vradius
6
"""
return abs(self.radius)
@property
def circumference(self):
"""The circumference of the circle.
Returns
=======
circumference : number or SymPy expression
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.circumference
12*pi
"""
return 2 * S.Pi * self.radius
def equation(self, x='x', y='y'):
"""The equation of the circle.
Parameters
==========
x : str or Symbol, optional
Default value is 'x'.
y : str or Symbol, optional
Default value is 'y'.
Returns
=======
equation : SymPy expression
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(0, 0), 5)
>>> c1.equation()
x**2 + y**2 - 25
"""
x = _symbol(x)
y = _symbol(y)
t1 = (x - self.center.x)**2
t2 = (y - self.center.y)**2
return t1 + t2 - self.major**2
    def intersection(self, o):
        """The intersection of this circle with another geometrical entity.

        Parameters
        ==========

        o : GeometryEntity

        Returns
        =======

        intersection : list of GeometryEntities

        Examples
        ========

        >>> from sympy import Point, Circle, Line, Ray
        >>> p1, p2, p3 = Point(0, 0), Point(5, 5), Point(6, 0)
        >>> p4 = Point(5, 0)
        >>> c1 = Circle(p1, 5)
        >>> c1.intersection(p2)
        []
        >>> c1.intersection(p4)
        [Point(5, 0)]
        >>> c1.intersection(Ray(p1, p2))
        [Point(5*sqrt(2)/2, 5*sqrt(2)/2)]
        >>> c1.intersection(Line(p2, p3))
        []
        """
        if isinstance(o, Circle):
            if o.center == self.center:
                if o.radius == self.radius:
                    # coincident circles: the intersection is the circle
                    return o
                # concentric but different radii: no intersection
                return []
            # classic two-circle construction: find the chord (radical
            # line) of the intersection, then offset from its midpoint
            dx, dy = (o.center - self.center).args
            d = sqrt(simplify(dy**2 + dx**2))
            R = o.radius + self.radius
            if d > R or d < abs(self.radius - o.radius):
                # circles are disjoint (too far apart or one inside the other)
                return []

            # distance from self.center to the chord along the center line
            a = simplify((self.radius**2 - o.radius**2 + d**2) / (2*d))

            # midpoint of the chord
            x2 = self.center.x + (dx * a/d)
            y2 = self.center.y + (dy * a/d)

            # half-length of the chord; offsets are perpendicular to the
            # center-to-center direction
            h = sqrt(simplify(self.radius**2 - a**2))
            rx = -dy * (h/d)
            ry = dx * (h/d)

            xi_1 = simplify(x2 + rx)
            xi_2 = simplify(x2 - rx)
            yi_1 = simplify(y2 + ry)
            yi_2 = simplify(y2 - ry)

            ret = [Point(xi_1, yi_1)]
            if xi_1 != xi_2 or yi_1 != yi_2:
                # tangent circles give a single (double) point
                ret.append(Point(xi_2, yi_2))
            return ret

        return Ellipse.intersection(self, o)
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle
>>> Circle((0, 0), 1).scale(2, 2)
Circle(Point(0, 0), 2)
>>> Circle((0, 0), 1).scale(2, 4)
Ellipse(Point(0, 0), 2, 4)
"""
c = self.center
if pt:
pt = Point(pt)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
c = c.scale(x, y)
x, y = [abs(i) for i in (x, y)]
if x == y:
return self.func(c, x*self.radius)
h = v = self.radius
return Ellipse(c, hradius=h*x, vradius=v*y)
def reflect(self, line):
"""Override GeometryEntity.reflect since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle, Line
>>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
Circle(Point(1, 0), -1)
"""
c = self.center
c = c.reflect(line)
return self.func(c, -self.radius)
from .polygon import Polygon
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.nova import server
from heat.engine import support
try:
import pyrax # noqa
PYRAX_INSTALLED = True
except ImportError:
PYRAX_INSTALLED = False
LOG = logging.getLogger(__name__)
class CloudServer(server.Server):
    """Resource for Rackspace Cloud Servers.
    This resource overloads existent integrated OS::Nova::Server resource and
    is used for Rackspace Cloud Servers.
    """
    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))
    # Rackspace Cloud automation statuses: values of the
    # 'rax_service_level_automation' server metadata key.
    SM_STATUS_IN_PROGRESS = 'In Progress'
    SM_STATUS_COMPLETE = 'Complete'
    SM_STATUS_BUILD_ERROR = 'Build Error'
    # RackConnect automation statuses: values of the
    # 'rackconnect_automation_status' server metadata key.
    RC_STATUS_DEPLOYING = 'DEPLOYING'
    RC_STATUS_DEPLOYED = 'DEPLOYED'
    RC_STATUS_FAILED = 'FAILED'
    RC_STATUS_UNPROCESSABLE = 'UNPROCESSABLE'
    # Nova Extra specs
    FLAVOR_EXTRA_SPECS = 'OS-FLV-WITH-EXT-SPECS:extra_specs'
    FLAVOR_CLASSES_KEY = 'flavor_classes'
    FLAVOR_ACCEPT_ANY = '*'
    FLAVOR_CLASS = 'class'
    DISK_IO_INDEX = 'disk_io_index'
    # Double assignment: FLAVOR_CLASSES is the tuple, and each name inside
    # the parenthesized target list is bound to its flavor-class string.
    FLAVOR_CLASSES = (
        GENERAL1, MEMORY1, PERFORMANCE2, PERFORMANCE1, STANDARD1, IO1,
        ONMETAL, COMPUTE1
    ) = (
        'general1', 'memory1', 'performance2', 'performance1',
        'standard1', 'io1', 'onmetal', 'compute1',
    )
    # Glance image property that names the image this one was derived from.
    BASE_IMAGE_REF = 'base_image_ref'
    # flavor classes that can be booted ONLY from volume
    BFV_VOLUME_REQUIRED = {MEMORY1, COMPUTE1}
    # flavor classes that can NOT be booted from volume
    NON_BFV = {STANDARD1, ONMETAL}
    # Start from the upstream OS::Nova::Server schema and override the two
    # properties whose defaults/allowed values differ on Rackspace Cloud.
    properties_schema = copy.deepcopy(server.Server.properties_schema)
    properties_schema.update(
        {
            server.Server.USER_DATA_FORMAT: properties.Schema(
                properties.Schema.STRING,
                _('How the user_data should be formatted for the server. '
                  'For RAW the user_data is passed to Nova unmodified. '
                  'For SOFTWARE_CONFIG user_data is bundled as part of the '
                  'software config data, and metadata is derived from any '
                  'associated SoftwareDeployment resources.'),
                default=server.Server.RAW,
                constraints=[
                    constraints.AllowedValues([
                        server.Server.RAW, server.Server.SOFTWARE_CONFIG
                    ])
                ]
            ),
        }
    )
    properties_schema.update(
        {
            server.Server.SOFTWARE_CONFIG_TRANSPORT: properties.Schema(
                properties.Schema.STRING,
                _('How the server should receive the metadata required for '
                  'software configuration. POLL_TEMP_URL is the only '
                  'supported transport on Rackspace Cloud. This property is '
                  'retained for compatibility.'),
                default=server.Server.POLL_TEMP_URL,
                update_allowed=True,
                constraints=[
                    constraints.AllowedValues([
                        server.Server.POLL_TEMP_URL
                    ])
                ]
            ),
        }
    )

    def __init__(self, name, json_snippet, stack):
        super(CloudServer, self).__init__(name, json_snippet, stack)
        # One-shot flags so each "waiting for automation" event is only
        # emitted once per resource.
        self._managed_cloud_started_event_sent = False
        self._rack_connect_started_event_sent = False

    def _config_drive(self):
        # Enable the config drive whenever there is anything to deliver via
        # it: explicit request, software-config format, or raw user_data.
        user_data_format = self.properties[self.USER_DATA_FORMAT]
        is_sw_config = user_data_format == self.SOFTWARE_CONFIG
        user_data = self.properties.get(self.USER_DATA)
        config_drive = self.properties.get(self.CONFIG_DRIVE)
        if config_drive or is_sw_config or user_data:
            return True
        else:
            return False

    def _check_rax_automation_complete(self, server):
        # Poll the 'rax_service_level_automation' metadata tag until the
        # Rackspace-managed automation reports a terminal state.
        # Returns True when complete, False to keep polling; raises on error.
        if not self._managed_cloud_started_event_sent:
            msg = _("Waiting for Rackspace Cloud automation to complete")
            self._add_event(self.action, self.status, msg)
            self._managed_cloud_started_event_sent = True
        if 'rax_service_level_automation' not in server.metadata:
            # Tag not present yet; automation has not started reporting.
            LOG.debug("Cloud server does not have the "
                      "rax_service_level_automation metadata tag yet")
            return False
        mc_status = server.metadata['rax_service_level_automation']
        LOG.debug("Rackspace Cloud automation status: %s" % mc_status)
        if mc_status == self.SM_STATUS_IN_PROGRESS:
            return False
        elif mc_status == self.SM_STATUS_COMPLETE:
            msg = _("Rackspace Cloud automation has completed")
            self._add_event(self.action, self.status, msg)
            return True
        elif mc_status == self.SM_STATUS_BUILD_ERROR:
            raise exception.Error(_("Rackspace Cloud automation failed"))
        else:
            raise exception.Error(_("Unknown Rackspace Cloud automation "
                                    "status: %s") % mc_status)

    def _check_rack_connect_complete(self, server):
        # Poll the 'rackconnect_automation_status' metadata tag until the
        # RackConnect automation reaches a terminal state.
        # Returns True when complete, False to keep polling; raises on error.
        if not self._rack_connect_started_event_sent:
            msg = _("Waiting for RackConnect automation to complete")
            self._add_event(self.action, self.status, msg)
            self._rack_connect_started_event_sent = True
        if 'rackconnect_automation_status' not in server.metadata:
            LOG.debug("RackConnect server does not have the "
                      "rackconnect_automation_status metadata tag yet")
            return False
        rc_status = server.metadata['rackconnect_automation_status']
        LOG.debug("RackConnect automation status: %s" % rc_status)
        if rc_status == self.RC_STATUS_DEPLOYING:
            return False
        elif rc_status == self.RC_STATUS_DEPLOYED:
            self._server = None  # The public IP changed, forget old one
            return True
        elif rc_status == self.RC_STATUS_UNPROCESSABLE:
            # UNPROCESSABLE means the RackConnect automation was not
            # attempted (eg. Cloud Server in a different DC than
            # dedicated gear, so RackConnect does not apply). It is
            # okay if we do not raise an exception.
            reason = server.metadata.get('rackconnect_unprocessable_reason',
                                         None)
            if reason is not None:
                LOG.warning(_LW("RackConnect unprocessable reason: %s"),
                            reason)
            msg = _("RackConnect automation has completed")
            self._add_event(self.action, self.status, msg)
            return True
        elif rc_status == self.RC_STATUS_FAILED:
            raise exception.Error(_("RackConnect automation FAILED"))
        else:
            msg = _("Unknown RackConnect automation status: %s") % rc_status
            raise exception.Error(msg)

    def check_create_complete(self, server_id):
        """Check if server creation is complete and handle server configs."""
        # Base Nova checks first; then the Rackspace-specific automation
        # gates, each of which may keep the resource in-progress.
        if not super(CloudServer, self).check_create_complete(server_id):
            return False
        server = self.client_plugin().fetch_server(server_id)
        if not server:
            return False
        # RackConnect automation only applies when the tenant has the
        # 'rack_connect' role.
        if ('rack_connect' in self.context.roles and not
                self._check_rack_connect_complete(server)):
            return False
        if not self._check_rax_automation_complete(server):
            return False
        return True

    # Since rackspace compute service does not support 'os-interface' endpoint,
    # accessing addresses attribute of OS::Nova::Server results in NotFound
    # error. Here overriding '_add_port_for_address' method and using different
    # endpoint named 'os-virtual-interfacesv2' to get the same information.
    def _add_port_for_address(self, server):
        def get_port(net_name, address):
            # Find the virtual interface whose ip list contains this
            # (network_label, address) pair; returns None if absent.
            for iface in ifaces:
                for ip_addr in iface.ip_addresses:
                    if ip_addr['network_label'] == net_name and ip_addr[
                            'address'] == address:
                        return iface.id
        nets = copy.deepcopy(server.addresses)
        nova_ext = self.client().os_virtual_interfacesv2_python_novaclient_ext
        ifaces = nova_ext.list(server.id)
        for net_name, addresses in nets.items():
            for address in addresses:
                address['port'] = get_port(net_name, address['addr'])
        return self._extend_networks(nets)

    def _base_image_obj(self, image):
        # Resolve the given image to its base image (when it was derived
        # from one); otherwise return the image itself.
        image_obj = self.client_plugin('glance').get_image(image)
        if self.BASE_IMAGE_REF in image_obj:
            base_image = image_obj[self.BASE_IMAGE_REF]
            return self.client_plugin('glance').get_image(base_image)
        return image_obj

    def _image_flavor_class_match(self, flavor_type, image):
        # True when the image's 'flavor_classes' metadata permits this
        # flavor class, either explicitly or via the '*' wildcard without
        # an explicit '!<class>' exclusion.
        base_image_obj = self._base_image_obj(image)
        flavor_class_string = base_image_obj.get(self.FLAVOR_CLASSES_KEY)
        # If the flavor_class_string metadata does not exist or is
        # empty, do not validate image/flavor combo
        if not flavor_class_string:
            return True
        flavor_class_excluded = "!{0}".format(flavor_type)
        flavor_classes_accepted = flavor_class_string.split(',')
        if flavor_type in flavor_classes_accepted:
            return True
        if (self.FLAVOR_ACCEPT_ANY in flavor_classes_accepted and
                flavor_class_excluded not in flavor_classes_accepted):
            return True
        return False

    def validate(self):
        """Validate for Rackspace Cloud specific parameters"""
        super(CloudServer, self).validate()
        # check if image, flavor combination is valid
        flavor = self.properties[self.FLAVOR]
        flavor_obj = self.client_plugin().get_flavor(flavor)
        fl_xtra_specs = flavor_obj.to_dict().get(self.FLAVOR_EXTRA_SPECS, {})
        flavor_type = fl_xtra_specs.get(self.FLAVOR_CLASS, None)
        image = self.properties.get(self.IMAGE)
        if not image:
            # No image implies boot-from-volume; reject flavors that can
            # never boot from volume.
            if flavor_type in self.NON_BFV:
                msg = _('Flavor %s cannot be booted from volume.') % flavor
                raise exception.StackValidationFailed(message=msg)
            else:
                # we cannot determine details of the attached volume, so this
                # is all the validation possible
                return
        if not self._image_flavor_class_match(flavor_type, image):
            msg = _('Flavor %(flavor)s cannot be used with image '
                    '%(image)s.') % {'image': image, 'flavor': flavor}
            raise exception.StackValidationFailed(message=msg)
        if flavor_type in self.BFV_VOLUME_REQUIRED:
            msg = _('Flavor %(flavor)s must be booted from volume, '
                    'but image %(image)s was also specified.') % {
                        'flavor': flavor, 'image': image}
            raise exception.StackValidationFailed(message=msg)
def resource_mapping():
    """Map the OS::Nova::Server type to the Rackspace implementation."""
    return {'OS::Nova::Server': CloudServer}
def available_resource_mapping():
    """Expose the resource mapping only when the pyrax SDK is importable."""
    if not PYRAX_INSTALLED:
        return {}
    return resource_mapping()
| |
# Copyright 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fcntl
import hashlib
import os
import shutil
import subprocess
import sys
# Allow use of this module even if termcolor is missing. There are many
# standalone python scripts in build_tools that can be run directly without
# PYTHONPATH set (i.e. not via build/python_wrapper that adds this path.
# TODO(sbc): we should probably just assume that all the module dependencies
# are present.
try:
import termcolor
except ImportError:
termcolor = None
from naclports import error, paths
GS_URL = 'http://storage.googleapis.com/'
GS_BUCKET = 'naclports'
GS_MIRROR_URL = '%s%s/mirror' % (GS_URL, GS_BUCKET)
# Require the latest version of the NaCl SDK. naclports is built
# and tested against the pepper_canary release. To build aginst older
# versions of the SDK use the one of the pepper_XX branches (or use
# --skip-sdk-version-check).
MIN_SDK_VERSION = 46
arch_to_pkgarch = {
'x86_64': 'x86-64',
'i686': 'i686',
'arm': 'arm',
'pnacl': 'pnacl',
'emscripten': 'emscripten',
}
# Inverse of arch_to_pkgarch
pkgarch_to_arch = {v: k for k, v in arch_to_pkgarch.items()}
LOG_ERROR = 0
LOG_WARN = 1
LOG_INFO = 2
LOG_VERBOSE = 3
LOG_TRACE = 4
ELF_MAGIC = '\x7fELF'
PEXE_MAGIC = 'PEXE'
log_level = LOG_INFO
color_mode = 'auto'
def Color(message, color):
  """Return |message| wrapped in |color|, when colored output is enabled.

  Falls back to the plain message when termcolor is unavailable or color
  output is disabled (Color.enabled, set by CheckStdoutForColorSupport).
  """
  if not (termcolor and Color.enabled):
    return message
  return termcolor.colored(message, color)
def CheckStdoutForColorSupport():
  """In 'auto' color mode, enable color only when stdout is a TTY."""
  if color_mode != 'auto':
    return
  Color.enabled = sys.stdout.isatty()
def IsElfFile(filename):
  """Return True if |filename| is a regular file with the ELF magic bytes."""
  # Symlinks are never treated as ELF files themselves.
  if os.path.islink(filename):
    return False
  with open(filename) as f:
    return f.read(4) == ELF_MAGIC
def IsPexeFile(filename):
  """Return True if |filename| is a regular file with the PEXE magic bytes."""
  # Symlinks are never treated as pexe files themselves.
  if os.path.islink(filename):
    return False
  with open(filename) as f:
    return f.read(4) == PEXE_MAGIC
def Memoize(f):
  """Memoization decorator for functions taking one or more arguments."""
  class _Cache(dict):
    """dict that computes missing entries by calling the wrapped function."""

    def __init__(self, func):
      super(_Cache, self).__init__()
      self.f = func

    def __call__(self, *args):
      # Cache key is the positional-argument tuple.
      return self[args]

    def __missing__(self, key):
      value = self.f(*key)
      self[key] = value
      return value

  return _Cache(f)
def SetVerbose(enabled):
  """Switch between verbose and normal logging."""
  SetLogLevel(LOG_VERBOSE if enabled else LOG_INFO)
def SetLogLevel(verbosity):
  """Set the module-global logging verbosity threshold."""
  global log_level
  log_level = verbosity
def Log(message, verbosity=LOG_INFO):
  """Log a message to the console (stdout)."""
  # Suppressed when the message's verbosity exceeds the global level.
  if verbosity <= log_level:
    sys.stdout.write('%s\n' % message)
    sys.stdout.flush()
def LogHeading(message, suffix=''):
  """Log a colored/highlighted message with optional suffix."""
  if Color.enabled:
    Log(Color(message, 'green') + suffix)
    return
  if log_level > LOG_WARN:
    # When running in verbose mode make sure headings stand out.
    banner = '###################################################################'
    Log(banner)
    Log(message + suffix)
    Log(banner)
  else:
    Log(message + suffix)
def Warn(message):
  """Log |message| at warning verbosity with a 'warning: ' prefix."""
  Log('warning: ' + message, LOG_WARN)
def Trace(message):
  """Log |message| at trace verbosity."""
  Log(message, LOG_TRACE)
def LogVerbose(message):
  """Log |message| at verbose verbosity."""
  Log(message, LOG_VERBOSE)
def FindInPath(command_name):
  """Search user's PATH for a given executable.

  Returns:
    Full path to executable.

  Raises:
    error.Error: if the command cannot be found on PATH.
  """
  # On Windows, a bare command name may resolve via several extensions.
  if os.name == 'nt' and not os.path.splitext(command_name)[1]:
    extensions = ('.bat', '.com', '.exe')
  else:
    extensions = ('',)
  for directory in os.environ.get('PATH', '').split(os.pathsep):
    for extension in extensions:
      candidate = os.path.join(directory, command_name + extension)
      if os.path.isfile(candidate):
        return candidate
  raise error.Error('command not found: %s' % command_name)
def DownloadFile(filename, url):
  """Download a file from a given URL using curl.

  The download goes to <filename>.partial and is only renamed to the final
  name after curl succeeds, so a failed download never leaves a truncated
  file at the destination.

  Args:
    filename: the name of the file to download the URL to.
    url: the URL to fetch.

  Raises:
    error.Error: if curl is not found on PATH or the download fails.
  """
  temp_filename = filename + '.partial'
  # Ensure curl is in user's PATH
  FindInPath('curl')
  curl_cmd = ['curl', '--fail', '--location', '--stderr', '-', '-o',
              temp_filename]
  if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()):
    # Add --progress-bar but only if stdout is a TTY device.
    curl_cmd.append('--progress-bar')
  else:
    # otherwise suppress status output, since curl always assumes its
    # talking to a TTY and writes \r and \b characters. But add
    # --show-error so that when curl fails it at least prints something.
    curl_cmd += ['--silent', '--show-error']
  curl_cmd.append(url)
  if log_level > LOG_WARN:
    Log('Downloading: %s [%s]' % (url, filename))
  else:
    # Terse form: strip the common Google Storage prefix.
    Log('Downloading: %s' % url.replace(GS_URL, ''))
  try:
    subprocess.check_call(curl_cmd)
  except subprocess.CalledProcessError as e:
    raise error.Error('Error downloading file: %s' % str(e))
  # Atomically publish the completed download.
  os.rename(temp_filename, filename)
def CheckStamp(filename, contents=None):
  """Check that a given stamp file is up-to-date.

  Returns: False if the file does not exist or does not start with the
  given contents. True otherwise.
  """
  if not os.path.exists(filename):
    return False
  if contents is None:
    return True
  with open(filename) as f:
    return f.read().startswith(contents)
@Memoize
def GetSDKRoot():
  """Returns the root of the currently configured Native Client SDK."""
  root = os.environ.get('NACL_SDK_ROOT')
  if root is None:
    # Fall back to an SDK checked out under the output directory.
    local_sdk_root = os.path.join(paths.OUT_DIR, 'nacl_sdk')
    if not os.path.exists(local_sdk_root):
      raise error.Error('$NACL_SDK_ROOT not set')
    root = local_sdk_root
  if sys.platform == "cygwin":
    # Normalize windows-style separators reported under cygwin.
    root = root.replace('\\', '/')
  return root
@Memoize
def GetEmscriptenRoot():
  """Return the emscripten root, from $EMSCRIPTEN or the default checkout."""
  emscripten = os.environ.get('EMSCRIPTEN')
  if emscripten is None:
    local_root = os.path.join(paths.OUT_DIR, 'emsdk', 'emscripten')
    if not os.path.exists(local_root):
      raise error.Error('$EMSCRIPTEN not set and %s does not exist.' %
                        local_root)
    emscripten = local_root
  if not os.path.isdir(emscripten):
    raise error.Error('$EMSCRIPTEN environment variable does not point'
                      ' to a directory: %s' % emscripten)
  return emscripten
def SetupEmscripten():
  """Configure the process environment for building with emscripten.

  Sets $EMSCRIPTEN and $EM_CONFIG (when not already set) and makes sure a
  'node' binary is reachable on PATH, appending the bundled node directory
  if necessary.

  Raises:
    error.Error: if node cannot be found anywhere.
  """
  if 'EMSCRIPTEN' in os.environ:
    # Environment already configured by the caller; leave it untouched.
    return
  local_root = GetEmscriptenRoot()
  os.environ['EMSCRIPTEN'] = local_root
  # The emsdk keeps its .emscripten config next to the emscripten checkout.
  os.environ['EM_CONFIG'] = os.path.join(os.path.dirname(local_root),
                                         '.emscripten')
  try:
    FindInPath('node')
  except error.Error:
    # Fall back to the node bundled under the output directory.
    node_bin = os.path.join(paths.OUT_DIR, 'node', 'bin')
    if not os.path.isdir(node_bin):
      raise error.Error('node not found in path and default path not found: %s'
                        % node_bin)
    os.environ['PATH'] += ':' + node_bin
    # Re-check so a broken bundled node still raises a clear error.
    FindInPath('node')
@Memoize
def GetSDKVersion():
  """Returns the version (as a string) of the current SDK."""
  getos = os.path.join(GetSDKRoot(), 'tools', 'getos.py')
  return subprocess.check_output([getos, '--sdk-version']).strip()
def CheckSDKVersion(version):
  """Returns True if the currently configured SDK is 'version' or above."""
  return int(version) <= int(GetSDKVersion())
@Memoize
def GetSDKRevision():
  """Returns the revision (as an int) of the configured Native Client SDK."""
  getos = os.path.join(GetSDKRoot(), 'tools', 'getos.py')
  revision = subprocess.check_output([getos, '--sdk-revision']).strip()
  return int(revision)
@Memoize
def GetPlatform():
  """Returns the current platform name according to getos.py."""
  getos = os.path.join(GetSDKRoot(), 'tools', 'getos.py')
  return subprocess.check_output([getos]).strip()
@Memoize
def GetToolchainRoot(config):
  """Returns the toolchain folder for a given NaCl toolchain."""
  if config.toolchain == 'emscripten':
    # Emscripten lives outside the NaCl SDK tree.
    return GetEmscriptenRoot()
  platform = GetPlatform()
  if config.toolchain in ('pnacl', 'clang-newlib'):
    tc_dir = '%s_pnacl' % platform
  else:
    arch_map = {'arm': 'arm', 'i686': 'x86', 'x86_64': 'x86'}
    tc_dir = '%s_%s_%s' % (platform, arch_map[config.arch], config.libc)
  return os.path.join(GetSDKRoot(), 'toolchain', tc_dir)
@Memoize
def GetInstallRoot(config):
  """Returns the naclports install location given NaCl configuration."""
  tc_dir = GetToolchainRoot(config)
  if config.toolchain == 'emscripten':
    return os.path.join(tc_dir, 'system', 'local')
  # NaCl toolchains keep their sysroot under a <triple> subdirectory.
  if config.toolchain == 'pnacl':
    triple = 'le32-nacl'
  else:
    triple = '%s-nacl' % config.arch
  return os.path.join(tc_dir, triple, 'usr')
@Memoize
def GetInstallStampRoot(config):
  """Returns the installation metadata folder for the given configuration."""
  return os.path.join(GetInstallRoot(config), 'var', 'lib', 'npkg')
@Memoize
def GetStrip(config):
  """Return the path of the strip tool for the given configuration.

  Raises:
    error.Error: if the strip executable does not exist in the toolchain.
  """
  tc_dir = GetToolchainRoot(config)
  if config.toolchain == 'pnacl':
    strip = os.path.join(tc_dir, 'bin', 'pnacl-strip')
  else:
    strip = os.path.join(tc_dir, 'bin', '%s-nacl-strip' % config.arch)
  # Previously an assert, which is silently stripped under 'python -O';
  # raise a real error so a broken toolchain is always reported, matching
  # the error style used elsewhere in this module.
  if not os.path.exists(strip):
    raise error.Error('strip executable not found: %s' % strip)
  return strip
def GetInstallStamp(package_name, config):
  """Returns the filename of the install stamp for a given package.

  This file is written at install time and contains metadata
  about the installed package.
  """
  return os.path.join(GetInstallStampRoot(config), package_name + '.info')
def GetListFile(package_name, config):
  """Returns the filename of the list of installed files for a given package.

  This file is written at install time.
  """
  return os.path.join(GetInstallStampRoot(config), package_name + '.list')
def IsInstalled(package_name, config, stamp_content=None):
  """Returns True if the given package is installed."""
  # A package counts as installed when its stamp file exists and (when
  # given) starts with the expected stamp content.
  return CheckStamp(GetInstallStamp(package_name, config), stamp_content)
def CheckSDKRoot():
  """Check validity of NACL_SDK_ROOT.

  Verifies that the SDK root exists, looks like an SDK (contains
  tools/getos.py), and is at least MIN_SDK_VERSION.

  Raises:
    error.Error: if any of these checks fail.
  """
  root = GetSDKRoot()
  if not os.path.isdir(root):
    raise error.Error('$NACL_SDK_ROOT does not exist: %s' % root)
  # getos.py is used as a landmark to sanity-check the SDK layout.
  landmark = os.path.join(root, 'tools', 'getos.py')
  if not os.path.exists(landmark):
    raise error.Error("$NACL_SDK_ROOT (%s) doesn't look right. "
                      "Couldn't find landmark file (%s)" % (root, landmark))
  if not CheckSDKVersion(MIN_SDK_VERSION):
    raise error.Error(
        'This version of naclports requires at least version %s of\n'
        'the NaCl SDK. The version in $NACL_SDK_ROOT is %s. If you want\n'
        'to use naclports with an older version of the SDK please checkout\n'
        'one of the pepper_XX branches (or run with\n'
        '--skip-sdk-version-check).' % (MIN_SDK_VERSION, GetSDKVersion()))
def HashFile(filename):
  """Return the SHA1 (in hex format) of the contents of the given file."""
  block_size = 100 * 1024
  sha1 = hashlib.sha1()
  # Open in binary mode: hashing must see the raw bytes. Text mode can
  # translate newlines and (on Python 3) fails outright on non-decodable
  # binary content.
  with open(filename, 'rb') as f:
    while True:
      data = f.read(block_size)
      if not data:
        break
      sha1.update(data)
  return sha1.hexdigest()
class HashVerificationError(error.Error):
  """Raised when a file's SHA1 digest does not match the expected value."""
  pass
def VerifyHash(filename, sha1):
  """Verify that the SHA1 of the given file matches |sha1|.

  Returns nothing on success. (The previous docstring claimed a True
  return, but the function has always raised on mismatch instead.)

  Raises:
    HashVerificationError: if the file's actual digest differs from |sha1|.
  """
  file_sha1 = HashFile(filename)
  if sha1 != file_sha1:
    raise HashVerificationError(
        'verification failed: %s\nExpected: %s\nActual: %s' %
        (filename, sha1, file_sha1))
def RemoveTree(directory):
  """Recursively remove a directory and its contents.

  Does nothing if the directory does not exist.

  Raises:
    error.Error: if the path exists but is not a directory.
  """
  if not os.path.exists(directory):
    return
  if not os.path.isdir(directory):
    # Fix: the format string and argument were previously passed as two
    # separate exception arguments, leaving the message unformatted; use
    # '%' like every other error.Error call site in this module.
    raise error.Error('RemoveTree: not a directory: %s' % directory)
  shutil.rmtree(directory)
def RelPath(filename):
  """Return a pathname relative to the root of the naclports src tree.

  This is used mostly to make output more readable when printing filenames."""
  return os.path.relpath(filename, paths.NACLPORTS_ROOT)
def Makedirs(directory):
  """Create |directory| (and any missing parents) if it does not exist.

  Raises:
    error.Error: if the path exists but is not a directory.
  """
  if os.path.isdir(directory):
    # Already there; nothing to do.
    return
  if os.path.exists(directory):
    raise error.Error('mkdir: File exists and is not a directory: %s' %
                      directory)
  Trace("mkdir: %s" % directory)
  os.makedirs(directory)
class Lock(object):
  """Per-directory flock()-based context manager

  This class will raise an exception if another process already holds the
  lock for the given directory.
  """

  def __init__(self, lock_dir):
    if not os.path.exists(lock_dir):
      Makedirs(lock_dir)
    self.file_name = os.path.join(lock_dir, 'naclports.lock')
    # The lock file handle stays open for the lifetime of the lock.
    self.fd = open(self.file_name, 'w')

  def __enter__(self):
    try:
      # Non-blocking exclusive lock: fail immediately if already held.
      fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except Exception:
      msg = ("Unable to acquire lock (%s): Is naclports already "
             "running?" % self.file_name)
      raise error.Error(msg)

  def __exit__(self, exc_type, exc_val, exc_tb):
    os.remove(self.file_name)
    self.fd.close()
class BuildLock(Lock):
  """Lock used when building a package (essentially a lock on OUT_DIR)"""

  def __init__(self):
    # All builds share OUT_DIR, so locking it serializes builds.
    super(BuildLock, self).__init__(paths.OUT_DIR)
class InstallLock(Lock):
  """Lock used when installing/uninstalling package"""

  def __init__(self, config):
    # Lock the per-configuration install root, so installs for different
    # configurations can proceed in parallel.
    root = GetInstallRoot(config)
    super(InstallLock, self).__init__(root)
CheckStdoutForColorSupport()
| |
from . import VEXObject
import logging
l = logging.getLogger('pyvex.stmt')
class IRStmt(VEXObject):
    """
    IR statements in VEX represents operations with side-effects.
    """

    # Subclasses override this with their 'Ist_*' tag string.
    tag = None

    def __init__(self):
        VEXObject.__init__(self)

    def pp(self):
        # Pretty-print this statement (Python 2 print statement).
        print self.__str__()

    @property
    def expressions(self):
        # Every IRExpr-valued slot of this statement, each followed by its
        # (recursive) child expressions, in slot order.
        expressions = []
        for k in self.__slots__:
            v = getattr(self, k)
            if isinstance(v, IRExpr):
                expressions.append(v)
                expressions.extend(v.child_expressions)
        return expressions

    @property
    def constants(self):
        # Flattened list of the constants of every expression in this
        # statement.
        return sum((e.constants for e in self.expressions), [])

    @staticmethod
    def _from_c(c_stmt):
        # Translate a cffi-level IRStmt into the matching Python wrapper
        # class via the module-level tag dispatch table; NULL maps to None.
        if c_stmt[0] == ffi.NULL:
            return None
        tag_int = c_stmt.tag
        try:
            stmt_class = _tag_to_class[tag_int]
        except KeyError:
            raise PyVEXError('Unknown/unsupported IRStmtTag %s\n' % ints_to_enums[tag_int])
        return stmt_class._from_c(c_stmt)

    def typecheck(self, tyenv):  # pylint: disable=unused-argument,no-self-use
        # Statements with no typing constraints are trivially well-typed;
        # subclasses with payloads override this.
        return True

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        raise NotImplementedError()
class NoOp(IRStmt):
    """
    A no-operation statement. It is usually the result of an IR optimization.
    """

    tag = 'Ist_NoOp'

    def __init__(self):  # pylint:disable=unused-argument
        IRStmt.__init__(self)

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        return "IR-NoOp"

    @staticmethod
    def _from_c(c_stmt):
        # A no-op carries no payload on the C side.
        return NoOp()
class IMark(IRStmt):
    """
    An instruction mark. It marks the start of the statements that represent
    a single machine instruction (the end of those statements is marked by
    the next IMark or the end of the IRSB). Contains the address and length
    of the instruction.
    """

    __slots__ = ['addr', 'len', 'delta']

    tag = 'Ist_IMark'

    def __init__(self, addr, length, delta):
        IRStmt.__init__(self)
        self.addr = addr
        self.len = length
        self.delta = delta

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        return "------ IMark(0x%x, %d, %d) ------" % (self.addr, self.len, self.delta)

    @staticmethod
    def _from_c(c_stmt):
        imark = c_stmt.Ist.IMark
        return IMark(imark.addr, imark.len, imark.delta)
class AbiHint(IRStmt):
    """
    An ABI hint, provides specific information about this platform's ABI.
    """

    __slots__ = ['base', 'len', 'nia']

    tag = 'Ist_AbiHint'

    def __init__(self, base, length, nia):
        IRStmt.__init__(self)
        self.base = base
        self.len = length
        self.nia = nia

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        return "====== AbiHint(0x%s, %d, %s) ======" % (self.base, self.len, self.nia)

    @staticmethod
    def _from_c(c_stmt):
        hint = c_stmt.Ist.AbiHint
        return AbiHint(IRExpr._from_c(hint.base),
                       hint.len,
                       IRExpr._from_c(hint.nia))
class Put(IRStmt):
    """
    Write to a guest register, at a fixed offset in the guest state.
    """

    __slots__ = ['data', 'offset']

    tag = 'Ist_Put'

    def __init__(self, data, offset):
        IRStmt.__init__(self)
        self.data = data
        self.offset = offset

    # TODO: check that result_size and arch are available before looking up
    # the arch register name.
    def __str__(self, reg_name=None, arch=None, tyenv=None):
        if arch is not None and tyenv is not None:
            reg_name = arch.translate_register_name(self.offset, self.data.result_size(tyenv) / 8)
        if reg_name is None:
            return "PUT(offset=%s) = %s" % (self.offset, self.data)
        return "PUT(%s) = %s" % (reg_name, self.data)

    @staticmethod
    def _from_c(c_stmt):
        return Put(IRExpr._from_c(c_stmt.Ist.Put.data),
                   c_stmt.Ist.Put.offset)

    def typecheck(self, tyenv):
        # A Put is well-typed whenever its payload expression is.
        return self.data.typecheck(tyenv)
class PutI(IRStmt):
    """
    Write to a guest register, at a non-fixed offset in the guest state.
    """

    __slots__ = ['descr', 'ix', 'data', 'bias']

    tag = 'Ist_PutI'

    def __init__(self, descr, ix, data, bias):
        IRStmt.__init__(self)
        self.descr = descr
        self.ix = ix
        self.data = data
        self.bias = bias

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        return "PutI(%s)[%s,%d] = %s" % (self.descr, self.ix, self.bias, self.data)

    @staticmethod
    def _from_c(c_stmt):
        details = c_stmt.Ist.PutI.details
        return PutI(IRRegArray._from_c(details.descr),
                    IRExpr._from_c(details.ix),
                    IRExpr._from_c(details.data),
                    details.bias)

    def typecheck(self, tyenv):
        # The stored expression must typecheck and match the element type
        # of the register array being written.
        dataty = self.data.typecheck(tyenv)
        if dataty is None:
            return False
        if dataty != self.descr.elemTy:
            l.debug("Expression doesn't match RegArray type")
            return False
        return True
class WrTmp(IRStmt):
    """
    Assign a value to a temporary. Note that SSA rules require each tmp is only assigned to once. IR sanity checking
    will reject any block containing a temporary which is not assigned to exactly once.
    """

    __slots__ = ['data', 'tmp']

    tag = 'Ist_WrTmp'

    def __init__(self, tmp, data):
        IRStmt.__init__(self)
        self.tmp = tmp
        self.data = data

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        # Support for named register in string representation of expr.Get
        # NOTE(review): this line checks isinstance(self.data, Get) while the
        # second check below uses expr.Get — presumably both refer to the same
        # class via different imports; confirm and unify.
        if arch is not None and tyenv is not None and isinstance(self.data, Get):
            reg_name = arch.translate_register_name(self.data.offset, self.data.result_size(tyenv) / 8)
        if reg_name is not None and isinstance(self.data, expr.Get):
            return "t%d = %s" % (self.tmp, self.data.__str__(reg_name=reg_name))
        else:
            return "t%d = %s" % (self.tmp, self.data)

    @staticmethod
    def _from_c(c_stmt):
        return WrTmp(c_stmt.Ist.WrTmp.tmp,
                     IRExpr._from_c(c_stmt.Ist.WrTmp.data))

    def typecheck(self, tyenv):
        # The RHS expression must typecheck and agree with the declared
        # type of the destination tmp.
        dataty = self.data.typecheck(tyenv)
        if dataty is None:
            return False
        if dataty != tyenv.lookup(self.tmp):
            l.debug("Expression doesn't match tmp type")
            return False
        return True
class Store(IRStmt):
    """
    Write a value to memory.
    """

    __slots__ = ['addr', 'data', 'end']

    tag = 'Ist_Store'

    def __init__(self, addr, data, end):
        IRStmt.__init__(self)
        self.addr = addr
        self.data = data
        self.end = end

    @property
    def endness(self):
        # Alias for the 'end' slot, kept for API compatibility.
        return self.end

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        return "ST%s(%s) = %s" % (self.endness[-2:].lower(), self.addr, self.data)

    @staticmethod
    def _from_c(c_stmt):
        store = c_stmt.Ist.Store
        return Store(IRExpr._from_c(store.addr),
                     IRExpr._from_c(store.data),
                     ints_to_enums[store.end])

    def typecheck(self, tyenv):
        # Data and address must both typecheck; the address must be a full
        # machine word and the endness must be a valid enum value.
        if self.data.typecheck(tyenv) is None:
            return False
        addrty = self.addr.typecheck(tyenv)
        if addrty is None:
            return False
        if addrty != tyenv.wordty:
            l.debug("addr must be full word for arch")
            return False
        if self.end not in ('Iend_LE', 'Iend_BE'):
            l.debug("invalid endness enum")
            return False
        return True
class CAS(IRStmt):
    """
    an atomic compare-and-swap operation.
    """

    __slots__ = ['addr', 'dataLo', 'dataHi', 'expdLo', 'expdHi', 'oldLo', 'oldHi', 'end']

    tag = 'Ist_CAS'

    def __init__(self, addr, dataLo, dataHi, expdLo, expdHi, oldLo, oldHi, end):
        IRStmt.__init__(self)
        self.addr = addr
        self.dataLo = dataLo
        self.dataHi = dataHi
        self.expdLo = expdLo
        self.expdHi = expdHi
        self.oldLo = oldLo
        self.oldHi = oldHi
        self.end = end

    @property
    def endness(self):
        # Alias for the 'end' slot, kept for API compatibility.
        return self.end

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        return "t(%s,%s) = CAS%s(%s :: (%s,%s)->(%s,%s))" % (
            self.oldLo, self.oldHi, self.end[-2:].lower(), self.addr, self.expdLo, self.expdHi, self.dataLo, self.dataHi)

    @staticmethod
    def _from_c(c_stmt):
        return CAS(IRExpr._from_c(c_stmt.Ist.CAS.details.addr),
                   IRExpr._from_c(c_stmt.Ist.CAS.details.dataLo),
                   IRExpr._from_c(c_stmt.Ist.CAS.details.dataHi),
                   IRExpr._from_c(c_stmt.Ist.CAS.details.expdLo),
                   IRExpr._from_c(c_stmt.Ist.CAS.details.expdHi),
                   c_stmt.Ist.CAS.details.oldLo,
                   c_stmt.Ist.CAS.details.oldHi,
                   ints_to_enums[c_stmt.Ist.CAS.details.end])

    def typecheck(self, tyenv):
        # Address must typecheck, be a full machine word, and the endness
        # must be a valid enum value.
        addrty = self.addr.typecheck(tyenv)
        if addrty is None:
            return False
        if addrty != tyenv.wordty:
            l.debug("addr must be full word for arch")
            return False
        if self.end not in ('Iend_LE', 'Iend_BE'):
            l.debug("invalid endness enum")
            return False
        # 0xFFFFFFFF in oldHi marks a single-element CAS — presumably the
        # VEX IRTemp_INVALID sentinel; confirm against the VEX headers.
        if self.oldHi == 0xFFFFFFFF:
            # single-element case
            if self.expdHi is not None or self.dataHi is not None:
                l.debug("expdHi and dataHi must be None")
                return False
            expdLoTy = self.expdLo.typecheck(tyenv)
            dataLoTy = self.dataLo.typecheck(tyenv)
            if expdLoTy is None or dataLoTy is None:
                return False
            if tyenv.lookup(self.oldLo) != expdLoTy or expdLoTy != dataLoTy:
                l.debug("oldLo, expdL, dataLo must all have the same type")
                return False
        else:
            # double-element case
            expdLoTy = self.expdLo.typecheck(tyenv)
            dataLoTy = self.dataLo.typecheck(tyenv)
            expdHiTy = self.expdHi.typecheck(tyenv)
            dataHiTy = self.dataHi.typecheck(tyenv)
            if expdLoTy is None or dataLoTy is None or expdHiTy is None or dataHiTy is None:
                return False
            # All six operands (lo and hi halves) must share one type.
            if tyenv.lookup(self.oldLo) != expdLoTy or expdLoTy != dataLoTy or \
                    tyenv.lookup(self.oldHi) != expdHiTy or expdHiTy != dataHiTy or \
                    expdLoTy != expdHiTy:
                l.debug("oldLo, expdLo, dataLo, oldHi, expdHi, dataHi must all have the same type")
                return False
        return True
class LLSC(IRStmt):
    """
    Either Load-Linked or Store-Conditional, depending on STOREDATA. If STOREDATA is NULL then this is a Load-Linked,
    else it is a Store-Conditional.
    """

    __slots__ = ['addr', 'storedata', 'result', 'end']

    tag = 'Ist_LLSC'

    def __init__(self, addr, storedata, result, end):
        IRStmt.__init__(self)
        self.addr = addr
        self.storedata = storedata
        self.result = result
        self.end = end

    @property
    def endness(self):
        # Alias for the 'end' slot, kept for API compatibility.
        return self.end

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        if self.storedata is None:
            return "t%d = LD%s-Linked(%s)" % (self.result, self.end[-2:].lower(), self.addr)
        else:
            return "t%d = ( ST%s-Cond(%s) = %s )" % (self.result, self.end[-2:].lower(), self.addr, self.storedata)

    @staticmethod
    def _from_c(c_stmt):
        return LLSC(IRExpr._from_c(c_stmt.Ist.LLSC.addr),
                    IRExpr._from_c(c_stmt.Ist.LLSC.storedata),
                    c_stmt.Ist.LLSC.result,
                    ints_to_enums[c_stmt.Ist.LLSC.end])

    def typecheck(self, tyenv):
        addrty = self.addr.typecheck(tyenv)
        if addrty is None:
            return False
        if addrty != tyenv.wordty:
            l.debug("addr must be full word for arch")
            return False
        if self.end not in ('Iend_LE', 'Iend_BE'):
            l.debug("invalid endness enum")
            return False
        if self.storedata is not None:
            # store-conditional (the old comment said "load-linked", but a
            # non-None storedata is the store-conditional form per the class
            # docstring): the stored value must typecheck and the result tmp
            # holds the 1-bit success flag.
            storety = self.storedata.typecheck(tyenv)
            if storety is None:
                return False
            if tyenv.lookup(self.result) != 'Ity_I1':
                l.debug("result tmp must be Ity_I1")
                return False
        return True
class MBE(IRStmt):
    """An IR statement wrapping a VEX memory-bus event (Ist_MBE).

    The single attribute `event` holds the enum name of the bus event.
    """
    __slots__ = ['event']
    tag = 'Ist_MBE'

    def __init__(self, event):
        IRStmt.__init__(self)
        self.event = event

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        return "MBusEvent-%s" % (self.event,)

    @staticmethod
    def _from_c(c_stmt):
        # Translate the C-side integer enum into its string name.
        event_name = ints_to_enums[c_stmt.Ist.MBE.event]
        return MBE(event_name)
class Dirty(IRStmt):
    """A guarded call to a C helper function (VEX Ist_Dirty).

    The effect-descriptor fields (mFx, mAddr, mSize, nFxState) are carried
    through from the C side unmodified.
    """
    __slots__ = ['cee', 'guard', 'args', 'tmp', 'mFx', 'mAddr', 'mSize', 'nFxState']
    tag = 'Ist_Dirty'

    def __init__(self, cee, guard, args, tmp, mFx, mAddr, mSize, nFxState):
        IRStmt.__init__(self)
        self.cee = cee        # IRCallee: the helper being called
        self.guard = guard    # guard expression
        self.args = tuple(args)
        self.tmp = tmp        # temporary receiving the call's result
        self.mFx = mFx
        self.mAddr = mAddr
        self.mSize = mSize
        self.nFxState = nFxState

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        # Effects rendering is not implemented, hence the literal
        # "TODO(effects)" placeholder in the output.
        return "t%s = DIRTY %s %s ::: %s(%s)" % (
            self.tmp, self.guard, "TODO(effects)", self.cee, ','.join(str(a) for a in self.args))

    @property
    def child_expressions(self):
        # Flatten each argument's own children, then add the arguments
        # themselves, the guard, and the guard's children.
        expressions = sum((a.child_expressions for a in self.args), [])
        expressions.extend(self.args)
        expressions.append(self.guard)
        expressions.extend(self.guard.child_expressions)
        return expressions

    @staticmethod
    def _from_c(c_stmt):
        # The C-side args array is NULL-terminated; scan at most 20 slots.
        args = []
        for i in xrange(20):
            a = c_stmt.Ist.Dirty.details.args[i]
            if a == ffi.NULL:
                break
            args.append(IRExpr._from_c(a))
        return Dirty(IRCallee._from_c(c_stmt.Ist.Dirty.details.cee),
                     IRExpr._from_c(c_stmt.Ist.Dirty.details.guard),
                     tuple(args),
                     c_stmt.Ist.Dirty.details.tmp,
                     ints_to_enums[c_stmt.Ist.Dirty.details.mFx],
                     IRExpr._from_c(c_stmt.Ist.Dirty.details.mAddr),
                     c_stmt.Ist.Dirty.details.mSize,
                     c_stmt.Ist.Dirty.details.nFxState)
class Exit(IRStmt):
    """
    A conditional exit from the middle of an IRSB.
    """
    __slots__ = ['guard', 'dst', 'offsIP', 'jk']
    tag = 'Ist_Exit'

    def __init__(self, guard, dst, jk, offsIP):
        IRStmt.__init__(self)
        self.guard = guard    # guard expression for taking the exit
        self.dst = dst        # IRConst destination (see _from_c)
        self.offsIP = offsIP  # offset written with the new IP (rendered as PUT below)
        self.jk = jk          # jumpkind enum ('Ijk_*')

    @property
    def jumpkind(self):
        return self.jk

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        if arch is not None and tyenv is not None:
            # NOTE: Python 2 integer division; presumably arch.bits is a
            # multiple of 8, giving the register width in bytes.
            reg_name = arch.translate_register_name(self.offsIP, arch.bits / 8)
        if reg_name is None:
            return "if (%s) { PUT(offset=%d) = %#x; %s }" % (self.guard, self.offsIP, self.dst.value, self.jumpkind)
        else:
            return "if (%s) { PUT(%s) = %#x; %s }" % (self.guard, reg_name, self.dst.value, self.jumpkind)

    @property
    def child_expressions(self):
        return [self.guard, self.dst] + self.guard.child_expressions

    @staticmethod
    def _from_c(c_stmt):
        return Exit(IRExpr._from_c(c_stmt.Ist.Exit.guard),
                    IRConst._from_c(c_stmt.Ist.Exit.dst),
                    ints_to_enums[c_stmt.Ist.Exit.jk],
                    c_stmt.Ist.Exit.offsIP)

    def typecheck(self, tyenv):
        """Validate the jumpkind enum and require an Ity_I1 guard."""
        if not self.jk.startswith("Ijk_"):
            l.debug("Jumpkind is not a jumpkind enum")
            return False
        guardty = self.guard.typecheck(tyenv)
        if guardty is None:
            return False
        if guardty != 'Ity_I1':
            l.debug("guard must be Ity_I1")
            return False
        return True
class LoadG(IRStmt):
    """
    A guarded load.
    """
    __slots__ = ['addr', 'alt', 'guard', 'dst', 'cvt', 'end', 'cvt_types']
    tag = 'Ist_LoadG'

    def __init__(self, end, cvt, dst, addr, alt, guard):
        IRStmt.__init__(self)
        self.addr = addr    # address to load from
        self.alt = alt      # value used when the guard is false (see __str__)
        self.guard = guard  # guard expression
        self.dst = dst      # destination temporary
        self.cvt = cvt      # 'ILGop_*' conversion enum
        self.end = end      # endness enum
        # Ask libVEX for the input/output IRTypes of the conversion op and
        # cache them as enum-name strings.  The casts go through 'int *'
        # because the out-parameters are written as C enum values.
        type_in = ffi.new('IRType *')
        type_out = ffi.new('IRType *')
        pvc.typeOfIRLoadGOp(enums_to_ints[self.cvt], type_out, type_in)
        type_in = ffi.cast('int *', type_in)[0]
        type_out = ffi.cast('int *', type_out)[0]
        self.cvt_types = (ints_to_enums[type_in], ints_to_enums[type_out])

    @property
    def endness(self):
        return self.end

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        return "t%d = if (%s) %s(LD%s(%s)) else %s" % (
            self.dst, self.guard, self.cvt, self.end[-2:].lower(), self.addr, self.alt)

    @staticmethod
    def _from_c(c_stmt):
        return LoadG(ints_to_enums[c_stmt.Ist.LoadG.details.end],
                     ints_to_enums[c_stmt.Ist.LoadG.details.cvt],
                     c_stmt.Ist.LoadG.details.dst,
                     IRExpr._from_c(c_stmt.Ist.LoadG.details.addr),
                     IRExpr._from_c(c_stmt.Ist.LoadG.details.alt),
                     IRExpr._from_c(c_stmt.Ist.LoadG.details.guard))

    def typecheck(self, tyenv):
        """Validate this guarded load against the type environment `tyenv`."""
        addrty = self.addr.typecheck(tyenv)
        if addrty is None:
            return False
        if addrty != tyenv.wordty:
            l.debug("addr must be full word for arch")
            return False
        if self.end not in ('Iend_LE', 'Iend_BE'):
            l.debug("invalid endness enum")
            return False
        dstty = tyenv.lookup(self.dst)
        guardty = self.guard.typecheck(tyenv)
        altty = self.alt.typecheck(tyenv)
        if guardty is None or altty is None:
            return False
        # NOTE(review): dst/alt are required to be Ity_I32 here rather than
        # the conversion output type cached in self.cvt_types — confirm this
        # matches the guest architectures this code supports.
        if dstty != 'Ity_I32' or altty != 'Ity_I32':
            l.debug('dst and alt must be Ity_I32')
            return False
        if guardty != 'Ity_I1':
            l.debug('guard must be Ity_I1')
            return False
        if not self.cvt.startswith('ILGop_'):
            l.debug("Invalid cvt enum")
            return False
        return True
class StoreG(IRStmt):
    """
    A guarded store.
    """
    __slots__ = ['addr', 'data', 'guard', 'end']
    tag = 'Ist_StoreG'

    def __init__(self, end, addr, data, guard):
        IRStmt.__init__(self)
        self.addr = addr
        self.data = data
        self.guard = guard
        self.end = end

    @property
    def endness(self):
        return self.end

    def __str__(self, reg_name=None, arch=None, tyenv=None):
        return "if (%s) ST%s(%s) = %s" % (self.guard, self.end[-2:].lower(), self.addr, self.data)

    @staticmethod
    def _from_c(c_stmt):
        # Pull the fields out of the C-side details struct once.
        details = c_stmt.Ist.StoreG.details
        return StoreG(ints_to_enums[details.end],
                      IRExpr._from_c(details.addr),
                      IRExpr._from_c(details.data),
                      IRExpr._from_c(details.guard))

    def typecheck(self, tyenv):
        """Validate this guarded store against the type environment `tyenv`."""
        addr_type = self.addr.typecheck(tyenv)
        if addr_type is None:
            return False
        if addr_type != tyenv.wordty:
            l.debug("addr must be full word for arch")
            return False
        if self.end not in ('Iend_LE', 'Iend_BE'):
            l.debug("invalid endness enum")
            return False
        guard_type = self.guard.typecheck(tyenv)
        data_type = self.data.typecheck(tyenv)
        if guard_type is None or data_type is None:
            return False
        if guard_type != 'Ity_I1':
            l.debug('guard must be Ity_I1')
            return False
        return True
from .expr import IRExpr, Get
from .const import IRConst
from .enums import IRRegArray, ints_to_enums, enums_to_ints, IRCallee
from .errors import PyVEXError
from . import ffi, pvc, expr
# Map each VEX statement tag (as its integer enum value) to the wrapper
# class that implements it.
_tag_to_class = {
    enums_to_ints[_tag]: _cls
    for _tag, _cls in [
        ('Ist_NoOp', NoOp),
        ('Ist_IMark', IMark),
        ('Ist_AbiHint', AbiHint),
        ('Ist_Put', Put),
        ('Ist_PutI', PutI),
        ('Ist_WrTmp', WrTmp),
        ('Ist_Store', Store),
        ('Ist_LoadG', LoadG),
        ('Ist_StoreG', StoreG),
        ('Ist_CAS', CAS),
        ('Ist_LLSC', LLSC),
        ('Ist_Dirty', Dirty),
        ('Ist_MBE', MBE),
        ('Ist_Exit', Exit),
    ]
}
| |
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver
from tensorflow.python.training import training as train
def _get_checkpoint_filename(filepattern):
  """Returns checkpoint filename given directory or specific filepattern.

  A directory is resolved to its latest checkpoint; anything else is
  assumed to already name a checkpoint and is returned unchanged.
  """
  if not gfile.IsDirectory(filepattern):
    return filepattern
  return saver.latest_checkpoint(filepattern)
def load_checkpoint(filepattern):
  """Returns CheckpointReader for latest checkpoint.

  Args:
    filepattern: Directory with checkpoints file or path to checkpoint.

  Returns:
    `CheckpointReader` object.

  Raises:
    ValueError: if checkpoint_dir doesn't have 'checkpoint' file or checkpoints.
  """
  filename = _get_checkpoint_filename(filepattern)
  if filename is not None:
    return train.NewCheckpointReader(filename)
  raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
                   "given directory %s" % filepattern)
def load_variable(checkpoint_dir, name):
  """Returns a Tensor with the contents of the given variable in the checkpoint.

  Args:
    checkpoint_dir: Directory with checkpoints file or path to checkpoint.
    name: Name of the tensor to return.

  Returns:
    `Tensor` object.
  """
  return load_checkpoint(checkpoint_dir).get_tensor(name)
def list_variables(checkpoint_dir):
  """Returns list of all variables in the latest checkpoint.

  Args:
    checkpoint_dir: Directory with checkpoints file or path to checkpoint.

  Returns:
    List of tuples `(name, shape)`, sorted by variable name.
  """
  reader = load_checkpoint(checkpoint_dir)
  variable_map = reader.get_variable_to_shape_map()
  # Idiom fix: replace the manual sorted-keys/append loop with a single
  # comprehension (behavior unchanged: iterating a dict yields its keys).
  return [(name, variable_map[name]) for name in sorted(variable_map)]
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
# TODO(ipolosukhin): Refactor variable_scope module to provide nicer APIs.
def _set_checkpoint_initializer(variable, file_pattern, tensor_name, slice_spec,
                                name="checkpoint_initializer"):
  """Sets variable initializer to assign op form value in checkpoint's tensor.

  Args:
    variable: `Variable` object.
    file_pattern: string, where to load checkpoints from.
    tensor_name: Name of the `Tensor` to load from checkpoint reader.
    slice_spec: Slice specification for loading partitioned variables.
    name: Name of the operation.
  """
  base_type = variable.dtype.base_dtype
  # Build a raw restore op and install it as the variable's initializer,
  # so that running the initializer pulls the value straight from the
  # checkpoint.  Uses the private gen_io_ops._restore_slice (covered by
  # the protected-access pragma above).
  restore_op = gen_io_ops._restore_slice(
      file_pattern,
      tensor_name,
      slice_spec,
      base_type,
      preferred_shard=-1,
      name=name)
  variable._initializer_op = state_ops.assign(variable, restore_op)
def _set_variable_or_list_initializer(variable_or_list, file_pattern,
                                      tensor_name):
  """Sets checkpoint initializers on a variable or a list of slice variables.

  A list/tuple is interpreted as the slices of one partitioned variable;
  all slices must share the same full name (via their _save_slice_info).
  """
  if isinstance(variable_or_list, (list, tuple)):
    # A set of slices.
    slice_name = None
    for v in variable_or_list:
      if slice_name is None:
        slice_name = v._save_slice_info.full_name
      elif slice_name != v._save_slice_info.full_name:
        raise ValueError("Slices must all be from the same tensor: %s != %s" %
                         (slice_name, v._save_slice_info.full_name))
      _set_checkpoint_initializer(v, file_pattern, tensor_name,
                                  v._save_slice_info.spec)
  else:
    # A single (unpartitioned) variable: empty slice spec.
    _set_checkpoint_initializer(variable_or_list, file_pattern, tensor_name, "")
def init_from_checkpoint(checkpoint_dir, assignment_map):
  """Using assignment map initializes current variables with loaded tensors.

  Note: This overrides default initialization ops of specified variables and
  redefines dtype.

  Assignment map supports next syntax:
    `'scope_name/': 'checkpoint_scope_name/'` - will load all variables in
      current `scope_name` from `checkpoint_scope_name` with matching variable
      names.
    `'scope_name/variable_name': 'checkpoint_scope_name/some_other_variable'` -
      will initialize `scope_name/variable_name` variable
      from `checkpoint_scope_name/some_other_variable`.
    `variable: 'scope_variable_name'` - will initialize given variable with
      variable from the checkpoint.
    `'scope_name/': '/'` - will load all variables in current `scope_name` from
      checkpoint's root (e.g. no scope).

  Supports loading into partitioned variables, which are represented as
  '<variable>/part_<part #>'.

  Example:
  ```python
    # Create variables.
    with tf.variable_scope('test'):
      m = tf.get_variable('my_var')
    with tf.variable_scope('test2'):
      var2 = tf.get_variable('my_var')
    ...
    # Specify which variables to initialize from checkpoint.
    init_from_checkpoint(checkpoint_dir, {
      'test/my_var': 'some_var',
      'test2/': 'some_scope/'})
    ...
    # Or use `Variable` objects to identify what to initialize.
    init_from_checkpoint(checkpoint_dir, {
      var2: 'some_scope/var2',
    })
    ...
    # Initialize variables as usual.
    session.run(tf.get_all_variables())
  ```

  Args:
    checkpoint_dir: Directory with checkpoints file or path to checkpoint.
    assignment_map: Dict, where keys are names of current variables
      (in default graph) and values are names of the variables
      in the checkpoint.

  Raises:
    tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
    ValueError: If missing variables in current graph.
  """
  filepattern = _get_checkpoint_filename(checkpoint_dir)
  reader = load_checkpoint(checkpoint_dir)
  variable_map = reader.get_variable_to_shape_map()
  for current_name, tensor_name in six.iteritems(assignment_map):
    scopes = ""
    var = None
    # Check if this is Variable object.
    if isinstance(current_name, variables.Variable):
      var = current_name
    else:
      var_scope = vs._get_default_variable_store()
      # Check if this is variable in var_store.
      var = var_scope._vars.get(current_name, None)
      # Also check if variable is partitioned as list.
      if var is None:
        if current_name + "/part_0" in var_scope._vars:
          # Collect the parts in order until a gap is found.
          var = []
          i = 0
          while current_name + "/part_%d" % i in var_scope._vars:
            var.append(var_scope._vars[current_name + "/part_%d" % i])
            i += 1
    if var is not None:
      # If 1 to 1 mapping was provided, find variable in the scope.
      if tensor_name not in variable_map:
        raise ValueError("Tensor %s is not found in %s checkpoint" % (
            tensor_name, checkpoint_dir
        ))
      if isinstance(var, variables.Variable):
        # Additional at-call-time checks.
        if not var.get_shape().is_compatible_with(variable_map[tensor_name]):
          raise ValueError(
              "Shape of variable %s (%s) doesn't match with shape of "
              "tensor %s (%s) from checkpoint reader." % (
                  var.name, str(var.get_shape()),
                  tensor_name, str(variable_map[tensor_name])
              ))
      _set_variable_or_list_initializer(var, filepattern, tensor_name)
      logging.info("Initialize variable %s from checkpoint %s with %s" % (
          current_name, checkpoint_dir, tensor_name
      ))
    else:
      if "/" in current_name:
        scopes = current_name[:current_name.rindex("/")]
        current_name = current_name[current_name.rindex("/") + 1:]
      if not tensor_name.endswith("/"):
        raise ValueError(
            "Assignment map with scope only name (%s) "
            "should map to scope only (%s). "
            "Should be 'scope/': 'other_scope/'." % (
                scopes, tensor_name
            ))
      # If scope to scope mapping was provided, find all variables in the scope.
      # NOTE(review): when `scopes` is "" (no "/" in current_name) the
      # startswith("") test matches every variable and the
      # `var_name[len(scopes) + 1:]` slices below drop the first character
      # of the name — verify this path against intended root-scope mapping.
      for var_name in var_scope._vars:
        if var_name.startswith(scopes):
          # Lookup name with specified prefix and suffix from current variable.
          # If tensor_name given is '/' (root), don't use it for full name.
          if tensor_name != "/":
            full_tensor_name = tensor_name + var_name[len(scopes) + 1:]
          else:
            full_tensor_name = var_name[len(scopes) + 1:]
          if full_tensor_name not in variable_map:
            raise ValueError(
                "Tensor %s (%s in %s) is not found in %s checkpoint" % (
                    full_tensor_name, var_name[len(scopes) + 1:], tensor_name,
                    checkpoint_dir
                ))
          var = var_scope._vars[var_name]
          _set_variable_or_list_initializer(var, filepattern, full_tensor_name)
          logging.info("Initialize variable %s from checkpoint %s with %s" % (
              var_name, checkpoint_dir, tensor_name
          ))
# pylint: enable=protected-access
| |
"""
fs.contrib.davfs
================
FS implementation accessing a WebDAV server.
This module provides a relatively-complete WebDAV Level 1 client that exposes
a WebDAV server as an FS object. Locks are not currently supported.
Requires the dexml module:
http://pypi.python.org/pypi/dexml/
"""
# Copyright (c) 2009-2010, Cloud Matrix Pty. Ltd.
# All rights reserved; available under the terms of the MIT License.
import os
import sys
import httplib
import socket
from urlparse import urlparse
import stat as statinfo
from urllib import quote as urlquote
from urllib import unquote as urlunquote
import base64
import re
import datetime
import cookielib
import fnmatch
import xml.dom.pulldom
import fs
from fs.base import *
from fs.path import *
from fs.errors import *
from fs.remote import RemoteFileBuffer
from fs.contrib.davfs.util import *
from fs.contrib.davfs import xmlobj
from fs.contrib.davfs.xmlobj import *
logger = fs.getLogger("fs.contrib.davfs")

import errno

# Socket error codes that _raw_request will transparently retry on.
_RETRYABLE_ERRORS = [errno.EADDRINUSE]
try:
    _RETRYABLE_ERRORS.append(errno.ECONNRESET)
    _RETRYABLE_ERRORS.append(errno.ECONNABORTED)
except AttributeError:
    # Platforms lacking these errno names: fall back to the raw value
    # (104 is ECONNRESET on Linux).
    _RETRYABLE_ERRORS.append(104)
class DAVFS(FS):
"""Access a remote filesystem via WebDAV.
This FS implementation provides access to a remote filesystem via the
WebDAV protocol. Basic Level 1 WebDAV is supported; locking is not
currently supported, but planned for the future.
HTTP Basic authentication is supported; provide a dict giving username
and password in the "credentials" argument, or a callback for obtaining
one in the "get_credentials" argument.
To use custom HTTP connector classes (e.g. to implement proper certificate
checking for SSL connections) you can replace the factory functions in the
DAVFS.connection_classes dictionary, or provide the "connection_classes"
argument.
"""
# Factory callables used to create the underlying HTTP connection for
# each URL scheme; may be overridden per-instance via the
# "connection_classes" constructor argument (see __init__).
connection_classes = {
    "http": httplib.HTTPConnection,
    "https": httplib.HTTPSConnection,
}

# Standard pyfilesystem meta-information flags for this FS class.
_meta = { 'virtual' : False,
          'read_only' : False,
          'unicode_paths' : True,
          'case_insensitive_paths' : False,
          'network' : True
        }
def __init__(self,url,credentials=None,get_credentials=None,thread_synchronize=True,connection_classes=None,timeout=None):
    """DAVFS constructor.

    The only required argument is the root url of the remote server. If
    authentication is required, provide the 'credentials' keyword argument
    and/or the 'get_credentials' keyword argument.  The former is a dict
    of credentials info, while the latter is a callback function returning
    such a dict. Only HTTP Basic Auth is supported at this stage, so the
    only useful keys in a credentials dict are 'username' and 'password'.
    """
    if not url.endswith("/"):
        url = url + "/"
    self.url = url
    self.timeout = timeout
    self.credentials = credentials
    self.get_credentials = get_credentials
    if connection_classes is not None:
        # Per-instance override of the scheme -> connection-factory map.
        self.connection_classes = self.connection_classes.copy()
        self.connection_classes.update(connection_classes)
    self._connections = []
    self._cookiejar = cookielib.CookieJar()
    super(DAVFS,self).__init__(thread_synchronize=thread_synchronize)
    # Check that the server speaks WebDAV, and normalize the URL
    # after any redirects have been followed.
    self.url = url
    pf = propfind(prop="<prop xmlns='DAV:'><resourcetype /></prop>")
    resp = self._request("/","PROPFIND",pf.render(),{"Depth":"0"})
    try:
        if resp.status == 404:
            raise ResourceNotFoundError("/",msg="root url gives 404")
        if resp.status in (401,403):
            raise PermissionDeniedError("listdir (http %s)" % resp.status)
        if resp.status != 207:
            # 207 Multi-Status is the success response for PROPFIND.
            msg = "server at %s doesn't speak WebDAV" % (self.url,)
            raise RemoteConnectionError("",msg=msg,details=resp.read())
    finally:
        resp.close()
    # _request followed any redirects, so this is the canonical root URL.
    self.url = resp.request_url
    self._url_p = urlparse(self.url)
def close(self):
    """Shut down this filesystem, closing any HTTP connections still open."""
    for connection in self._connections:
        connection.close()
    super(DAVFS,self).close()
def _add_connection(self,con):
    # Track the connection so close() can shut it down later.
    self._connections.append(con)
def _del_connection(self,con):
    # Stop tracking the connection; close it only if it was still tracked
    # (remove() raising ValueError means some other path already took it).
    try:
        self._connections.remove(con)
    except ValueError:
        pass
    else:
        con.close()
def __str__(self):
    """Human-readable description of this filesystem."""
    return "<DAVFS: {0}>".format(self.url)
__repr__ = __str__
def __getstate__(self):
    """Pickle support: drop attributes that can't be pickled (they are
    rebuilt by __setstate__)."""
    # Python2.5 cannot load pickled urlparse.ParseResult objects.
    state = super(DAVFS,self).__getstate__()
    del state["_url_p"]
    # CookieJar objects contain a lock, so they can't be pickled.
    del state["_cookiejar"]
    return state
def __setstate__(self,state):
    """Pickle support: rebuild the attributes removed by __getstate__."""
    super(DAVFS,self).__setstate__(state)
    self._url_p = urlparse(self.url)
    self._cookiejar = cookielib.CookieJar()
def getpathurl(self, path, allow_none=False):
    """Convert a client-side path into a server-side URL.

    `allow_none` is accepted for API compatibility but is not used by
    this implementation.
    """
    path = relpath(normpath(path))
    if path.endswith("/"):
        path = path[:-1]
    if isinstance(path,unicode):
        # Python 2: urlquote expects a byte string.
        path = path.encode("utf8")
    return self.url + urlquote(path)
def _url2path(self,url):
    """Convert a server-side URL into a client-side path."""
    path = urlunquote(urlparse(url).path)
    root = self._url_p.path
    # Strip the root prefix but keep its trailing slash (hence the -1),
    # then decode the Python 2 byte string into unicode.
    return path[len(root)-1:].decode("utf8")
def _isurl(self,path,url):
    """Check whether the given URL corresponds to the given local path."""
    local = normpath(relpath(path))
    remote = relpath(normpath(self._url2path(url)))
    return local == remote
def _request(self,path,method,body="",headers=None):
    """Issue a HTTP request to the remote server.

    This is a simple wrapper around httplib that does basic error and
    sanity checking e.g. following redirects and providing authentication.
    """
    # Bug fix: `headers` used to default to a shared mutable dict.
    # _raw_request mutates the mapping it is given (it inserts an
    # Authorization header), so the shared default could leak headers
    # between unrelated requests.  Use a fresh dict per call instead.
    if headers is None:
        headers = {}
    url = self.getpathurl(path)
    visited = []
    resp = None
    try:
        resp = self._raw_request(url,method,body,headers)
        # Loop to retry for redirects and authentication responses.
        while resp.status in (301,302,401,403):
            resp.close()
            if resp.status in (301,302,):
                visited.append(url)
                url = resp.getheader("Location",None)
                if not url:
                    raise OperationFailedError(msg="no location header in 301 response")
                if url in visited:
                    raise OperationFailedError(msg="redirection seems to be looping")
                if len(visited) > 10:
                    raise OperationFailedError("too much redirection")
            elif resp.status in (401,403):
                if self.get_credentials is None:
                    # No way to obtain credentials; return the 401/403
                    # response as-is (note it has already been closed).
                    break
                else:
                    creds = self.get_credentials(self.credentials)
                    if creds is None:
                        break
                    else:
                        self.credentials = creds
            resp = self._raw_request(url,method,body,headers)
    except Exception:
        if resp is not None:
            resp.close()
        raise
    resp.request_url = url
    return resp
def _raw_request(self,url,method,body,headers,num_tries=0):
"""Perform a single HTTP request, without any error handling."""
if self.closed:
raise RemoteConnectionError("",msg="FS is closed")
if isinstance(url,basestring):
url = urlparse(url)
if self.credentials is not None:
username = self.credentials.get("username","")
password = self.credentials.get("password","")
if username is not None and password is not None:
creds = "%s:%s" % (username,password,)
creds = "Basic %s" % (base64.b64encode(creds).strip(),)
headers["Authorization"] = creds
(size,chunks) = normalize_req_body(body)
try:
try:
ConClass = self.connection_classes[url.scheme.lower()]
except KeyError:
msg = "unsupported protocol: '%s'" % (url.scheme,)
raise RemoteConnectionError(msg=msg)
logger.debug("DAVFS >REQ %s %s/%s",method,url.hostname,url.path)
con = ConClass(url.hostname,url.port,timeout=self.timeout)
self._add_connection(con)
try:
con.putrequest(method,url.path)
if size is not None:
con.putheader("Content-Length",str(size))
if hasattr(body,"md5"):
md5 = body.md5.decode("hex").encode("base64")
con.putheader("Content-MD5",md5)
for hdr,val in headers.iteritems():
con.putheader(hdr,val)
self._cookiejar.add_cookie_header(FakeReq(con,url.scheme,url.path))
con.endheaders()
for chunk in chunks:
con.send(chunk)
if self.closed:
raise RemoteConnectionError("",msg="FS is closed")
resp = con.getresponse()
self._cookiejar.extract_cookies(FakeResp(resp),FakeReq(con,url.scheme,url.path))
except Exception, e:
logger.exception("DAVFS <ERR %s %s/%s",method,url.hostname,url.path)
self._del_connection(con)
raise
else:
logger.debug("DAVFS <RESP %s %s %s/%s",resp.status,method,url.hostname,url.path)
old_close = resp.close
def new_close():
old_close()
self._del_connection(con)
resp.close = new_close
return resp
except socket.error, e:
if e.args[0] in _RETRYABLE_ERRORS:
if num_tries < 3:
num_tries += 1
return self._raw_request(url,method,body,headers,num_tries)
try:
msg = e.args[1]
except IndexError:
msg = str(e)
raise RemoteConnectionError("",msg=msg,details=e)
def setcontents(self,path, contents, chunk_size=1024*64):
    """Write `contents` to the file at `path` via an HTTP PUT.

    `chunk_size` is accepted for API compatibility but is not used here;
    request-body chunking happens inside _raw_request.
    """
    resp = self._request(path,"PUT",contents)
    resp.close()
    # The status attribute remains readable after close().
    if resp.status == 405:
        raise ResourceInvalidError(path)
    if resp.status == 409:
        # 409 Conflict: an intermediate collection is missing.
        raise ParentDirectoryMissingError(path)
    if resp.status not in (200,201,204):
        raise_generic_error(resp,"setcontents",path)
def open(self,path,mode="r"):
    """Open the file at `path`, returning a file-like object.

    Binary/text qualifiers in `mode` are ignored.  Mode "r-" streams the
    HTTP response body directly; other modes go through a
    RemoteFileBuffer.
    """
    mode = mode.replace("b","").replace("t","")
    # Truncate the file if requested
    contents = ""
    if "w" in mode:
        self.setcontents(path,contents)
    else:
        contents = self._request(path,"GET")
        if contents.status == 404:
            # Create the file if it's missing in append mode.
            if "a" not in mode:
                contents.close()
                raise ResourceNotFoundError(path)
            contents = ""
            self.setcontents(path,contents)
        elif contents.status in (401,403):
            contents.close()
            raise PermissionDeniedError("open")
        elif contents.status != 200:
            contents.close()
            raise_generic_error(contents,"open",path)
        elif self.isdir(path):
            # A successful GET on a collection: not a regular file.
            contents.close()
            raise ResourceInvalidError(path)
    # For streaming reads, return the socket contents directly.
    if mode == "r-":
        contents.size = contents.getheader("Content-Length",None)
        if contents.size is not None:
            try:
                contents.size = int(contents.size)
            except ValueError:
                contents.size = None
        if not hasattr(contents,"__exit__"):
            # Retrofit context-manager support onto the response object.
            contents.__enter__ = lambda *a: contents
            contents.__exit__ = lambda *a: contents.close()
        return contents
    # For everything else, use a RemoteFileBuffer.
    # This will take care of closing the socket when it's done.
    return RemoteFileBuffer(self,path,mode,contents)
def exists(self,path):
    """Check whether a resource exists at the given path."""
    pf = propfind(prop="<prop xmlns='DAV:'><resourcetype /></prop>")
    response = self._request(path,"PROPFIND",pf.render(),{"Depth":"0"})
    response.close()
    # 207 Multi-Status means the resource exists; 404 means it doesn't.
    outcome = {207: True, 404: False}.get(response.status)
    if outcome is None:
        raise_generic_error(response,"exists",path)
    return outcome
def isdir(self,path):
    """Check whether `path` refers to a directory (DAV collection)."""
    pf = propfind(prop="<prop xmlns='DAV:'><resourcetype /></prop>")
    response = self._request(path,"PROPFIND",pf.render(),{"Depth":"0"})
    try:
        if response.status == 404:
            return False
        if response.status != 207:
            raise_generic_error(response,"isdir",path)
        body = response.read()
        msres = multistatus.parse(body)
        for res in msres.responses:
            # Only inspect the entry for the path itself.
            if self._isurl(path,res.href):
                for ps in res.propstats:
                    # A <collection/> element in resourcetype marks a directory.
                    if ps.props.getElementsByTagNameNS("DAV:","collection"):
                        return True
        return False
    finally:
        response.close()
def isfile(self,path):
    """Check whether `path` refers to a file (non-collection resource)."""
    pf = propfind(prop="<prop xmlns='DAV:'><resourcetype /></prop>")
    response = self._request(path,"PROPFIND",pf.render(),{"Depth":"0"})
    try:
        if response.status == 404:
            return False
        if response.status != 207:
            raise_generic_error(response,"isfile",path)
        msres = multistatus.parse(response.read())
        for res in msres.responses:
            if self._isurl(path,res.href):
                for ps in res.propstats:
                    # A file has a resourcetype property but no
                    # <collection/> element inside it.
                    rt = ps.props.getElementsByTagNameNS("DAV:","resourcetype")
                    cl = ps.props.getElementsByTagNameNS("DAV:","collection")
                    if rt and not cl:
                        return True
        return False
    finally:
        response.close()
def listdir(self,path="./",wildcard=None,full=False,absolute=False,dirs_only=False,files_only=False):
    """Directory listing as an eager list; see ilistdir() for the lazy version."""
    entries = self.ilistdir(path=path,wildcard=wildcard,full=full,absolute=absolute,dirs_only=dirs_only,files_only=files_only)
    return list(entries)
def ilistdir(self,path="./",wildcard=None,full=False,absolute=False,dirs_only=False,files_only=False):
    """Generator yielding the names of entries in the directory at `path`.

    Raises ResourceInvalidError if `path` turns out not to be a
    directory (checked via the PROPFIND response for the path itself).
    """
    props = "<D:resourcetype />"
    dir_ok = False
    for res in self._do_propfind(path,props):
        if self._isurl(path,res.href):
            # The directory itself, check it's actually a directory
            for ps in res.propstats:
                if ps.props.getElementsByTagNameNS("DAV:","collection"):
                    dir_ok = True
                    break
        else:
            nm = basename(self._url2path(res.href))
            entry_ok = False
            if dirs_only:
                # Accept only entries that report a <collection/>.
                for ps in res.propstats:
                    if ps.props.getElementsByTagNameNS("DAV:","collection"):
                        entry_ok = True
                        break
            elif files_only:
                # for-else: entry is a file only if NO propstat reported
                # a <collection/> (the else runs when no break occurred).
                for ps in res.propstats:
                    if ps.props.getElementsByTagNameNS("DAV:","collection"):
                        break
                else:
                    entry_ok = True
            else:
                entry_ok = True
            if not entry_ok:
                continue
            # Wildcard may be a fnmatch-style pattern or a callable.
            if wildcard is not None:
                if isinstance(wildcard,basestring):
                    if not fnmatch.fnmatch(nm,wildcard):
                        continue
                else:
                    if not wildcard(nm):
                        continue
            if full:
                yield relpath(pathjoin(path,nm))
            elif absolute:
                yield abspath(pathjoin(path,nm))
            else:
                yield nm
    if not dir_ok:
        raise ResourceInvalidError(path)
def listdirinfo(self,path="./",wildcard=None,full=False,absolute=False,dirs_only=False,files_only=False):
    """Eager (name, info) listing; see ilistdirinfo() for the lazy version."""
    entries = self.ilistdirinfo(path=path,wildcard=wildcard,full=full,absolute=absolute,dirs_only=dirs_only,files_only=files_only)
    return list(entries)
def ilistdirinfo(self,path="./",wildcard=None,full=False,absolute=False,dirs_only=False,files_only=False):
    """Generator yielding (name, info-dict) pairs for entries under `path`.

    Same filtering semantics as ilistdir(), but also requests size,
    modification time and etag properties in the PROPFIND.
    """
    props = "<D:resourcetype /><D:getcontentlength />" \
            "<D:getlastmodified /><D:getetag />"
    dir_ok = False
    for res in self._do_propfind(path,props):
        if self._isurl(path,res.href):
            # The directory itself, check it's actually a directory
            for ps in res.propstats:
                if ps.props.getElementsByTagNameNS("DAV:","collection"):
                    dir_ok = True
                    break
        else:
            # An entry in the directory, check if it's of the
            # appropriate type and add to entries list as required.
            info = self._info_from_propfind(res)
            nm = basename(self._url2path(res.href))
            entry_ok = False
            if dirs_only:
                for ps in res.propstats:
                    if ps.props.getElementsByTagNameNS("DAV:","collection"):
                        entry_ok = True
                        break
            elif files_only:
                # for-else: a file only if no propstat reported <collection/>.
                for ps in res.propstats:
                    if ps.props.getElementsByTagNameNS("DAV:","collection"):
                        break
                else:
                    entry_ok = True
            else:
                entry_ok = True
            if not entry_ok:
                continue
            # Wildcard may be a fnmatch-style pattern or a callable.
            if wildcard is not None:
                if isinstance(wildcard,basestring):
                    if not fnmatch.fnmatch(nm,wildcard):
                        continue
                else:
                    if not wildcard(nm):
                        continue
            if full:
                yield (relpath(pathjoin(path,nm)),info)
            elif absolute:
                yield (abspath(pathjoin(path,nm)),info)
            else:
                yield (nm,info)
    if not dir_ok:
        raise ResourceInvalidError(path)
def makedir(self,path,recursive=False,allow_recreate=False):
    """Create a directory at `path` via an HTTP MKCOL request."""
    response = self._request(path,"MKCOL")
    response.close()
    if response.status == 201:
        return True
    if response.status == 409:
        # 409 Conflict: a parent collection is missing.
        if not recursive:
            raise ParentDirectoryMissingError(path)
        self.makedir(dirname(path),recursive=True,allow_recreate=True)
        self.makedir(path,recursive=False,allow_recreate=allow_recreate)
        return True
    if response.status == 405:
        # 405 Method Not Allowed: something already exists at the path.
        if not self.isdir(path):
            raise ResourceInvalidError(path)
        if not allow_recreate:
            raise DestinationExistsError(path)
        return True
    if response.status < 200 or response.status >= 300:
        raise_generic_error(response,"makedir",path)
    # NOTE(review): other 2xx statuses fall through and return None here,
    # while the explicit success paths above return True — confirm intended.
def remove(self,path):
    """Delete the file at `path` via an HTTP DELETE request."""
    if self.isdir(path):
        raise ResourceInvalidError(path)
    resp = self._request(path,"DELETE")
    resp.close()
    status = resp.status
    if status == 405:
        raise ResourceInvalidError(path)
    if not 200 <= status < 300:
        raise_generic_error(resp,"remove",path)
    return True
def removedir(self,path,recursive=False,force=False):
    """Remove the directory at `path` via an HTTP DELETE request.

    `force` skips the emptiness check; `recursive` additionally prunes
    now-empty parent directories.
    """
    if self.isfile(path):
        raise ResourceInvalidError(path)
    if not force and self.listdir(path):
        raise DirectoryNotEmptyError(path)
    response = self._request(path,"DELETE")
    response.close()
    if response.status == 405:
        raise ResourceInvalidError(path)
    if response.status < 200 or response.status >= 300:
        raise_generic_error(response,"removedir",path)
    if recursive and path not in ("","/"):
        # Walk up, deleting parents until a non-empty one is found.
        try:
            self.removedir(dirname(path),recursive=True)
        except DirectoryNotEmptyError:
            pass
    return True
def rename(self,src,dst):
    """Rename the resource at `src` to `dst`.

    Delegates to self._move (defined elsewhere in this class) —
    presumably a server-side WebDAV MOVE; confirm against the full class.
    """
    self._move(src,dst)
def getinfo(self,path):
    """Return an info dict for `path` (name plus any of size,
    modified_time, etag and st_mode found via PROPFIND)."""
    info = {}
    info["name"] = basename(path)
    pf = propfind(prop="<prop xmlns='DAV:'><resourcetype /><getcontentlength /><getlastmodified /><getetag /></prop>")
    response = self._request(path,"PROPFIND",pf.render(),{"Depth":"0"})
    try:
        if response.status != 207:
            raise_generic_error(response,"getinfo",path)
        msres = multistatus.parse(response.read())
        for res in msres.responses:
            if self._isurl(path,res.href):
                info.update(self._info_from_propfind(res))
        if "st_mode" not in info:
            # Default to a regular file, owner rwx (0700 is a Python 2
            # octal literal).
            info["st_mode"] = 0700 | statinfo.S_IFREG
        return info
    finally:
        response.close()
def _do_propfind(self, path, props):
    """Incremental PROPFIND parsing, for use with ilistdir/ilistdirinfo.

    This generator method incrementally parses the results returned by
    a PROPFIND, yielding each <response> object as it becomes available.
    If the server is able to send responses in chunked encoding, then
    this can substantially speed up iterating over the results.
    """
    pf = propfind(prop="<prop xmlns:D='DAV:'>" + props + "</prop>")
    response = self._request(path, "PROPFIND", pf.render(), {"Depth": "1"})
    try:
        if response.status == 404:
            raise ResourceNotFoundError(path)
        if response.status != 207:
            raise_generic_error(response, "listdir", path)
        # Pull-parse the body so each DAV: <response> element can be
        # yielded as soon as it has been received and expanded.
        xmlevents = xml.dom.pulldom.parse(response, bufsize=1024)
        for (event, node) in xmlevents:
            if event != xml.dom.pulldom.START_ELEMENT:
                continue
            if node.namespaceURI == "DAV:" and node.localName == "response":
                xmlevents.expandNode(node)
                yield xmlobj.response.parse(node)
    finally:
        response.close()
def _info_from_propfind(self, res):
    """Build an info dict from a single PROPFIND <response> element."""
    info = {}
    for ps in res.propstats:
        findElements = ps.props.getElementsByTagNameNS
        # TODO: should check for status of the propfind first...
        # check for directory indicator
        if findElements("DAV:","collection"):
            info["st_mode"] = 0700 | statinfo.S_IFDIR
        # check for content length
        cl = findElements("DAV:","getcontentlength")
        if cl:
            # Concatenate text children to get the property value.
            cl = "".join(c.nodeValue for c in cl[0].childNodes)
            try:
                info["size"] = int(cl)
            except ValueError:
                pass
        # check for last modified time
        lm = findElements("DAV:","getlastmodified")
        if lm:
            lm = "".join(c.nodeValue for c in lm[0].childNodes)
            try:
                # TODO: more robust datetime parsing
                fmt = "%a, %d %b %Y %H:%M:%S GMT"
                mtime = datetime.datetime.strptime(lm,fmt)
                info["modified_time"] = mtime
            except ValueError:
                pass
        # check for etag
        etag = findElements("DAV:","getetag")
        if etag:
            etag = "".join(c.nodeValue for c in etag[0].childNodes)
            if etag:
                info["etag"] = etag
    if "st_mode" not in info:
        # No DAV:collection marker seen: assume a regular file.
        info["st_mode"] = 0700 | statinfo.S_IFREG
    return info
def copy(self, src, dst, overwrite=False, chunk_size=None):
    """Copy a single file from ``src`` to ``dst`` server-side.

    ``chunk_size`` is accepted for interface compatibility but unused,
    since the server performs the copy itself.
    """
    if not self.isdir(src):
        self._copy(src, dst, overwrite=overwrite)
        return
    raise ResourceInvalidError(src, msg="Source is not a file: %(path)s")
def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=0):
    """Copy a directory tree from ``src`` to ``dst`` server-side.

    ``ignore_errors`` and ``chunk_size`` are accepted for interface
    compatibility but unused, since the server performs the copy.
    """
    if not self.isfile(src):
        self._copy(src, dst, overwrite=overwrite)
        return
    raise ResourceInvalidError(src, msg="Source is not a directory: %(path)s")
def _copy(self, src, dst, overwrite=False):
    """Issue a WebDAV COPY request from ``src`` to ``dst``."""
    headers = {
        "Destination": self.getpathurl(dst),
        "Overwrite": "T" if overwrite else "F",
    }
    response = self._request(src, "COPY", headers=headers)
    response.close()
    status = response.status
    if status == 412:
        # Precondition failed: destination exists and Overwrite was "F".
        raise DestinationExistsError(dst)
    if status == 409:
        # Conflict: an ancestor of the destination does not exist.
        raise ParentDirectoryMissingError(dst)
    if not (200 <= status < 300):
        raise_generic_error(response, "copy", src)
def move(self, src, dst, overwrite=False, chunk_size=None):
    """Move a single file from ``src`` to ``dst`` server-side.

    ``chunk_size`` is accepted for interface compatibility but unused,
    since the server performs the move itself.
    """
    if not self.isdir(src):
        self._move(src, dst, overwrite=overwrite)
        return
    raise ResourceInvalidError(src, msg="Source is not a file: %(path)s")
def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=0):
    """Move a directory tree from ``src`` to ``dst`` server-side.

    ``ignore_errors`` and ``chunk_size`` are accepted for interface
    compatibility but unused, since the server performs the move.
    """
    if not self.isfile(src):
        self._move(src, dst, overwrite=overwrite)
        return
    raise ResourceInvalidError(src, msg="Source is not a directory: %(path)s")
def _move(self, src, dst, overwrite=False):
    """Issue a WebDAV MOVE request from ``src`` to ``dst``."""
    headers = {
        "Destination": self.getpathurl(dst),
        "Overwrite": "T" if overwrite else "F",
    }
    response = self._request(src, "MOVE", headers=headers)
    response.close()
    status = response.status
    if status == 412:
        # Precondition failed: destination exists and Overwrite was "F".
        raise DestinationExistsError(dst)
    if status == 409:
        # Conflict: an ancestor of the destination does not exist.
        raise ParentDirectoryMissingError(dst)
    if not (200 <= status < 300):
        raise_generic_error(response, "move", src)
@staticmethod
def _split_xattr(name):
"""Split extended attribute name into (namespace,localName) pair."""
idx = len(name)-1
while idx >= 0 and name[idx].isalnum():
idx -= 1
return (name[:idx+1],name[idx+1:])
def getxattr(self, path, name, default=None):
    """Get the value of the named extended attribute (WebDAV property).

    Returns ``default`` if the property is not present on the resource.
    """
    (namespaceURI,localName) = self._split_xattr(name)
    # TODO: encode xml character entities in the namespace
    if namespaceURI:
        pf = propfind(prop="<prop xmlns='"+namespaceURI+"'><"+localName+" /></prop>")
    else:
        pf = propfind(prop="<prop><"+localName+" /></prop>")
    response = self._request(path,"PROPFIND",pf.render(),{"Depth":"0"})
    try:
        if response.status != 207:
            raise_generic_error(response,"getxattr",path)
        msres = multistatus.parse(response.read())
    finally:
        response.close()
    for res in msres.responses:
        # Only the <response> describing this exact URL is relevant.
        if self._isurl(path,res.href):
            for ps in res.propstats:
                # Namespaced properties need the NS-aware DOM accessor.
                if namespaceURI:
                    findElements = ps.props.getElementsByTagNameNS
                    propNode = findElements(namespaceURI,localName)
                else:
                    findElements = ps.props.getElementsByTagName
                    propNode = findElements(localName)
                if propNode:
                    propNode = propNode[0]
                    # 200: property present; 404: property not set.
                    if ps.status.code == 200:
                        return "".join(c.toxml() for c in propNode.childNodes)
                    if ps.status.code == 404:
                        return default
                    raise OperationFailedError("getxattr",msres.render())
    return default
def setxattr(self, path, name, value):
    """Set the named extended attribute (WebDAV property) on ``path``."""
    (namespaceURI, localName) = self._split_xattr(name)
    # TODO: encode xml character entities in the namespace
    if namespaceURI:
        prop_xml = "<%s xmlns='%s'>%s</%s>" % (localName, namespaceURI, value, localName)
    else:
        prop_xml = "<%s>%s</%s>" % (localName, value, localName)
    pu = propertyupdate()
    pu.commands.append(set(props="<prop>" + prop_xml + "</prop>"))
    response = self._request(path, "PROPPATCH", pu.render(), {"Depth": "0"})
    response.close()
    if not (200 <= response.status < 300):
        raise_generic_error(response, "setxattr", path)
def delxattr(self, path, name):
    """Remove the named extended attribute (WebDAV property) from ``path``."""
    (namespaceURI, localName) = self._split_xattr(name)
    # TODO: encode xml character entities in the namespace
    if namespaceURI:
        prop_xml = "<%s xmlns='%s' />" % (localName, namespaceURI,)
    else:
        prop_xml = "<%s />" % (localName,)
    pu = propertyupdate()
    pu.commands.append(remove(props="<prop>" + prop_xml + "</prop>"))
    response = self._request(path, "PROPPATCH", pu.render(), {"Depth": "0"})
    response.close()
    if not (200 <= response.status < 300):
        raise_generic_error(response, "delxattr", path)
def listxattrs(self, path):
    """List the names of all extended attributes set on ``path``."""
    pf = propfind(propname=True)
    response = self._request(path, "PROPFIND", pf.render(), {"Depth": "0"})
    try:
        if response.status != 207:
            raise_generic_error(response, "listxattrs", path)
        msres = multistatus.parse(response.read())
    finally:
        response.close()
    props = []
    for res in msres.responses:
        if not self._isurl(path, res.href):
            continue
        for ps in res.propstats:
            for node in ps.props.childNodes:
                if node.nodeType != node.ELEMENT_NODE:
                    continue
                ns = node.namespaceURI
                if ns:
                    # Standard DAV: and internal PYFS: properties are
                    # not user attributes; hide them from the listing.
                    if ns in ("DAV:", "PYFS:",):
                        continue
                    props.append(ns + node.localName)
                else:
                    props.append(node.nodeName)
    return props
# TODO: bulk getxattrs() and setxattrs() methods
def raise_generic_error(response, opname, path):
    """Convert a failed HTTP response into the appropriate FS exception.

    This function always raises; the response body is attached to the
    exception as its ``details``.
    """
    status = response.status
    details = response.read()
    if status == 404:
        raise ResourceNotFoundError(path, details=details)
    if status in (401, 403):
        raise PermissionDeniedError(opname, details=details)
    if status == 423:
        raise ResourceLockedError(path, opname=opname, details=details)
    if status == 501:
        raise UnsupportedError(opname, details=details)
    if status == 405:
        raise ResourceInvalidError(path, opname=opname, details=details)
    raise OperationFailedError(opname, msg="Server Error: %s" % (status,), details=details)
# --- end of WebDAV filesystem code; the following section is a separate
# --- module (numpy.f2py cfuncs definitions).
#!/usr/bin/env python
"""
C declarations, CPP macros, and C functions for f2py2e.
Only required declarations/macros/functions will be used.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 11:42:34 $
Pearu Peterson
"""
# Extract "1.75" from the CVS keyword string.
__version__ = "$Revision: 1.75 $"[10:-1]

# NOTE: this import rebinds __version__ to f2py's local version module,
# deliberately superseding the revision string above.
import __version__
f2py_version = __version__.version

import types,sys,copy,os

# Shorthand used throughout f2py for writing diagnostics to stderr.
errmess=sys.stderr.write
##################### Definitions ##################
# outneeds: per-section ordered lists of the code-block names that the
# generated module actually requires.
outneeds={'includes0':[],'includes':[],'typedefs':[],'typedefs_generated':[],
          'userincludes':[],
          'cppmacros':[],'cfuncs':[],'callbacks':[],'f90modhooks':[],
          'commonhooks':[]}
# needs: maps a code-block name to the names of blocks it depends on.
needs={}
# Each section dict maps a block name to the C source text it expands to;
# the '/*need_*/' entries are placeholder markers in the output template.
includes0={'includes0':'/*need_includes0*/'}
includes={'includes':'/*need_includes*/'}
userincludes={'userincludes':'/*need_userincludes*/'}
typedefs={'typedefs':'/*need_typedefs*/'}
typedefs_generated={'typedefs_generated':'/*need_typedefs_generated*/'}
cppmacros={'cppmacros':'/*need_cppmacros*/'}
cfuncs={'cfuncs':'/*need_cfuncs*/'}
callbacks={'callbacks':'/*need_callbacks*/'}
f90modhooks={'f90modhooks':'/*need_f90modhooks*/',
             'initf90modhooksstatic':'/*initf90modhooksstatic*/',
             'initf90modhooksdynamic':'/*initf90modhooksdynamic*/',
             }
commonhooks={'commonhooks':'/*need_commonhooks*/',
             'initcommonhooks':'/*need_initcommonhooks*/',
             }
############ Includes ###################
# Header snippets emitted into the generated C module when needed.
includes0['math.h']='#include <math.h>'
includes0['string.h']='#include <string.h>'
includes0['setjmp.h']='#include <setjmp.h>'
includes['Python.h']='#include "Python.h"'
needs['arrayobject.h']=['Python.h']
includes['arrayobject.h']='''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API
#include "arrayobject.h"'''
# BUG FIX: this entry previously re-assigned includes['arrayobject.h'],
# silently clobbering the arrayobject.h include above.  The fortranobject
# header belongs under its own key so that both includes can be emitted.
includes['fortranobject.h']='#include "fortranobject.h"'
############# Type definitions ###############
# C typedef snippets for the pseudo-type names f2py uses internally.
typedefs['unsigned_char']='typedef unsigned char unsigned_char;'
typedefs['unsigned_short']='typedef unsigned short unsigned_short;'
typedefs['unsigned_long']='typedef unsigned long unsigned_long;'
typedefs['signed_char']='typedef signed char signed_char;'
typedefs['long_long']="""\
#ifdef _WIN32
typedef __int64 long_long;
#else
typedef long long long_long;
typedef unsigned long long unsigned_long_long;
#endif
"""
# BUG FIX: the key was misspelled 'insinged_long_long' (so it could never
# be requested by name) and the _WIN32 branch mistakenly redefined
# 'long_long' instead of declaring 'unsigned_long_long'.
typedefs['unsigned_long_long']="""\
#ifdef _WIN32
typedef __uint64 unsigned_long_long;
#else
typedef unsigned long long unsigned_long_long;
#endif
"""
typedefs['long_double']="""\
#ifndef _LONG_DOUBLE
typedef long double long_double;
#endif
"""
typedefs['complex_long_double']='typedef struct {long double r,i;} complex_long_double;'
typedefs['complex_float']='typedef struct {float r,i;} complex_float;'
typedefs['complex_double']='typedef struct {double r,i;} complex_double;'
typedefs['string']="""typedef char * string;"""
############### CPP macros ####################
# Debug-tracing macros; no-ops unless DEBUGCFUNCS is defined at compile time.
cppmacros['CFUNCSMESS']="""\
#ifdef DEBUGCFUNCS
#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess);
#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\
\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
\tfprintf(stderr,\"\\n\");
#else
#define CFUNCSMESS(mess)
#define CFUNCSMESSPY(mess,obj)
#endif
"""
# Map a Fortran procedure name to its linker-level C symbol, covering the
# prepend-underscore / append-underscore / uppercase naming conventions.
cppmacros['F_FUNC']="""\
#if defined(PREPEND_FORTRAN)
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) _##F
#else
#define F_FUNC(f,F) _##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) _##F##_
#else
#define F_FUNC(f,F) _##f##_
#endif
#endif
#else
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) F
#else
#define F_FUNC(f,F) f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) F##_
#else
#define F_FUNC(f,F) f##_
#endif
#endif
#endif
#if defined(UNDERSCORE_G77)
#define F_FUNC_US(f,F) F_FUNC(f##_,F##_)
#else
#define F_FUNC_US(f,F) F_FUNC(f,F)
#endif
"""
# Same mangling rules, but for the f2py-generated wrapper symbols
# (prefixed with f2pywrap/F2PYWRAP).
cppmacros['F_WRAPPEDFUNC']="""\
#if defined(PREPEND_FORTRAN)
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F
#else
#define F_WRAPPEDFUNC(f,F) _f2pywrap##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_
#else
#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_
#endif
#endif
#else
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F
#else
#define F_WRAPPEDFUNC(f,F) f2pywrap##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_
#else
#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_
#endif
#endif
#endif
#if defined(UNDERSCORE_G77)
#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_)
#else
#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F)
#endif
"""
cppmacros['F_MODFUNC']="""\
#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f
#else
#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _
#endif
#endif
#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f
#else
#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _
#endif
#endif
#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) f ## .in. ## m
#else
#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _
#endif
#endif
/*
#if defined(UPPERCASE_FORTRAN)
#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F)
#else
#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f)
#endif
*/
#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f))
"""
cppmacros['SWAPUNSAFE']="""\
#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\
(size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\
(size_t)(a) = ((size_t)(a) ^ (size_t)(b))
"""
cppmacros['SWAP']="""\
#define SWAP(a,b,t) {\\
\tt *c;\\
\tc = a;\\
\ta = b;\\
\tb = c;}
"""
#cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS)'
cppmacros['PRINTPYOBJERR']="""\
#define PRINTPYOBJERR(obj)\\
\tfprintf(stderr,\"#modulename#.error is related to \");\\
\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
\tfprintf(stderr,\"\\n\");
"""
cppmacros['MINMAX']="""\
#ifndef MAX
#define MAX(a,b) ((a > b) ? (a) : (b))
#endif
#ifndef MIN
#define MIN(a,b) ((a < b) ? (a) : (b))
#endif
"""
cppmacros['len..']="""\
#define rank(var) var ## _Rank
#define shape(var,dim) var ## _Dims[dim]
#define old_rank(var) (((PyArrayObject *)(capi_ ## var ## _tmp))->nd)
#define old_shape(var,dim) (((PyArrayObject *)(capi_ ## var ## _tmp))->dimensions[dim])
#define fshape(var,dim) shape(var,rank(var)-dim-1)
#define len(var) shape(var,0)
#define flen(var) fshape(var,0)
#define size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp))
/* #define index(i) capi_i ## i */
#define slen(var) capi_ ## var ## _len
"""
# pyobj_from_<ctype>1 macros: construct a Python scalar from a C value.
cppmacros['pyobj_from_char1']='#define pyobj_from_char1(v) (PyInt_FromLong(v))'
cppmacros['pyobj_from_short1']='#define pyobj_from_short1(v) (PyInt_FromLong(v))'
needs['pyobj_from_int1']=['signed_char']
cppmacros['pyobj_from_int1']='#define pyobj_from_int1(v) (PyInt_FromLong(v))'
cppmacros['pyobj_from_long1']='#define pyobj_from_long1(v) (PyLong_FromLong(v))'
needs['pyobj_from_long_long1']=['long_long']
cppmacros['pyobj_from_long_long1']="""\
#ifdef HAVE_LONG_LONG
#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v))
#else
#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long.
#define pyobj_from_long_long1(v) (PyLong_FromLong(v))
#endif
"""
needs['pyobj_from_long_double1']=['long_double']
cppmacros['pyobj_from_long_double1']='#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))'
cppmacros['pyobj_from_double1']='#define pyobj_from_double1(v) (PyFloat_FromDouble(v))'
cppmacros['pyobj_from_float1']='#define pyobj_from_float1(v) (PyFloat_FromDouble(v))'
needs['pyobj_from_complex_long_double1']=['complex_long_double']
cppmacros['pyobj_from_complex_long_double1']='#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_complex_double1']=['complex_double']
cppmacros['pyobj_from_complex_double1']='#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_complex_float1']=['complex_float']
cppmacros['pyobj_from_complex_float1']='#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_string1']=['string']
cppmacros['pyobj_from_string1']='#define pyobj_from_string1(v) (PyString_FromString((char *)v))'
needs['TRYPYARRAYTEMPLATE']=['PRINTPYOBJERR']
cppmacros['TRYPYARRAYTEMPLATE']="""\
/* New SciPy */
#define TRYPYARRAYTEMPLATECHAR case PyArray_STRING: *(char *)(arr->data)=*v; break;
#define TRYPYARRAYTEMPLATELONG case PyArray_LONG: *(long *)(arr->data)=*v; break;
#define TRYPYARRAYTEMPLATEOBJECT case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data); break;
#define TRYPYARRAYTEMPLATE(ctype,typecode) \\
PyArrayObject *arr = NULL;\\
if (!obj) return -2;\\
if (!PyArray_Check(obj)) return -1;\\
if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
if (arr->descr->type==typecode) {*(ctype *)(arr->data)=*v; return 1;}\\
switch (arr->descr->type_num) {\\
case PyArray_DOUBLE: *(double *)(arr->data)=*v; break;\\
case PyArray_INT: *(int *)(arr->data)=*v; break;\\
case PyArray_LONG: *(long *)(arr->data)=*v; break;\\
case PyArray_FLOAT: *(float *)(arr->data)=*v; break;\\
case PyArray_CDOUBLE: *(double *)(arr->data)=*v; break;\\
case PyArray_CFLOAT: *(float *)(arr->data)=*v; break;\\
case PyArray_BOOL: *(npy_bool *)(arr->data)=(*v!=0); break;\\
case PyArray_UBYTE: *(unsigned char *)(arr->data)=*v; break;\\
case PyArray_BYTE: *(signed char *)(arr->data)=*v; break;\\
case PyArray_SHORT: *(short *)(arr->data)=*v; break;\\
case PyArray_USHORT: *(npy_ushort *)(arr->data)=*v; break;\\
case PyArray_UINT: *(npy_uint *)(arr->data)=*v; break;\\
case PyArray_ULONG: *(npy_ulong *)(arr->data)=*v; break;\\
case PyArray_LONGLONG: *(npy_longlong *)(arr->data)=*v; break;\\
case PyArray_ULONGLONG: *(npy_ulonglong *)(arr->data)=*v; break;\\
case PyArray_LONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\
case PyArray_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\
case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data, arr); break;\\
default: return -2;\\
};\\
return 1
"""
needs['TRYCOMPLEXPYARRAYTEMPLATE']=['PRINTPYOBJERR']
# Template: store the complex C scalar *v into the first element of array
# `obj`, taking only the real part for non-complex element types.
# BUG FIXES relative to the previous revision:
#  * the PyArray_BOOL case had an extra closing parenthesis, producing
#    uncompilable C;
#  * the UBYTE/BYTE/SHORT cases were listed twice - duplicate case labels
#    are a C compile error.
cppmacros['TRYCOMPLEXPYARRAYTEMPLATE']="""\
#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;
#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\
        PyArrayObject *arr = NULL;\\
        if (!obj) return -2;\\
        if (!PyArray_Check(obj)) return -1;\\
        if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
        if (arr->descr->type==typecode) {\\
            *(ctype *)(arr->data)=(*v).r;\\
            *(ctype *)(arr->data+sizeof(ctype))=(*v).i;\\
            return 1;\\
        }\\
        switch (arr->descr->type_num) {\\
                case PyArray_CDOUBLE: *(double *)(arr->data)=(*v).r;*(double *)(arr->data+sizeof(double))=(*v).i;break;\\
                case PyArray_CFLOAT: *(float *)(arr->data)=(*v).r;*(float *)(arr->data+sizeof(float))=(*v).i;break;\\
                case PyArray_DOUBLE: *(double *)(arr->data)=(*v).r; break;\\
                case PyArray_LONG: *(long *)(arr->data)=(*v).r; break;\\
                case PyArray_FLOAT: *(float *)(arr->data)=(*v).r; break;\\
                case PyArray_INT: *(int *)(arr->data)=(*v).r; break;\\
                case PyArray_SHORT: *(short *)(arr->data)=(*v).r; break;\\
                case PyArray_UBYTE: *(unsigned char *)(arr->data)=(*v).r; break;\\
                case PyArray_BYTE: *(signed char *)(arr->data)=(*v).r; break;\\
                case PyArray_BOOL: *(npy_bool *)(arr->data)=((*v).r!=0 && (*v).i!=0); break;\\
                case PyArray_USHORT: *(npy_ushort *)(arr->data)=(*v).r; break;\\
                case PyArray_UINT: *(npy_uint *)(arr->data)=(*v).r; break;\\
                case PyArray_ULONG: *(npy_ulong *)(arr->data)=(*v).r; break;\\
                case PyArray_LONGLONG: *(npy_longlong *)(arr->data)=(*v).r; break;\\
                case PyArray_ULONGLONG: *(npy_ulonglong *)(arr->data)=(*v).r; break;\\
                case PyArray_LONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r; break;\\
                case PyArray_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r;*(npy_longdouble *)(arr->data+sizeof(npy_longdouble))=(*v).i;break;\\
                case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;\\
                default: return -2;\\
        };\\
        return -1;
"""
## cppmacros['NUMFROMARROBJ']="""\
## #define NUMFROMARROBJ(typenum,ctype) \\
## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
## \tif (arr) {\\
## \t\tif (arr->descr->type_num==PyArray_OBJECT) {\\
## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\
## \t\t\tgoto capi_fail;\\
## \t\t} else {\\
## \t\t\t(arr->descr->cast[typenum])(arr->data,1,(char*)v,1,1);\\
## \t\t}\\
## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
## \t\treturn 1;\\
## \t}
## """
## #XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ
## cppmacros['CNUMFROMARROBJ']="""\
## #define CNUMFROMARROBJ(typenum,ctype) \\
## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
## \tif (arr) {\\
## \t\tif (arr->descr->type_num==PyArray_OBJECT) {\\
## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\
## \t\t\tgoto capi_fail;\\
## \t\t} else {\\
## \t\t\t(arr->descr->cast[typenum])((void *)(arr->data),1,(void *)(v),1,1);\\
## \t\t}\\
## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
## \t\treturn 1;\\
## \t}
## """
needs['GETSTRFROMPYTUPLE']=['STRINGCOPYN','PRINTPYOBJERR']
cppmacros['GETSTRFROMPYTUPLE']="""\
#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\
\t\tPyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\
\t\tif (rv_cb_str == NULL)\\
\t\t\tgoto capi_fail;\\
\t\tif (PyString_Check(rv_cb_str)) {\\
\t\t\tstr[len-1]='\\0';\\
\t\t\tSTRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\
\t\t} else {\\
\t\t\tPRINTPYOBJERR(rv_cb_str);\\
\t\t\tPyErr_SetString(#modulename#_error,\"string object expected\");\\
\t\t\tgoto capi_fail;\\
\t\t}\\
\t}
"""
cppmacros['GETSCALARFROMPYTUPLE']="""\
#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\
\t\tif ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\
\t\tif (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\
\t\t\tgoto capi_fail;\\
\t}
"""
cppmacros['FAILNULL']="""\\
#define FAILNULL(p) do { \\
if ((p) == NULL) { \\
PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\
goto capi_fail; \\
} \\
} while (0)
"""
needs['MEMCOPY']=['string.h', 'FAILNULL']
cppmacros['MEMCOPY']="""\
#define MEMCOPY(to,from,n)\\
do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0)
"""
cppmacros['STRINGMALLOC']="""\
#define STRINGMALLOC(str,len)\\
\tif ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\
\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
\t\tgoto capi_fail;\\
\t} else {\\
\t\t(str)[len] = '\\0';\\
\t}
"""
cppmacros['STRINGFREE']="""\
#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0)
"""
needs['STRINGCOPYN']=['string.h', 'FAILNULL']
cppmacros['STRINGCOPYN']="""\
#define STRINGCOPYN(to,from,buf_size) \\
do { \\
int _m = (buf_size); \\
char *_to = (to); \\
char *_from = (from); \\
FAILNULL(_to); FAILNULL(_from); \\
(void)strncpy(_to, _from, sizeof(char)*_m); \\
_to[_m-1] = '\\0'; \\
/* Padding with spaces instead of nulls */ \\
for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\
_to[_m] = ' '; \\
} \\
} while (0)
"""
needs['STRINGCOPY']=['string.h', 'FAILNULL']
cppmacros['STRINGCOPY']="""\
#define STRINGCOPY(to,from)\\
do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0)
"""
# CHECK* macros: validate an argument; on failure set an error message
# (the goto is commented out, so generated code decides how to bail).
cppmacros['CHECKGENERIC']="""\
#define CHECKGENERIC(check,tcheck,name) \\
\tif (!(check)) {\\
\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
\t\t/*goto capi_fail;*/\\
\t} else """
cppmacros['CHECKARRAY']="""\
#define CHECKARRAY(check,tcheck,name) \\
\tif (!(check)) {\\
\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
\t\t/*goto capi_fail;*/\\
\t} else """
cppmacros['CHECKSTRING']="""\
#define CHECKSTRING(check,tcheck,name,show,var)\\
\tif (!(check)) {\\
\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
\t\tfprintf(stderr,show\"\\n\",slen(var),var);\\
\t\t/*goto capi_fail;*/\\
\t} else """
cppmacros['CHECKSCALAR']="""\
#define CHECKSCALAR(check,tcheck,name,show,var)\\
\tif (!(check)) {\\
\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
\t\tfprintf(stderr,show\"\\n\",var);\\
\t\t/*goto capi_fail;*/\\
\t} else """
## cppmacros['CHECKDIMS']="""\
## #define CHECKDIMS(dims,rank) \\
## \tfor (int i=0;i<(rank);i++)\\
## \t\tif (dims[i]<0) {\\
## \t\t\tfprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\
## \t\t\tgoto capi_fail;\\
## \t\t}
## """
cppmacros['ARRSIZE']='#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
# NOTE(review): the '#error' text below contains historical typos
# ('intall', 'http:/') which are kept verbatim here.
cppmacros['OLDPYNUM']="""\
#ifdef OLDPYNUM
#error You need to intall Numeric Python version 13 or higher. Get it from http:/sourceforge.net/project/?group_id=1369
#endif
"""
################# C functions ###############
cfuncs['calcarrindex']="""\
static int calcarrindex(int *i,PyArrayObject *arr) {
\tint k,ii = i[0];
\tfor (k=1; k < arr->nd; k++)
\t\tii += (ii*(arr->dimensions[k] - 1)+i[k]); /* assuming contiguous arr */
\treturn ii;
}"""
cfuncs['calcarrindextr']="""\
static int calcarrindextr(int *i,PyArrayObject *arr) {
\tint k,ii = i[arr->nd-1];
\tfor (k=1; k < arr->nd; k++)
\t\tii += (ii*(arr->dimensions[arr->nd-k-1] - 1)+i[arr->nd-k-1]); /* assuming contiguous arr */
\treturn ii;
}"""
cfuncs['forcomb']="""\
static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache;
static int initforcomb(npy_intp *dims,int nd,int tr) {
int k;
if (dims==NULL) return 0;
if (nd<0) return 0;
forcombcache.nd = nd;
forcombcache.d = dims;
forcombcache.tr = tr;
if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
for (k=1;k<nd;k++) {
forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0;
}
forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1;
return 1;
}
static int *nextforcomb(void) {
int j,*i,*i_tr,k;
int nd=forcombcache.nd;
if ((i=forcombcache.i) == NULL) return NULL;
if ((i_tr=forcombcache.i_tr) == NULL) return NULL;
if (forcombcache.d == NULL) return NULL;
i[0]++;
if (i[0]==forcombcache.d[0]) {
j=1;
while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++;
if (j==nd) {
free(i);
free(i_tr);
return NULL;
}
for (k=0;k<j;k++) i[k] = i_tr[nd-k-1] = 0;
i[j]++;
i_tr[nd-j-1]++;
} else
i_tr[nd-1]++;
if (forcombcache.tr) return i_tr;
return i;
}"""
needs['try_pyarr_from_string']=['STRINGCOPYN','PRINTPYOBJERR','string']
# Copy a C string back into a Python array object (intent(inout) strings).
# The capi_fail label is reachable via the goto inside STRINGCOPYN/FAILNULL.
cfuncs['try_pyarr_from_string']="""\
static int try_pyarr_from_string(PyObject *obj,const string str) {
\tPyArrayObject *arr = NULL;
\tif (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL)))
\t\t{ STRINGCOPYN(arr->data,str,PyArray_NBYTES(arr)); }
\treturn 1;
capi_fail:
\tPRINTPYOBJERR(obj);
\tPyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\");
\treturn 0;
}
"""
needs['string_from_pyobj']=['string','STRINGMALLOC','STRINGCOPYN']
# Extract a (newly malloc'ed) C string from a Python object; None yields
# a copy of inistr, arrays are copied raw, anything else via str().
# *len == -1 means "infer the length from the source".
cfuncs['string_from_pyobj']="""\
static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) {
\tPyArrayObject *arr = NULL;
\tPyObject *tmp = NULL;
#ifdef DEBUGCFUNCS
fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj);
#endif
\tif (obj == Py_None) {
\t\tif (*len == -1)
\t\t\t*len = strlen(inistr); /* Will this cause problems? */
\t\tSTRINGMALLOC(*str,*len);
\t\tSTRINGCOPYN(*str,inistr,*len+1);
\t\treturn 1;
\t}
\tif (PyArray_Check(obj)) {
\t\tif ((arr = (PyArrayObject *)obj) == NULL)
\t\t\tgoto capi_fail;
\t\tif (!ISCONTIGUOUS(arr)) {
\t\t\tPyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\");
\t\t\tgoto capi_fail;
\t\t}
\t\tif (*len == -1)
\t\t\t*len = (arr->descr->elsize)*PyArray_SIZE(arr);
\t\tSTRINGMALLOC(*str,*len);
\t\tSTRINGCOPYN(*str,arr->data,*len+1);
\t\treturn 1;
\t}
\tif (PyString_Check(obj)) {
\t\ttmp = obj;
\t\tPy_INCREF(tmp);
\t}
\telse
\t\ttmp = PyObject_Str(obj);
\tif (tmp == NULL) goto capi_fail;
\tif (*len == -1)
\t\t*len = PyString_GET_SIZE(tmp);
\tSTRINGMALLOC(*str,*len);
\tSTRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1);
\tPy_DECREF(tmp);
\treturn 1;
capi_fail:
\tPy_XDECREF(tmp);
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
# Narrow integer converters: go through int_from_pyobj and truncate.
needs['char_from_pyobj']=['int_from_pyobj']
cfuncs['char_from_pyobj']="""\
static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) {
\tint i=0;
\tif (int_from_pyobj(&i,obj,errmess)) {
\t\t*v = (char)i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
needs['signed_char_from_pyobj']=['int_from_pyobj','signed_char']
cfuncs['signed_char_from_pyobj']="""\
static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) {
\tint i=0;
\tif (int_from_pyobj(&i,obj,errmess)) {
\t\t*v = (signed_char)i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
needs['short_from_pyobj']=['int_from_pyobj']
cfuncs['short_from_pyobj']="""\
static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) {
\tint i=0;
\tif (int_from_pyobj(&i,obj,errmess)) {
\t\t*v = (short)i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
# Extract a C int from a Python object; falls back to the real part of a
# complex, or (recursively) to the first element of a sequence.
cfuncs['int_from_pyobj']="""\
static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyInt_Check(obj)) {
\t\t*v = (int)PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\ttmp = PyNumber_Int(obj);
\tif (tmp) {
\t\t*v = PyInt_AS_LONG(tmp);
\t\tPy_DECREF(tmp);
\t\treturn 1;
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
# Same conversion strategy for C long.
cfuncs['long_from_pyobj']="""\
static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyInt_Check(obj)) {
\t\t*v = PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\ttmp = PyNumber_Int(obj);
\tif (tmp) {
\t\t*v = PyInt_AS_LONG(tmp);
\t\tPy_DECREF(tmp);
\t\treturn 1;
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
needs['long_long_from_pyobj']=['long_long']
# Same conversion strategy for C long long, preferring PyLong first.
cfuncs['long_long_from_pyobj']="""\
static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyLong_Check(obj)) {
\t\t*v = PyLong_AsLongLong(obj);
\t\treturn (!PyErr_Occurred());
\t}
\tif (PyInt_Check(obj)) {
\t\t*v = (long_long)PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\ttmp = PyNumber_Long(obj);
\tif (tmp) {
\t\t*v = PyLong_AsLongLong(tmp);
\t\tPy_DECREF(tmp);
\t\treturn (!PyErr_Occurred());
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
needs['long_double_from_pyobj']=['double_from_pyobj','long_double']
# Extract a C long double, taking the exact value from a numpy LongDouble
# scalar/array when possible, otherwise degrading to double precision.
cfuncs['long_double_from_pyobj']="""\
static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) {
\tdouble d=0;
\tif (PyArray_CheckScalar(obj)){
\t\tif PyArray_IsScalar(obj, LongDouble) {
\t\t\tPyArray_ScalarAsCtype(obj, v);
\t\t\treturn 1;
\t\t}
\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==PyArray_LONGDOUBLE) {
\t\t\t/* BUG FIX: this statement was missing its terminating semicolon,
\t\t\t   producing uncompilable C code. */
\t\t\t(*v) = *((npy_longdouble *)PyArray_DATA(obj));
\t\t\treturn 1;
\t\t}
\t}
\tif (double_from_pyobj(&d,obj,errmess)) {
\t\t*v = (long_double)d;
\t\treturn 1;
\t}
\treturn 0;
}
"""
# C helper: extract a C `double` from a Python object.  Tries PyFloat first,
# then generic PyNumber_Float coercion, then the real part of a complex or
# the first item of a (non-string) sequence.  The __sgi branch avoids the
# PyFloat_AS_DOUBLE macro, which miscompiled on that platform's compiler.
cfuncs['double_from_pyobj']="""\
static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyFloat_Check(obj)) {
#ifdef __sgi
\t\t*v = PyFloat_AsDouble(obj);
#else
\t\t*v = PyFloat_AS_DOUBLE(obj);
#endif
\t\treturn 1;
\t}
\ttmp = PyNumber_Float(obj);
\tif (tmp) {
#ifdef __sgi
\t\t*v = PyFloat_AsDouble(tmp);
#else
\t\t*v = PyFloat_AS_DOUBLE(tmp);
#endif
\t\tPy_DECREF(tmp);
\t\treturn 1;
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
# C helper: extract a C `float` by converting through double_from_pyobj and
# narrowing; all coercion/error handling lives in double_from_pyobj.
needs['float_from_pyobj']=['double_from_pyobj']
cfuncs['float_from_pyobj']="""\
static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) {
\tdouble d=0.0;
\tif (double_from_pyobj(&d,obj,errmess)) {
\t\t*v = (float)d;
\t\treturn 1;
\t}
\treturn 0;
}
"""
# C helper: extract a `complex_long_double`.  NumPy clongdouble scalars and
# 0-dim clongdouble arrays are converted losslessly; anything else goes
# through complex_double_from_pyobj and is widened component-wise.
needs['complex_long_double_from_pyobj']=['complex_long_double','long_double',
'complex_double_from_pyobj']
cfuncs['complex_long_double_from_pyobj']="""\
static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) {
\tcomplex_double cd={0.0,0.0};
\tif (PyArray_CheckScalar(obj)){
\t\tif PyArray_IsScalar(obj, CLongDouble) {
\t\t\tPyArray_ScalarAsCtype(obj, v);
\t\t\treturn 1;
\t\t}
\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==PyArray_CLONGDOUBLE) {
\t\t\t(*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real;
\t\t\t(*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag;
\t\t\treturn 1;
\t\t}
\t}
\tif (complex_double_from_pyobj(&cd,obj,errmess)) {
\t\t(*v).r = (long_double)cd.r;
\t\t(*v).i = (long_double)cd.i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
# C helper: extract a `complex_double` from a Python object.  Order of
# attempts: native PyComplex; numpy complex scalars (cast per width); 0-dim
# arrays / remaining array scalars via a CDOUBLE cast; then real scalars
# (float/int/long) with zero imaginary part; finally the first item of a
# (non-string) sequence, recursively.
needs['complex_double_from_pyobj']=['complex_double']
cfuncs['complex_double_from_pyobj']="""\
static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) {
\tPy_complex c;
\tif (PyComplex_Check(obj)) {
\t\tc=PyComplex_AsCComplex(obj);
\t\t(*v).r=c.real, (*v).i=c.imag;
\t\treturn 1;
\t}
\tif (PyArray_IsScalar(obj, ComplexFloating)) {
\t\tif (PyArray_IsScalar(obj, CFloat)) {
\t\t\tnpy_cfloat new;
\t\t\tPyArray_ScalarAsCtype(obj, &new);
\t\t\t(*v).r = (double)new.real;
\t\t\t(*v).i = (double)new.imag;
\t\t}
\t\telse if (PyArray_IsScalar(obj, CLongDouble)) {
\t\t\tnpy_clongdouble new;
\t\t\tPyArray_ScalarAsCtype(obj, &new);
\t\t\t(*v).r = (double)new.real;
\t\t\t(*v).i = (double)new.imag;
\t\t}
\t\telse { /* if (PyArray_IsScalar(obj, CDouble)) */
\t\t\tPyArray_ScalarAsCtype(obj, v);
\t\t}
\t\treturn 1;
\t}
\tif (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */
\t\tPyObject *arr;
\t\tif (PyArray_Check(obj)) {
\t\t\tarr = PyArray_Cast((PyArrayObject *)obj, PyArray_CDOUBLE);
\t\t}
\t\telse {
\t\t\tarr = PyArray_FromScalar(obj, PyArray_DescrFromType(PyArray_CDOUBLE));
\t\t}
\t\tif (arr==NULL) return 0;
\t\t(*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real;
\t\t(*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag;
\t\treturn 1;
\t}
\t/* Python does not provide PyNumber_Complex function :-( */
\t(*v).i=0.0;
\tif (PyFloat_Check(obj)) {
#ifdef __sgi
\t\t(*v).r = PyFloat_AsDouble(obj);
#else
\t\t(*v).r = PyFloat_AS_DOUBLE(obj);
#endif
\t\treturn 1;
\t}
\tif (PyInt_Check(obj)) {
\t\t(*v).r = (double)PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\tif (PyLong_Check(obj)) {
\t\t(*v).r = PyLong_AsDouble(obj);
\t\treturn (!PyErr_Occurred());
\t}
\tif (PySequence_Check(obj) && (!PyString_Check(obj))) {
\t\tPyObject *tmp = PySequence_GetItem(obj,0);
\t\tif (tmp) {
\t\t\tif (complex_double_from_pyobj(v,tmp,errmess)) {
\t\t\t\tPy_DECREF(tmp);
\t\t\t\treturn 1;
\t\t\t}
\t\t\tPy_DECREF(tmp);
\t\t}
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL)
\t\t\terr = PyExc_TypeError;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
# C helper: extract a `complex_float` by converting through
# complex_double_from_pyobj and narrowing each component.
needs['complex_float_from_pyobj']=['complex_float','complex_double_from_pyobj']
cfuncs['complex_float_from_pyobj']="""\
static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) {
\tcomplex_double cd={0.0,0.0};
\tif (complex_double_from_pyobj(&cd,obj,errmess)) {
\t\t(*v).r = (float)cd.r;
\t\t(*v).i = (float)cd.i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
# try_pyarr_from_<type> helpers: each expands TRYPYARRAYTEMPLATE (or the
# complex variant) to copy a C scalar back into a Python array argument
# in-place.  The matching `needs` entry lists each helper's prerequisites.
needs['try_pyarr_from_char']=['pyobj_from_char1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_char']='static int try_pyarr_from_char(PyObject* obj,char* v) {\n\tTRYPYARRAYTEMPLATE(char,\'c\');\n}\n'
# BUG FIX: this needs entry was registered under the key
# 'try_pyarr_from_signed_char', so it was immediately clobbered by the
# signed_char registration below and try_pyarr_from_unsigned_char was left
# with no needs entry (missing unsigned_char typedef in generated modules).
needs['try_pyarr_from_unsigned_char']=['TRYPYARRAYTEMPLATE','unsigned_char']
cfuncs['try_pyarr_from_unsigned_char']='static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n\tTRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n'
needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE','signed_char']
cfuncs['try_pyarr_from_signed_char']='static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n\tTRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n'
needs['try_pyarr_from_short']=['pyobj_from_short1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_short']='static int try_pyarr_from_short(PyObject* obj,short* v) {\n\tTRYPYARRAYTEMPLATE(short,\'s\');\n}\n'
needs['try_pyarr_from_int']=['pyobj_from_int1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_int']='static int try_pyarr_from_int(PyObject* obj,int* v) {\n\tTRYPYARRAYTEMPLATE(int,\'i\');\n}\n'
needs['try_pyarr_from_long']=['pyobj_from_long1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_long']='static int try_pyarr_from_long(PyObject* obj,long* v) {\n\tTRYPYARRAYTEMPLATE(long,\'l\');\n}\n'
needs['try_pyarr_from_long_long']=['pyobj_from_long_long1','TRYPYARRAYTEMPLATE','long_long']
cfuncs['try_pyarr_from_long_long']='static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n\tTRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n'
needs['try_pyarr_from_float']=['pyobj_from_float1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_float']='static int try_pyarr_from_float(PyObject* obj,float* v) {\n\tTRYPYARRAYTEMPLATE(float,\'f\');\n}\n'
needs['try_pyarr_from_double']=['pyobj_from_double1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_double']='static int try_pyarr_from_double(PyObject* obj,double* v) {\n\tTRYPYARRAYTEMPLATE(double,\'d\');\n}\n'
needs['try_pyarr_from_complex_float']=['pyobj_from_complex_float1','TRYCOMPLEXPYARRAYTEMPLATE','complex_float']
cfuncs['try_pyarr_from_complex_float']='static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n'
needs['try_pyarr_from_complex_double']=['pyobj_from_complex_double1','TRYCOMPLEXPYARRAYTEMPLATE','complex_double']
cfuncs['try_pyarr_from_complex_double']='static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
# C helper used by callback wrappers: inspect the user-supplied callable
# (plain function, bound method, callable instance, Fortran object, or
# CObject), work out how many arguments it takes, and build the argument
# tuple (padding with Py_None and appending the user's extra-args tuple).
needs['create_cb_arglist']=['CFUNCSMESS','PRINTPYOBJERR','MINMAX']
cfuncs['create_cb_arglist']="""\
static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) {
\tPyObject *tmp = NULL;
\tPyObject *tmp_fun = NULL;
\tint tot,opt,ext,siz,i,di=0;
\tCFUNCSMESS(\"create_cb_arglist\\n\");
\ttot=opt=ext=siz=0;
\t/* Get the total number of arguments */
\tif (PyFunction_Check(fun))
\t\ttmp_fun = fun;
\telse {
\t\tdi = 1;
\t\tif (PyObject_HasAttrString(fun,\"im_func\")) {
\t\t\ttmp_fun = PyObject_GetAttrString(fun,\"im_func\");
\t\t}
\t\telse if (PyObject_HasAttrString(fun,\"__call__\")) {
\t\t\ttmp = PyObject_GetAttrString(fun,\"__call__\");
\t\t\tif (PyObject_HasAttrString(tmp,\"im_func\"))
\t\t\t\ttmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
\t\t\telse {
\t\t\t\ttmp_fun = fun; /* built-in function */
\t\t\t\ttot = maxnofargs;
\t\t\t\tif (xa != NULL)
\t\t\t\t\ttot += PyTuple_Size((PyObject *)xa);
\t\t\t}
\t\t\tPy_XDECREF(tmp);
\t\t}
\t\telse if (PyFortran_Check(fun) || PyFortran_Check1(fun)) {
\t\t\ttot = maxnofargs;
\t\t\tif (xa != NULL)
\t\t\t\ttot += PyTuple_Size((PyObject *)xa);
\t\t\ttmp_fun = fun;
\t\t}
\t\telse if (PyCObject_Check(fun)) {
\t\t\ttot = maxnofargs;
\t\t\tif (xa != NULL)
\t\t\t\text = PyTuple_Size((PyObject *)xa);
\t\t\tif(ext>0) {
\t\t\t\tfprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\");
\t\t\t\tgoto capi_fail;
\t\t\t}
\t\t\ttmp_fun = fun;
\t\t}
\t}
if (tmp_fun==NULL) {
fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":fun->ob_type->tp_name));
goto capi_fail;
}
\tif (PyObject_HasAttrString(tmp_fun,\"func_code\")) {
\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\"))
\t\t\ttot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di;
\t\tPy_XDECREF(tmp);
\t}
\t/* Get the number of optional arguments */
\tif (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) {
\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\")))
\t\t\topt = PyTuple_Size(tmp);
\t\tPy_XDECREF(tmp);
\t}
\t/* Get the number of extra arguments */
\tif (xa != NULL)
\t\text = PyTuple_Size((PyObject *)xa);
\t/* Calculate the size of call-backs argument list */
\tsiz = MIN(maxnofargs+ext,tot);
\t*nofargs = MAX(0,siz-ext);
#ifdef DEBUGCFUNCS
\tfprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs);
#endif
\tif (siz<tot-opt) {
\t\tfprintf(stderr,\"create_cb_arglist: Failed to build argument list (siz) with enough arguments (tot-opt) required by user-supplied function (siz,tot,opt=%d,%d,%d).\\n\",siz,tot,opt);
\t\tgoto capi_fail;
\t}
\t/* Initialize argument list */
\t*args = (PyTupleObject *)PyTuple_New(siz);
\tfor (i=0;i<*nofargs;i++) {
\t\tPy_INCREF(Py_None);
\t\tPyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
\t}
\tif (xa != NULL)
\t\tfor (i=(*nofargs);i<siz;i++) {
\t\t\ttmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs));
\t\t\tPy_INCREF(tmp);
\t\t\tPyTuple_SET_ITEM(*args,i,tmp);
\t\t}
\tCFUNCSMESS(\"create_cb_arglist-end\\n\");
\treturn 1;
capi_fail:
\tif ((PyErr_Occurred())==NULL)
\t\tPyErr_SetString(#modulename#_error,errmess);
\treturn 0;
}
"""
# BUG FIX above: the `func_defaults` check had no braces, so the trailing
# Py_XDECREF(tmp) executed unconditionally.  When the callable had no
# func_defaults attribute, tmp still pointed at the already-released
# func_code object, causing a double decref / refcount corruption.
def buildcfuncs():
    """Register a ``pyarr_from_p_<ctype>1`` cppmacro for every C type in
    the capi map, plus a dims-aware variant for strings.

    Each macro wraps PyArray_SimpleNewFromData so generated code can build
    a 0-dim (or, for strings, 1-dim) array view over a C pointer.
    """
    from capi_maps import c2capi_map
    for ctype in c2capi_map.keys():
        macro_name = 'pyarr_from_p_%s1' % ctype
        cppmacros[macro_name] = (
            '#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))'
            % (macro_name, c2capi_map[ctype]))
    # Strings need an explicit dims argument (1-dim char array).
    macro_name = 'pyarr_from_p_string1'
    cppmacros[macro_name] = (
        '#define %s(v,dims) (PyArray_SimpleNewFromData(1,dims,PyArray_CHAR,(char *)v))'
        % (macro_name))
############ Auxiliary functions for sorting needs ###################
def append_needs(need,flag=1):
    """Record `need` (a key string or list of keys) in the proper
    ``outneeds`` category, first pulling in its prerequisites from the
    global ``needs`` table.

    With ``flag`` true the need is committed to ``outneeds``; with
    ``flag`` false nothing is committed and a temporary mapping
    {category: [names]} is returned so the caller can splice the
    prerequisites in front of the need itself.
    """
    global outneeds,needs
    if type(need)==types.ListType:
        # A list of needs: process each one independently.
        for n in need:
            append_needs(n,flag)
    elif type(need)==types.StringType:
        if not need: return
        # Find which category table defines this need.
        if includes0.has_key(need): n = 'includes0'
        elif includes.has_key(need): n = 'includes'
        elif typedefs.has_key(need): n = 'typedefs'
        elif typedefs_generated.has_key(need): n = 'typedefs_generated'
        elif cppmacros.has_key(need): n = 'cppmacros'
        elif cfuncs.has_key(need): n = 'cfuncs'
        elif callbacks.has_key(need): n = 'callbacks'
        elif f90modhooks.has_key(need): n = 'f90modhooks'
        elif commonhooks.has_key(need): n = 'commonhooks'
        else:
            errmess('append_needs: unknown need %s\n'%(`need`))
            return
        # Already scheduled for output: nothing to do.
        if need in outneeds[n]: return
        if flag:
            # Committing: gather all (transitive) prerequisites into tmp,
            # prepend them to their categories, then append `need` itself.
            tmp={}
            if needs.has_key(need):
                for nn in needs[need]:
                    t=append_needs(nn,0)
                    if type(t)==types.DictType:
                        for nnn in t.keys():
                            # Merge prerequisite lists per category.
                            if tmp.has_key(nnn): tmp[nnn]=tmp[nnn]+t[nnn]
                            else: tmp[nnn]=t[nnn]
            for nn in tmp.keys():
                for nnn in tmp[nn]:
                    if nnn not in outneeds[nn]:
                        # Prerequisites go in front so they are emitted first.
                        outneeds[nn]=[nnn]+outneeds[nn]
            outneeds[n].append(need)
        else:
            # Dry-run: return the would-be additions instead of committing.
            tmp={}
            if needs.has_key(need):
                for nn in needs[need]:
                    t=append_needs(nn,flag)
                    if type(t)==types.DictType:
                        for nnn in t.keys():
                            # Prerequisites of nn precede those gathered so far.
                            if tmp.has_key(nnn): tmp[nnn]=t[nnn]+tmp[nnn]
                            else: tmp[nnn]=t[nnn]
            if not tmp.has_key(n): tmp[n]=[]
            tmp[n].append(need)
            return tmp
    else:
        errmess('append_needs: expected list or string but got :%s\n'%(`need`))
def get_needs():
    """Sort each ``outneeds`` category so every name appears after all of
    its prerequisites (per the global ``needs`` table) and return the
    result as a mapping {category: ordered name list}.

    Uses a rotate-to-back scheme: if the head of the list still has a
    pending prerequisite later in the list, it is moved to the back;
    otherwise it is emitted.  A full pass with no progress indicates a
    circular dependency, which is reported and skipped.
    """
    global outneeds,needs
    res={}
    for n in outneeds.keys():
        out=[]
        saveout=copy.copy(outneeds[n])
        while len(outneeds[n])>0:
            if not needs.has_key(outneeds[n][0]):
                # Head has no prerequisites: emit it immediately.
                out.append(outneeds[n][0])
                del outneeds[n][0]
            else:
                # Does any remaining entry satisfy a prerequisite of the head?
                flag=0
                for k in outneeds[n][1:]:
                    if k in needs[outneeds[n][0]]:
                        flag=1
                        break
                if flag:
                    # Prerequisite still pending: rotate head to the back.
                    outneeds[n]=outneeds[n][1:]+[outneeds[n][0]]
                else:
                    out.append(outneeds[n][0])
                    del outneeds[n][0]
            if saveout and (0 not in map(lambda x,y:x==y,saveout,outneeds[n])):
                # List unchanged since last snapshot: no progress was made,
                # which means a circular dependency; dump the rest as-is.
                print n,saveout
                errmess('get_needs: no progress in sorting needs, probably circular dependence, skipping.\n')
                out=out+saveout
                break
            saveout=copy.copy(outneeds[n])
        if out==[]: out=[n]
        res[n]=out
    return res
| |
# Copyright 2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file will be used with PyPI in order to package and distribute the
# final product.
"""Tails the oplog of a shard and returns entries
"""
import bson
import inspect
import json
import logging
import os
import pymongo
import sys
import time
import threading
import util
class OplogThread(threading.Thread):
    """OplogThread gathers the updates for a single oplog.

    The thread tails one shard/replica set's oplog and forwards every
    relevant insert, update, and delete to the target system through
    ``doc_manager``, checkpointing its position in ``oplog_progress``.
    """

    def __init__(self, primary_conn, main_address, oplog_coll, is_sharded,
                 doc_manager, oplog_progress_dict, namespace_set, auth_key,
                 auth_username, repl_set=None):
        """Initialize the oplog thread.
        """
        super(OplogThread, self).__init__()
        #The connection to the primary for this replicaSet.
        self.primary_connection = primary_conn
        #The mongos for sharded setups
        #Otherwise the same as primary_connection.
        #The value is set later on.
        self.main_connection = None
        #The connection to the oplog collection
        self.oplog = oplog_coll
        #Boolean describing whether the cluster is sharded or not
        self.is_sharded = is_sharded
        #The document manager for the target system.
        #This is the same for all threads.
        self.doc_manager = doc_manager
        #Boolean describing whether or not the thread is running.
        self.running = True
        #Stores the timestamp of the last oplog entry read.
        self.checkpoint = None
        #A dictionary that stores OplogThread/timestamp pairs.
        #Represents the last checkpoint for a OplogThread.
        self.oplog_progress = oplog_progress_dict
        #The set of namespaces to process from the mongo cluster.
        self.namespace_set = namespace_set
        #If authentication is used, this is an admin password.
        self.auth_key = auth_key
        #This is the username used for authentication.
        self.auth_username = auth_username

        logging.info('OplogManager: Initializing oplog thread')

        if is_sharded:
            self.main_connection = pymongo.Connection(main_address)
        else:
            self.main_connection = pymongo.Connection(main_address,
                                                      replicaSet=repl_set)
            self.oplog = self.main_connection['local']['oplog.rs']

        if auth_key is not None:
            #Authenticate for the whole system
            primary_conn['admin'].authenticate(auth_username, auth_key)

        if self.oplog.find().count() == 0:
            err_msg = 'OplogThread: No oplog for thread:'
            logging.error('%s %s' % (err_msg, self.primary_connection))
            self.running = False

    def run(self):
        """Start the oplog worker.
        """
        while self.running is True:
            cursor = self.init_cursor()

            # we've fallen too far behind
            if cursor is None and self.checkpoint is not None:
                err_msg = "OplogManager: Last entry no longer in oplog"
                effect = "cannot recover!"
                logging.error('%s %s %s' % (err_msg, effect, self.oplog))
                self.running = False
                continue

            #The only entry is the last one we processed
            if util.retry_until_ok(cursor.count) == 1:
                time.sleep(1)
                continue

            last_ts = None
            err = False
            try:
                while True:
                    for entry in cursor:
                        #sync the current oplog operation
                        operation = entry['op']
                        ns = entry['ns']

                        #check if ns is excluded or not.
                        #also ensure non-empty namespace set.
                        if ns not in self.namespace_set and self.namespace_set:
                            continue

                        #delete
                        if operation == 'd':
                            entry['_id'] = entry['o']['_id']
                            self.doc_manager.remove(entry)
                        #insert/update. They are equal because of lack of
                        #support for partial update
                        elif operation == 'i' or operation == 'u':
                            doc = self.retrieve_doc(entry)
                            if doc is not None:
                                doc['_ts'] = util.bson_ts_to_long(entry['ts'])
                                doc['ns'] = ns
                                self.doc_manager.upsert(doc)

                        last_ts = entry['ts']
                    if not cursor.alive:
                        break
            except (pymongo.errors.AutoReconnect,
                    pymongo.errors.OperationFailure):
                err = True

            if err is True and self.auth_key is not None:
                # BUG FIX: this previously referenced the undefined name
                # `primary_conn` (an __init__ parameter, not in scope here),
                # raising NameError instead of re-authenticating after a
                # connection failure.
                self.primary_connection['admin'].authenticate(
                    self.auth_username, self.auth_key)
                err = False

            if last_ts is not None:
                self.checkpoint = last_ts
                self.update_checkpoint()

            time.sleep(2)

    def join(self):
        """Stop this thread from managing the oplog.
        """
        self.running = False
        threading.Thread.join(self)

    def retrieve_doc(self, entry):
        """Given the doc ID's, retrieve those documents from the mongos.
        """
        if not entry:
            return None

        namespace = entry['ns']

        # Update operations don't have an 'o' field specifying the document
        #- instead it specifies
        # the changes. So we use 'o2' for updates to get the doc_id later.
        if 'o2' in entry:
            doc_field = 'o2'
        else:
            doc_field = 'o'

        doc_id = entry[doc_field]['_id']
        db_name, coll_name = namespace.split('.', 1)

        coll = self.main_connection[db_name][coll_name]
        doc = util.retry_until_ok(coll.find_one, {'_id': doc_id})

        return doc

    def get_oplog_cursor(self, timestamp):
        """Move cursor to the proper place in the oplog.

        Returns a tailable cursor positioned at `timestamp`, or None if
        the timestamp is no longer present in the oplog (too stale).
        """
        if timestamp is None:
            return None

        cursor = util.retry_until_ok(self.oplog.find,
                                     {'ts': {'$lte': timestamp}})

        if (util.retry_until_ok(cursor.count)) == 0:
            return None

        # Check to see if cursor is too stale
        while (True):
            try:
                cursor = self.oplog.find({'ts': {'$gte': timestamp}},
                                         tailable=True, await_data=True)
                cursor = cursor.sort('$natural', pymongo.ASCENDING)
                cursor_len = cursor.count()
                break
            except (pymongo.errors.AutoReconnect,
                    pymongo.errors.OperationFailure):
                pass
        if cursor_len == 1:     # means we are the end of the oplog
            self.checkpoint = timestamp
            #to commit new TS after rollbacks
            return cursor
        elif cursor_len > 1:
            doc = next(cursor)
            if timestamp == doc['ts']:
                return cursor
            else:               # error condition
                logging.error('%s Bad timestamp in config file' % self.oplog)
                return None
        else:
            #rollback, we are past the last element in the oplog
            timestamp = self.rollback()

            logging.info('Finished rollback')
            return self.get_oplog_cursor(timestamp)

    def dump_collection(self):
        """Dumps collection into the target system.

        This method is called when we're initializing the cursor and have no
        configs i.e. when we're starting for the first time.
        """
        dump_set = self.namespace_set

        #no namespaces specified
        if not self.namespace_set:
            db_list = self.main_connection.database_names()
            for db in db_list:
                if db == "config" or db == "local":
                    continue
                coll_list = self.main_connection[db].collection_names()
                for coll in coll_list:
                    if coll.startswith("system"):
                        continue
                    namespace = str(db) + "." + str(coll)
                    dump_set.append(namespace)

        long_ts = None
        for namespace in dump_set:
            db, coll = namespace.split('.', 1)
            target_coll = self.main_connection[db][coll]
            cursor = util.retry_until_ok(target_coll.find)
            cursor = cursor.sort('$natural', pymongo.DESCENDING)
            oplog_cursor = util.retry_until_ok(self.oplog.find)
            oplog_cursor = oplog_cursor.sort('$natural', pymongo.DESCENDING)

            # Find the oplog timestamp of the newest dumped document so the
            # tail can resume from there.
            for entry in oplog_cursor:
                if entry['op'] != 'i':
                    continue
                #The 'o' field represents the document
                search_doc = entry['o']
                cursor.rewind()
                for doc in cursor:
                    if search_doc == doc:
                        long_ts = util.bson_ts_to_long(entry['ts'])
                        break
                if long_ts:
                    break

            cursor.rewind()
            try:
                for doc in cursor:
                    doc['ns'] = namespace
                    doc['_ts'] = long_ts
                    self.doc_manager.upsert(doc)
            except (pymongo.errors.AutoReconnect,
                    pymongo.errors.OperationFailure):
                err_msg = "OplogManager: Failed during dump collection"
                effect = "cannot recover!"
                logging.error('%s %s %s' % (err_msg, effect, self.oplog))
                self.running = False
                return

        if long_ts:
            long_ts = util.long_to_bson_ts(long_ts)
        else:  # Implies that we are just initiating the set
            long_ts = self.get_last_oplog_timestamp()

        return long_ts

    def get_last_oplog_timestamp(self):
        """Return the timestamp of the latest entry in the oplog.
        """
        curr = self.oplog.find().sort('$natural', pymongo.DESCENDING).limit(1)
        if curr.count(with_limit_and_skip=True) == 0:
            return None

        return curr[0]['ts']

    def init_cursor(self):
        """Position the cursor appropriately.

        The cursor is set to either the beginning of the oplog, or
        wherever it was last left off.
        """
        timestamp = self.read_last_checkpoint()

        if timestamp is None:
            timestamp = self.dump_collection()
            msg = "Dumped collection into target system"
            logging.info('OplogManager: %s %s'
                         % (self.oplog, msg))

        self.checkpoint = timestamp
        cursor = self.get_oplog_cursor(timestamp)
        if cursor is not None:
            self.update_checkpoint()

        return cursor

    def update_checkpoint(self):
        """Store the current checkpoint in the oplog progress dictionary.
        """
        with self.oplog_progress as oplog_prog:
            oplog_dict = oplog_prog.get_dict()
            oplog_dict[str(self.oplog)] = self.checkpoint

    def read_last_checkpoint(self):
        """Read the last checkpoint from the oplog progress dictionary.
        """
        oplog_str = str(self.oplog)
        ret_val = None

        with self.oplog_progress as oplog_prog:
            oplog_dict = oplog_prog.get_dict()
            if oplog_str in oplog_dict.keys():
                ret_val = oplog_dict[oplog_str]

        return ret_val

    def rollback(self):
        """Rollback target system to consistent state.

        The strategy is to find the latest timestamp in the target system and
        the largest timestamp in the oplog less than the latest target system
        timestamp. This defines the rollback window and we just roll these
        back until the oplog and target system are in consistent states.
        """
        self.doc_manager.commit()
        last_inserted_doc = self.doc_manager.get_last_doc()

        if last_inserted_doc is None:
            return None

        target_ts = util.long_to_bson_ts(last_inserted_doc['_ts'])
        last_oplog_entry = self.oplog.find_one({'ts': {'$lte': target_ts}},
                                               sort=[('$natural',
                                                      pymongo.DESCENDING)])
        if last_oplog_entry is None:
            return None

        rollback_cutoff_ts = last_oplog_entry['ts']
        start_ts = util.bson_ts_to_long(rollback_cutoff_ts)
        end_ts = last_inserted_doc['_ts']

        docs_to_rollback = self.doc_manager.search(start_ts, end_ts)

        rollback_set = {}   # this is a dictionary of ns:list of docs
        for doc in docs_to_rollback:
            ns = doc['ns']
            if ns in rollback_set:
                rollback_set[ns].append(doc)
            else:
                rollback_set[ns] = [doc]

        for namespace, doc_list in rollback_set.items():
            db, coll = namespace.split('.', 1)
            ObjId = bson.objectid.ObjectId
            bson_obj_id_list = [ObjId(doc['_id']) for doc in doc_list]

            retry = util.retry_until_ok
            to_update = retry(self.main_connection[db][coll].find,
                              {'_id': {'$in': bson_obj_id_list}})
            #doc list are docs in target system, to_update are docs in mongo
            doc_hash = {}  # hash by _id
            for doc in doc_list:
                doc_hash[bson.objectid.ObjectId(doc['_id'])] = doc

            to_index = []
            count = 0
            while True:
                try:
                    for doc in to_update:
                        if doc['_id'] in doc_hash:
                            del doc_hash[doc['_id']]
                            to_index.append(doc)
                    break
                except (pymongo.errors.OperationFailure,
                        pymongo.errors.AutoReconnect):
                    count += 1
                    if count > 60:
                        sys.exit(1)
                    time.sleep(1)

            #delete the inconsistent documents
            for doc in doc_hash.values():
                self.doc_manager.remove(doc)

            #insert the ones from mongo
            for doc in to_index:
                doc['_ts'] = util.bson_ts_to_long(rollback_cutoff_ts)
                doc['ns'] = namespace
                self.doc_manager.upsert(doc)

        return rollback_cutoff_ts
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook, _parse_gcs_url
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class BigQueryOperator(BaseOperator):
    """
    Executes BigQuery SQL queries in a specific BigQuery database

    :param bql: (Deprecated. Use `sql` parameter instead) the sql code to be
        executed (templated)
    :type bql: Can receive a str representing a sql statement,
        a list of str (sql statements), or reference to a template file.
        Template reference are recognized by str ending in '.sql'.
    :param sql: the sql code to be executed (templated)
    :type sql: Can receive a str representing a sql statement,
        a list of str (sql statements), or reference to a template file.
        Template reference are recognized by str ending in '.sql'.
    :param destination_dataset_table: A dotted
        (<project>.|<project>:)<dataset>.<table> that, if set, will store the results
        of the query. (templated)
    :type destination_dataset_table: string
    :param write_disposition: Specifies the action that occurs if the destination table
        already exists. (default: 'WRITE_EMPTY')
    :type write_disposition: string
    :param create_disposition: Specifies whether the job is allowed to create new tables.
        (default: 'CREATE_IF_NEEDED')
    :type create_disposition: string
    :param allow_large_results: Whether to allow large results.
    :type allow_large_results: boolean
    :param flatten_results: If true and query uses legacy SQL dialect, flattens
        all nested and repeated fields in the query results. ``allow_large_results``
        must be ``true`` if this is set to ``false``. For standard SQL queries, this
        flag is ignored and results are never flattened.
    :type flatten_results: boolean
    :param bigquery_conn_id: reference to a specific BigQuery hook.
    :type bigquery_conn_id: string
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: string
    :param udf_config: The User Defined Function configuration for the query.
        See https://cloud.google.com/bigquery/user-defined-functions for details.
    :type udf_config: list
    :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
    :type use_legacy_sql: boolean
    :param maximum_billing_tier: Positive integer that serves as a multiplier
        of the basic price.
        Defaults to None, in which case it uses the value set in the project.
    :type maximum_billing_tier: integer
    :param maximum_bytes_billed: Limits the bytes billed for this job.
        Queries that will have bytes billed beyond this limit will fail
        (without incurring a charge). If unspecified, this will be
        set to your project default.
    :type maximum_bytes_billed: float
    :param schema_update_options: Allows the schema of the destination
        table to be updated as a side effect of the load job.
    :type schema_update_options: tuple
    :param query_params: a dictionary containing query parameter types and
        values, passed to BigQuery.
    :type query_params: dict
    :param labels: a dictionary containing labels for the job/query,
        passed to BigQuery
    :type labels: dict
    :param priority: Specifies a priority for the query.
        Possible values include INTERACTIVE and BATCH.
        The default value is INTERACTIVE.
    :type priority: string
    :param time_partitioning: configure optional time partitioning fields i.e.
        partition by field, type and
        expiration as per API specifications. Note that 'field' is not available in
        conjunction with dataset.table$partition.
    :type time_partitioning: dict
    """

    template_fields = ('bql', 'sql', 'destination_dataset_table', 'labels')
    template_ext = ('.sql', )
    ui_color = '#e4f0e8'

    @apply_defaults
    def __init__(self,
                 bql=None,
                 sql=None,
                 destination_dataset_table=False,
                 write_disposition='WRITE_EMPTY',
                 allow_large_results=False,
                 flatten_results=False,
                 bigquery_conn_id='bigquery_default',
                 delegate_to=None,
                 udf_config=False,
                 use_legacy_sql=True,
                 maximum_billing_tier=None,
                 maximum_bytes_billed=None,
                 create_disposition='CREATE_IF_NEEDED',
                 schema_update_options=(),
                 query_params=None,
                 labels=None,
                 priority='INTERACTIVE',
                 time_partitioning=None,
                 *args,
                 **kwargs):
        super(BigQueryOperator, self).__init__(*args, **kwargs)
        self.bql = bql
        # `sql` takes precedence; fall back to the deprecated `bql`.
        self.sql = sql if sql else bql
        self.destination_dataset_table = destination_dataset_table
        self.write_disposition = write_disposition
        self.create_disposition = create_disposition
        self.allow_large_results = allow_large_results
        self.flatten_results = flatten_results
        self.bigquery_conn_id = bigquery_conn_id
        self.delegate_to = delegate_to
        self.udf_config = udf_config
        self.use_legacy_sql = use_legacy_sql
        self.maximum_billing_tier = maximum_billing_tier
        self.maximum_bytes_billed = maximum_bytes_billed
        self.schema_update_options = schema_update_options
        self.query_params = query_params
        self.labels = labels
        self.bq_cursor = None
        self.priority = priority
        # BUG FIX: `time_partitioning` previously defaulted to a mutable `{}`
        # in the signature, which is shared across every operator instance;
        # default to None and create a fresh dict per instance instead.
        self.time_partitioning = ({} if time_partitioning is None
                                  else time_partitioning)

        # TODO remove `bql` in Airflow 2.0
        if self.bql:
            import warnings
            warnings.warn('Deprecated parameter `bql` used in Task id: {}. '
                          'Use `sql` parameter instead to pass the sql to be '
                          'executed. `bql` parameter is deprecated and '
                          'will be removed in a future version of '
                          'Airflow.'.format(self.task_id),
                          category=DeprecationWarning)

        if self.sql is None:
            raise TypeError('{} missing 1 required positional '
                            'argument: `sql`'.format(self.task_id))

    def execute(self, context):
        """Run the configured query, creating the hook/cursor lazily on
        first execution so retries reuse the same cursor."""
        if self.bq_cursor is None:
            self.log.info('Executing: %s', self.sql)
            hook = BigQueryHook(
                bigquery_conn_id=self.bigquery_conn_id,
                use_legacy_sql=self.use_legacy_sql,
                delegate_to=self.delegate_to)
            conn = hook.get_conn()
            self.bq_cursor = conn.cursor()
        self.bq_cursor.run_query(
            self.sql,
            destination_dataset_table=self.destination_dataset_table,
            write_disposition=self.write_disposition,
            allow_large_results=self.allow_large_results,
            flatten_results=self.flatten_results,
            udf_config=self.udf_config,
            maximum_billing_tier=self.maximum_billing_tier,
            maximum_bytes_billed=self.maximum_bytes_billed,
            create_disposition=self.create_disposition,
            query_params=self.query_params,
            labels=self.labels,
            schema_update_options=self.schema_update_options,
            priority=self.priority,
            time_partitioning=self.time_partitioning
        )

    def on_kill(self):
        """Cancel the in-flight BigQuery job when the task is killed."""
        super(BigQueryOperator, self).on_kill()
        if self.bq_cursor is not None:
            self.log.info('Canceling running query due to execution timeout')
            self.bq_cursor.cancel_query()
class BigQueryCreateEmptyTableOperator(BaseOperator):
    """
    Creates a new, empty table in the specified BigQuery dataset,
    optionally with schema.

    The schema to be used for the BigQuery table may be specified in one of
    two ways. You may either directly pass the schema fields in, or you may
    point the operator to a Google cloud storage object name. The object in
    Google cloud storage must be a JSON file with the schema fields in it.
    You can also create a table without schema.

    :param project_id: The project to create the table into. (templated)
    :type project_id: string
    :param dataset_id: The dataset to create the table into. (templated)
    :type dataset_id: string
    :param table_id: The Name of the table to be created. (templated)
    :type table_id: string
    :param schema_fields: If set, the schema field list as defined here:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
        **Example**: ::
            schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
    :type schema_fields: list
    :param gcs_schema_object: Full path to the JSON file containing
        schema (templated). For
        example: ``gs://test-bucket/dir1/dir2/employee_schema.json``
    :type gcs_schema_object: string
    :param time_partitioning: configure optional time partitioning fields i.e.
        partition by field, type and expiration as per API specifications.
        .. seealso::
            https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
    :type time_partitioning: dict
    :param bigquery_conn_id: Reference to a specific BigQuery hook.
    :type bigquery_conn_id: string
    :param google_cloud_storage_conn_id: Reference to a specific Google
        cloud storage hook.
    :type google_cloud_storage_conn_id: string
    :param delegate_to: The account to impersonate, if any. For this to
        work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: string
    :param labels: a dictionary containing labels for the table, passed to BigQuery
    :type labels: dict

    **Example (with schema JSON in GCS)**: ::

        CreateTable = BigQueryCreateEmptyTableOperator(
            task_id='BigQueryCreateEmptyTableOperator_task',
            dataset_id='ODS',
            table_id='Employees',
            project_id='internal-gcp-project',
            gcs_schema_object='gs://schema-bucket/employee_schema.json',
            bigquery_conn_id='airflow-service-account',
            google_cloud_storage_conn_id='airflow-service-account'
        )

    **Corresponding Schema file** (``employee_schema.json``): ::

        [
            {
                "mode": "NULLABLE",
                "name": "emp_name",
                "type": "STRING"
            },
            {
                "mode": "REQUIRED",
                "name": "salary",
                "type": "INTEGER"
            }
        ]

    **Example (with schema in the DAG)**: ::

        CreateTable = BigQueryCreateEmptyTableOperator(
            task_id='BigQueryCreateEmptyTableOperator_task',
            dataset_id='ODS',
            table_id='Employees',
            project_id='internal-gcp-project',
            schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}],
            bigquery_conn_id='airflow-service-account',
            google_cloud_storage_conn_id='airflow-service-account'
        )
    """
    template_fields = ('dataset_id', 'table_id', 'project_id',
                       'gcs_schema_object', 'labels')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(self,
                 dataset_id,
                 table_id,
                 project_id=None,
                 schema_fields=None,
                 gcs_schema_object=None,
                 time_partitioning=None,
                 bigquery_conn_id='bigquery_default',
                 google_cloud_storage_conn_id='google_cloud_default',
                 delegate_to=None,
                 labels=None,
                 *args, **kwargs):
        super(BigQueryCreateEmptyTableOperator, self).__init__(*args, **kwargs)
        self.project_id = project_id
        self.dataset_id = dataset_id
        self.table_id = table_id
        self.schema_fields = schema_fields
        self.gcs_schema_object = gcs_schema_object
        self.bigquery_conn_id = bigquery_conn_id
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to
        # ``None`` (instead of a ``{}`` default argument) avoids the shared
        # mutable-default pitfall; normalize here so downstream code gets a dict.
        self.time_partitioning = {} if time_partitioning is None else time_partitioning
        self.labels = labels

    def execute(self, context):
        """Create the (optionally schema'd) empty table via the BigQuery hook."""
        bq_hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                               delegate_to=self.delegate_to)
        if not self.schema_fields and self.gcs_schema_object:
            # Schema was given as a GCS JSON object: download and parse it.
            gcs_bucket, gcs_object = _parse_gcs_url(self.gcs_schema_object)
            gcs_hook = GoogleCloudStorageHook(
                google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
                delegate_to=self.delegate_to)
            schema_fields = json.loads(gcs_hook.download(
                gcs_bucket,
                gcs_object).decode("utf-8"))
        else:
            schema_fields = self.schema_fields

        conn = bq_hook.get_conn()
        cursor = conn.cursor()
        cursor.create_empty_table(
            project_id=self.project_id,
            dataset_id=self.dataset_id,
            table_id=self.table_id,
            schema_fields=schema_fields,
            time_partitioning=self.time_partitioning,
            labels=self.labels
        )
class BigQueryCreateExternalTableOperator(BaseOperator):
    """
    Creates a new external table in the dataset with the data in Google Cloud
    Storage.

    The schema to be used for the BigQuery table may be specified in one of
    two ways. You may either directly pass the schema fields in, or you may
    point the operator to a Google cloud storage object name. The object in
    Google cloud storage must be a JSON file with the schema fields in it.

    :param bucket: The bucket to point the external table to. (templated)
    :type bucket: string
    :param source_objects: List of Google cloud storage URIs to point
        table to. (templated)
        If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
    :type source_objects: list
    :param destination_project_dataset_table: The dotted (<project>.)<dataset>.<table>
        BigQuery table to load data into (templated). If <project> is not included,
        project will be the project defined in the connection json.
    :type destination_project_dataset_table: string
    :param schema_fields: If set, the schema field list as defined here:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
        **Example**: ::
            schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
        Should not be set when source_format is 'DATASTORE_BACKUP'.
    :type schema_fields: list
    :param schema_object: If set, a GCS object path pointing to a .json file that
        contains the schema for the table. (templated)
    :type schema_object: string
    :param source_format: File format of the data.
    :type source_format: string
    :param compression: [Optional] The compression type of the data source.
        Possible values include GZIP and NONE.
        The default value is NONE.
        This setting is ignored for Google Cloud Bigtable,
        Google Cloud Datastore backups and Avro formats.
    :type compression: string
    :param skip_leading_rows: Number of rows to skip when loading from a CSV.
    :type skip_leading_rows: int
    :param field_delimiter: The delimiter to use for the CSV.
    :type field_delimiter: string
    :param max_bad_records: The maximum number of bad records that BigQuery can
        ignore when running the job.
    :type max_bad_records: int
    :param quote_character: The value that is used to quote data sections in a CSV file.
    :type quote_character: string
    :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
    :type allow_quoted_newlines: boolean
    :param allow_jagged_rows: Accept rows that are missing trailing optional columns.
        The missing values are treated as nulls. If false, records with missing trailing
        columns are treated as bad records, and if there are too many bad records, an
        invalid error is returned in the job result. Only applicable to CSV, ignored
        for other formats.
    :type allow_jagged_rows: bool
    :param bigquery_conn_id: Reference to a specific BigQuery hook.
    :type bigquery_conn_id: string
    :param google_cloud_storage_conn_id: Reference to a specific Google
        cloud storage hook.
    :type google_cloud_storage_conn_id: string
    :param delegate_to: The account to impersonate, if any. For this to
        work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: string
    :param src_fmt_configs: configure optional fields specific to the source format
    :type src_fmt_configs: dict
    :param labels: a dictionary containing labels for the table, passed to BigQuery
    :type labels: dict
    """
    template_fields = ('bucket', 'source_objects',
                       'schema_object', 'destination_project_dataset_table', 'labels')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(self,
                 bucket,
                 source_objects,
                 destination_project_dataset_table,
                 schema_fields=None,
                 schema_object=None,
                 source_format='CSV',
                 compression='NONE',
                 skip_leading_rows=0,
                 field_delimiter=',',
                 max_bad_records=0,
                 quote_character=None,
                 allow_quoted_newlines=False,
                 allow_jagged_rows=False,
                 bigquery_conn_id='bigquery_default',
                 google_cloud_storage_conn_id='google_cloud_default',
                 delegate_to=None,
                 src_fmt_configs=None,
                 labels=None,
                 *args, **kwargs):
        super(BigQueryCreateExternalTableOperator, self).__init__(*args, **kwargs)

        # GCS config
        self.bucket = bucket
        self.source_objects = source_objects
        self.schema_object = schema_object

        # BQ config
        self.destination_project_dataset_table = destination_project_dataset_table
        self.schema_fields = schema_fields
        self.source_format = source_format
        self.compression = compression
        self.skip_leading_rows = skip_leading_rows
        self.field_delimiter = field_delimiter
        self.max_bad_records = max_bad_records
        self.quote_character = quote_character
        self.allow_quoted_newlines = allow_quoted_newlines
        self.allow_jagged_rows = allow_jagged_rows

        self.bigquery_conn_id = bigquery_conn_id
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to

        # ``None`` (instead of a ``{}`` default argument) avoids the shared
        # mutable-default pitfall; normalize here so downstream code gets a dict.
        self.src_fmt_configs = {} if src_fmt_configs is None else src_fmt_configs
        self.labels = labels

    def execute(self, context):
        """Create the external table, resolving the schema from GCS if needed."""
        bq_hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                               delegate_to=self.delegate_to)

        # DATASTORE_BACKUP sources carry their own schema, so the GCS schema
        # object is only consulted for the other formats.
        if not self.schema_fields and self.schema_object \
                and self.source_format != 'DATASTORE_BACKUP':
            gcs_hook = GoogleCloudStorageHook(
                google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
                delegate_to=self.delegate_to)
            schema_fields = json.loads(gcs_hook.download(
                self.bucket,
                self.schema_object).decode("utf-8"))
        else:
            schema_fields = self.schema_fields

        source_uris = ['gs://{}/{}'.format(self.bucket, source_object)
                       for source_object in self.source_objects]
        conn = bq_hook.get_conn()
        cursor = conn.cursor()
        cursor.create_external_table(
            external_project_dataset_table=self.destination_project_dataset_table,
            schema_fields=schema_fields,
            source_uris=source_uris,
            source_format=self.source_format,
            compression=self.compression,
            skip_leading_rows=self.skip_leading_rows,
            field_delimiter=self.field_delimiter,
            max_bad_records=self.max_bad_records,
            quote_character=self.quote_character,
            allow_quoted_newlines=self.allow_quoted_newlines,
            allow_jagged_rows=self.allow_jagged_rows,
            src_fmt_configs=self.src_fmt_configs,
            labels=self.labels
        )
class BigQueryDeleteDatasetOperator(BaseOperator):
    """
    This operator deletes an existing dataset from your Project in Big query.
    https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/delete

    :param project_id: The project id of the dataset.
    :type project_id: string
    :param dataset_id: The dataset to be deleted.
    :type dataset_id: string

    **Example**: ::

        delete_temp_data = BigQueryDeleteDatasetOperator(
            dataset_id = 'temp-dataset',
            project_id = 'temp-project',
            bigquery_conn_id='_my_gcp_conn_',
            task_id='Deletetemp',
            dag=dag)
    """
    template_fields = ('dataset_id', 'project_id')
    ui_color = '#f00004'

    @apply_defaults
    def __init__(self,
                 dataset_id,
                 project_id=None,
                 bigquery_conn_id='bigquery_default',
                 delegate_to=None,
                 *args, **kwargs):
        # Initialize the BaseOperator first (conventional ordering) so the
        # task is fully set up before we touch instance state or log.
        super(BigQueryDeleteDatasetOperator, self).__init__(*args, **kwargs)
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.bigquery_conn_id = bigquery_conn_id
        self.delegate_to = delegate_to

        self.log.info('Dataset id: %s', self.dataset_id)
        self.log.info('Project id: %s', self.project_id)

    def execute(self, context):
        """Delete the configured dataset through the BigQuery hook."""
        bq_hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                               delegate_to=self.delegate_to)
        conn = bq_hook.get_conn()
        cursor = conn.cursor()
        cursor.delete_dataset(
            project_id=self.project_id,
            dataset_id=self.dataset_id
        )
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of tornadis library released under the MIT license.
# See the LICENSE file for more information.
import tornado.gen
import tornado.locks
import hiredis
import collections
import functools
import logging
from tornadis.connection import Connection
from tornadis.pipeline import Pipeline
from tornadis.utils import format_args_in_redis_protocol
from tornadis.write_buffer import WriteBuffer
from tornadis.exceptions import ConnectionError, ClientError
LOG = logging.getLogger(__name__)
def discard_reply_cb(reply):
    """Default reply callback: silently drop the redis reply."""
    return None
class Client(object):
    """High level object to interact with redis.

    Attributes:
        autoconnect (boolean): True if the client is in autoconnect mode
            (and in autoreconnection mode) (default True).
        password (string): the password to authenticate with.
        db (int): database number.
        connection_kwargs (dict): :class:`Connection` object
            kwargs (note that read_callback and close_callback args are
            set automatically).
    """

    def __init__(self, autoconnect=True, password=None, db=0,
                 **connection_kwargs):
        """Constructor.

        Args:
            autoconnect (boolean): True if the client is in autoconnect mode
                (and in autoreconnection mode) (default True).
            password (string): the password to authenticate with.
            db (int): database number.
            **connection_kwargs: :class:`Connection` object kwargs.
        """
        if 'read_callback' in connection_kwargs or \
           'close_callback' in connection_kwargs:
            raise Exception("read_callback and close_callback are not allowed "
                            "to be used here.")
        self.connection_kwargs = connection_kwargs
        self.autoconnect = autoconnect
        self.password = password
        self.db = db
        self.subscribed = False
        # FIX: the original assigned ``self.__connection = None`` twice;
        # a single assignment is sufficient.
        self.__connection = None
        self.__reader = None
        # Used for normal clients (FIFO of per-command callbacks)
        self.__callback_queue = None
        # Used for subscribed clients (replies parked until consumed)
        self._condition = tornado.locks.Condition()
        self._reply_list = None

    @property
    def title(self):
        # NOTE(review): raises AttributeError if accessed before connect()
        # (``__connection`` is None until then) — confirm callers guard this.
        return self.__connection._redis_server()

    def is_connected(self):
        """Returns True is the client is connected to redis.

        Returns:
            True if the client if connected to redis.
        """
        return (self.__connection is not None) and \
               (self.__connection.is_connected())

    @tornado.gen.coroutine
    def connect(self):
        """Connects the client object to redis.

        It's safe to use this method even if you are already connected.
        Note: this method is useless with autoconnect mode (default).

        Returns:
            a Future object with True as result if the connection was ok.
        """
        if self.is_connected():
            raise tornado.gen.Return(True)
        cb1 = self._read_callback
        cb2 = self._close_callback
        self.__callback_queue = collections.deque()
        self._reply_list = []
        self.__reader = hiredis.Reader(replyError=ClientError)
        kwargs = self.connection_kwargs
        self.__connection = Connection(cb1, cb2, **kwargs)
        connection_status = yield self.__connection.connect()
        if connection_status is not True:
            # nothing left to do here, return
            raise tornado.gen.Return(False)
        if self.password is not None:
            authentication_status = yield self._call('AUTH', self.password)
            if authentication_status != b'OK':
                # incorrect password, return back the result
                LOG.warning("impossible to connect: bad password")
                self.__connection.disconnect()
                raise tornado.gen.Return(False)
        if self.db != 0:
            db_status = yield self._call('SELECT', self.db)
            if db_status != b'OK':
                LOG.warning("can't select db %s", self.db)
                raise tornado.gen.Return(False)
        raise tornado.gen.Return(True)

    def disconnect(self):
        """Disconnects the client object from redis.

        It's safe to use this method even if you are already disconnected.
        """
        if not self.is_connected():
            return
        if self.__connection is not None:
            self.__connection.disconnect()

    def _close_callback(self):
        """Callback called when redis closed the connection.

        The callback queue is emptied and we call each callback found
        with None or with an exception object to wake up blocked client.
        """
        while True:
            try:
                callback = self.__callback_queue.popleft()
                callback(ConnectionError("closed connection"))
            except IndexError:
                break
        if self.subscribed:
            # pubsub clients
            self._reply_list.append(ConnectionError("closed connection"))
            self._condition.notify_all()

    def _read_callback(self, data=None):
        """Callback called when some data are read on the socket.

        The buffer is given to the hiredis parser. If a reply is complete,
        we put the decoded reply to on the reply queue.

        Args:
            data (str): string (buffer) read on the socket.
        """
        try:
            if data is not None:
                self.__reader.feed(data)
                while True:
                    reply = self.__reader.gets()
                    if reply is not False:
                        try:
                            callback = self.__callback_queue.popleft()
                            # normal client (1 reply = 1 callback)
                            callback(reply)
                        except IndexError:
                            # pubsub clients
                            self._reply_list.append(reply)
                            self._condition.notify_all()
                    else:
                        break
        except hiredis.ProtocolError:
            # something nasty occurred (corrupt stream => no way to recover)
            LOG.warning("corrupted stream => disconnect")
            self.disconnect()

    def call(self, *args, **kwargs):
        """Calls a redis command and returns a Future of the reply.

        Args:
            *args: full redis command as variable length argument list or
                a Pipeline object (as a single argument).
            **kwargs: internal private options (do not use).

        Returns:
            a Future with the decoded redis reply as result (when available) or
            a ConnectionError object in case of connection error.

        Raises:
            ClientError: your Pipeline object is empty.

        Examples:
            >>> @tornado.gen.coroutine
                def foobar():
                    client = Client()
                    result = yield client.call("HSET", "key", "field", "val")
        """
        if not self.is_connected():
            if self.autoconnect:
                # We use this method only when we are not connected
                # to avoid performance penalty due to gen.coroutine decorator
                return self._call_with_autoconnect(*args, **kwargs)
            else:
                error = ConnectionError("you are not connected and "
                                        "autoconnect=False")
                return tornado.gen.maybe_future(error)
        return self._call(*args, **kwargs)

    @tornado.gen.coroutine
    def _call_with_autoconnect(self, *args, **kwargs):
        """Connect first (if needed), then forward to _call."""
        yield self.connect()
        if not self.is_connected():
            raise tornado.gen.Return(ConnectionError("impossible to connect"))
        res = yield self._call(*args, **kwargs)
        raise tornado.gen.Return(res)

    def async_call(self, *args, **kwargs):
        """Calls a redis command, waits for the reply and call a callback.

        Following options are available (not part of the redis command itself):

        - callback
            Function called (with the result as argument) when the result
            is available. If not set, the reply is silently discarded. In
            case of errors, the callback is called with a
            TornadisException object as argument.

        Args:
            *args: full redis command as variable length argument list or
                a Pipeline object (as a single argument).
            **kwargs: options as keyword parameters.

        Examples:
            >>> def cb(result):
                    pass
            >>> client.async_call("HSET", "key", "field", "val", callback=cb)
        """
        def after_autoconnect_callback(future):
            if self.is_connected():
                self._call(*args, **kwargs)
            else:
                # FIXME
                pass

        if 'callback' not in kwargs:
            kwargs['callback'] = discard_reply_cb
        if not self.is_connected():
            if self.autoconnect:
                connect_future = self.connect()
                cb = after_autoconnect_callback
                self.__connection._ioloop.add_future(connect_future, cb)
            else:
                error = ConnectionError("you are not connected and "
                                        "autoconnect=False")
                kwargs['callback'](error)
        else:
            self._call(*args, **kwargs)

    def _call(self, *args, **kwargs):
        """Dispatch a command (or Pipeline) in callback or Future mode."""
        callback = False
        if 'callback' in kwargs:
            callback = True
        if len(args) == 1 and isinstance(args[0], Pipeline):
            fn = self._pipelined_call
            pipeline = args[0]
            if pipeline.number_of_stacked_calls == 0:
                excep = ClientError("empty pipeline")
                if callback:
                    kwargs['callback'](excep)
                    # FIX: return here — the original fell through and wrote
                    # an empty pipeline to the connection after reporting
                    # the error.
                    return
                else:
                    return tornado.gen.maybe_future(excep)
            arguments = (pipeline,)
        else:
            if "__multiple_replies" in kwargs:
                fn = self._simple_call_with_multiple_replies
                arguments = tuple([kwargs["__multiple_replies"]] + list(args))
            else:
                fn = self._simple_call
                arguments = args
        if callback:
            fn(*arguments, **kwargs)
        else:
            return tornado.gen.Task(fn, *arguments, **kwargs)

    def _reply_aggregator(self, callback, replies, reply):
        """Collect `replies` replies, then call `callback` with the list."""
        self._reply_list.append(reply)
        if len(self._reply_list) == replies:
            callback(self._reply_list)
            self._reply_list = []

    def _simple_call(self, *args, **kwargs):
        """Send one command; its callback is queued for the single reply."""
        callback = kwargs['callback']
        msg = format_args_in_redis_protocol(*args)
        self.__callback_queue.append(callback)
        self.__connection.write(msg)

    def _simple_call_with_multiple_replies(self, replies, *args, **kwargs):
        """Send one command expecting `replies` replies (e.g. SUBSCRIBE)."""
        original_callback = kwargs['callback']
        msg = format_args_in_redis_protocol(*args)
        callback = functools.partial(self._reply_aggregator, original_callback,
                                     replies)
        for _ in range(0, replies):
            self.__callback_queue.append(callback)
        self.__connection.write(msg)

    def _pipelined_call(self, pipeline, callback):
        """Send every stacked command in one write; aggregate the replies."""
        buf = WriteBuffer()
        replies = len(pipeline.pipelined_args)
        cb = functools.partial(self._reply_aggregator, callback, replies)
        for args in pipeline.pipelined_args:
            self.__callback_queue.append(cb)
            tmp_buf = format_args_in_redis_protocol(*args)
            buf.append(tmp_buf)
        self.__connection.write(buf)

    def get_last_state_change_timedelta(self):
        """Return how long ago the underlying connection changed state."""
        return self.__connection._state.get_last_state_change_timedelta()
| |
"""
The application view router
"""
import logging
from datetime import timedelta
import uuid
import flask
import arrow
from sqlalchemy import exc
from scheduler import app, db
import schedbuilder as builder
import models
# Module-level logger; a StreamHandler is attached so messages are emitted
# even if the host application has not configured logging.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler())
@app.route('/')
def index():
    """Serve the app's single HTML page; every other endpoint returns JSON."""
    LOGGER.debug('Hit index page')
    return flask.render_template('index.html')
@app.route('/register', methods=['POST'])
def register():
    """
    Registers the user in the system.
    Registering a user also logs them in.

    Expects a JSON body with 'username', 'password' and 'coach' (a coach id).
    Returns ``{'success': bool}``; on success the new user id is stored on
    the session.
    """
    LOGGER.info('Register user')
    data = flask.request.get_json()
    try:
        # Create the user and link it to the chosen coach via a Client row.
        user = models.User.create(
            username=data['username'],
            password=data['password'],)
        coach = db.session.query(models.Coach).filter(
            models.Coach.id == data['coach']).one()
        client = models.Client(
            id=str(uuid.uuid1()),
            user_id=user.id,
            coach_id=coach.id)
        db.session.add(user)
        db.session.add(client)
        # Commit before touching the session so a DB failure can't leave a
        # logged-in user without a persisted account.
        db.session.commit()
        status = True
        flask.session['user_id'] = user.id
    except exc.SQLAlchemyError:
        LOGGER.exception('User registration failed')
        status = False
        db.session.rollback()
    return flask.jsonify({'success': status})
@app.route('/login', methods=['POST'])
def login():
    """
    Log in the user and mark them as logged in on the session.

    Expects a JSON body with 'username' and 'password'. Returns
    ``{'success': True}`` on a match; a missing user (``.one()`` raises) or a
    password mismatch both fall through to ``{'success': False}``.
    """
    LOGGER.info('Login')
    data = flask.request.get_json()
    try:
        user = db.session.query(models.User).filter_by(username=data['username']).one()
        if user.password_matches(data['password']):
            flask.session['user_id'] = user.id
            return flask.jsonify({'success': True})
    except exc.SQLAlchemyError:
        LOGGER.exception('Login failed for %s', data['username'])
        db.session.rollback()
    # reached on unknown user or wrong password
    return flask.jsonify({'success': False})
@app.route('/logout', methods=['POST'])
def logout():
    """Forget the session's user id and send the browser back home."""
    LOGGER.info('Logout')
    flask.session.pop('user_id', None)
    return flask.redirect('/')
@app.route('/coaches')
def coaches():
    """Return every coach in the db as ``{'coaches': [{'id', 'name'}, ...]}``."""
    listing = [
        {'id': coach.id, 'name': coach.user.fullname}
        for coach in db.session.query(models.Coach).all()
    ]
    return flask.jsonify({'coaches': listing})
@app.route('/hour/<string:today>/<int:hour>', methods=['PUT', 'DELETE'])
def hour(today, hour):
    """
    Adds or removes a scheduled call for the specified day/hour.

    PUT schedules, DELETE unschedules. Requires a logged-in user; returns
    the refreshed month view on success and ``{}`` on any failure.
    """
    user_id = flask.session.get('user_id')
    if not user_id:
        LOGGER.warning('Attempted infiltration by enemy agents')
        return flask.jsonify({})
    # get the client
    try:
        client = db.session.query(models.Client).filter(
            models.Client.user_id == user_id).one()
    except exc.SQLAlchemyError:
        LOGGER.exception('Failed to retrieve client')
        db.session.rollback()
        return flask.jsonify({})
    # add schedule
    if flask.request.method == 'PUT':
        LOGGER.info('Scheduling call for %s at %s',
                    today, hour)
        try:
            builder.schedule_call(
                day=arrow.get(today).date(),
                hour=hour,
                client=client)
        except exc.SQLAlchemyError:
            # FIX: this branch previously logged 'Failed to unschedule call'
            LOGGER.exception('Failed to schedule call')
            db.session.rollback()
            return flask.jsonify({})
    # remove schedule
    elif flask.request.method == 'DELETE':
        LOGGER.info('Unscheduling call for %s at %s',
                    today, hour)
        try:
            builder.unschedule_call(
                day=arrow.get(today).date(),
                hour=hour,
                client=client)
        except exc.SQLAlchemyError:
            LOGGER.exception('Failed to unschedule call')
            db.session.rollback()
            return flask.jsonify({})
    # hand back the refreshed month so the UI can re-render immediately
    return month(today)
@app.route('/day/<string:today>')
def day(today):
    """
    Gets the current calendar data for week containing `today`
    GET retrieves the data for the specified week.
    POST posts the appointments for a specified week.

    Returns ``{}`` when no user is logged in or on a database error.
    """
    user_id = flask.session.get('user_id')
    if not user_id:
        LOGGER.warn('Attempted infiltration by enemy agents')
        return flask.jsonify({})
    try:
        client = db.session.query(models.Client).filter(models.Client.user_id==user_id).one()
        # NOTE: local `day` shadows this view function inside the try block
        day = arrow.get(today).date()
        LOGGER.info('Getting calendar for %s', day)
        return flask.jsonify({
            'name': client.user.username,
            'coach': client.coach.user.fullname,
            # prev/next links for day-by-day navigation in the UI
            'previous': (day - timedelta(days=1)).isoformat(),
            'next': (day + timedelta(days=1)).isoformat(),
            'day': builder.calendar_day(day, client),
        })
    except exc.SQLAlchemyError:
        LOGGER.exception('Day retrieval failed for %s', user_id)
        db.session.rollback()
        return flask.jsonify({})
@app.route('/month/<string:today>')
def month(today):
    """
    Gets the current calendar data for the month containing `today`.

    Returns the month name, prev/next navigation dates and the per-week
    calendar data; ``{}`` when no user is logged in or on a database error.
    """
    user_id = flask.session.get('user_id')
    if not user_id:
        LOGGER.warning('Attempted infiltration by enemy agents')
        return flask.jsonify({})
    try:
        client = db.session.query(models.Client).filter(
            models.Client.user_id == user_id).one()
        start_day = arrow.get(today).date()
        days = builder.monthdays(start_day)
        # FIX: previously logged the `month` view function object itself
        LOGGER.info('Getting calendar for month containing %s', start_day)
        return flask.jsonify({
            'name': client.user.username,
            'coach': client.coach.user.fullname,
            'month': start_day.strftime('%B %Y'),
            'previous': (days[0] - timedelta(days=1)).isoformat(),
            'next': (days[-1] + timedelta(days=1)).isoformat(),
            'weeks': builder.calendar_days(days, client),
        })
    except exc.SQLAlchemyError:
        # FIX: message previously said 'Week retrieval failed'
        LOGGER.exception('Month retrieval failed for %s', user_id)
        db.session.rollback()
        return flask.jsonify({})
| |
from mongo import *
from util import getMeters, addIfKey, getTime
def isNotEmpty(item):
    """Return True if `item` (any sized container) has at least one element.

    Returns an explicit bool instead of the raw length; since bool is an int
    subclass this is fully compatible with existing truthiness/`== 1` uses.
    """
    return len(item) > 0
class Point:
    """A geographic coordinate, kept both in degrees and in meters.

    The meter values are derived from the degree values via ``getMeters``
    at construction time.
    """

    def __init__(self, lat, lon):
        self.lat = lat
        self.lon = lon
        self.mLat = getMeters(lat)
        self.mLon = getMeters(lon)

    def getLatLon(self):
        """Return the coordinate as a (lat, lon) tuple."""
        return (self.lat, self.lon)

    def getItem(self):
        """Return the coordinate as a document keyed by LAT_KEY/LON_KEY."""
        return {LAT_KEY: self.lat, LON_KEY: self.lon}

    def __str__(self):
        return str(self.getItem())

    def __repr__(self):
        return self.__str__()
class TruckPoint(DBItem):
    """A single GPS sample emitted by a truck, stored under ``tblKey``."""
    tblKey = TRUCK_POINTS_KEY
    __slots__ = [TRUCK_ID_KEY, TIME_KEY, VELOCITY_KEY, LAT_KEY, LON_KEY, DATE_NUM_KEY, POINT_KEY, DRIVER_KEY,
                 TEMPERATURE_KEY, DIRECTION_KEY, PATENT_KEY, CAPACITY_KEY, COMMUNE_KEY, TIMESTAMP_KEY]

    def __init__(self, item, db):
        DBItem.__init__(self, item, db)
        self.truckId = item[TRUCK_ID_KEY]
        self.time = item[TIME_KEY]
        self.velocity = item[VELOCITY_KEY]
        self.lat = item[LAT_KEY]
        self.lon = item[LON_KEY]
        self.dateNum = item[DATE_NUM_KEY]
        self.temperature = item[TEMPERATURE_KEY]
        self.direction = item[DIRECTION_KEY]
        self.patent = item[PATENT_KEY]
        self.commune = item[COMMUNE_KEY]
        self.timestamp = item[TIMESTAMP_KEY]
        # derived convenience wrapper around the raw lat/lon
        self.point = Point(self.lat, self.lon)

    def save(self):
        """Copy the attributes back into the raw item and persist it.

        FIX: the original class defined ``save`` twice; the second definition
        shadowed this one, dropped ``POINT_KEY`` and called
        ``super(Point, self)`` — a TypeError, since ``Point`` is not a base
        class of ``TruckPoint``. Only the correct version is kept.
        """
        self.item[TRUCK_ID_KEY] = self.truckId
        self.item[TIME_KEY] = self.time
        self.item[VELOCITY_KEY] = self.velocity
        self.item[LAT_KEY] = self.lat
        self.item[LON_KEY] = self.lon
        self.item[DATE_NUM_KEY] = self.dateNum
        self.item[TEMPERATURE_KEY] = self.temperature
        self.item[DIRECTION_KEY] = self.direction
        self.item[COMMUNE_KEY] = self.commune
        self.item[PATENT_KEY] = self.patent
        # NOTE(review): stores the Point object itself; confirm the DB layer
        # can serialize it (``self.point.getItem()`` may be intended).
        self.item[POINT_KEY] = self.point
        self.item[TIMESTAMP_KEY] = self.timestamp
        super(TruckPoint, self).save(TruckPoint.tblKey)

    def getLatLon(self):
        """Return this sample's (lat, lon) tuple."""
        return self.point.getLatLon()
class Truck(DBItem):
    """A truck record persisted under ``Truck.tblKey``."""
    tblKey = TRUCKS_KEY
    __slots__ = [ID_KEY, TIME_KEY, VELOCITY_KEY, DATE_NUM_KEY,
                 PATENT_KEY, TIMESTAMP_KEY]

    def __init__(self, item, db):
        DBItem.__init__(self, item, db)
        self.id = item[ID_KEY]
        self.time = item[TIME_KEY]
        self.velocity = item[VELOCITY_KEY]
        self.dateNum = item[DATE_NUM_KEY]
        self.patent = item[PATENT_KEY]
        self.timestamp = item[TIMESTAMP_KEY]

    def save(self):
        """Copy the attributes back into the raw item and persist it."""
        self.item.update({
            ID_KEY: self.id,
            TIME_KEY: self.time,
            VELOCITY_KEY: self.velocity,
            DATE_NUM_KEY: self.dateNum,
            PATENT_KEY: self.patent,
            TIMESTAMP_KEY: self.timestamp,
        })
        super(Truck, self).save(Truck.tblKey)
class TruckDates(DBItem):
    """Per-date availability and route-center data for a truck."""
    tblKey = TRUCK_DATES_KEY
    __slots__ = [AVAILABILITY_KEY, DATE_NUM_KEY, ROUTE_CENTERS_KEY]

    def __init__(self, item, db):
        DBItem.__init__(self, item, db)
        self.availability = item[AVAILABILITY_KEY]
        self.dateNum = item[DATE_NUM_KEY]
        self.routeCenters = item[ROUTE_CENTERS_KEY]

    def save(self):
        """Copy the attributes back into the raw item and persist it."""
        self.item.update({
            AVAILABILITY_KEY: self.availability,
            DATE_NUM_KEY: self.dateNum,
            ROUTE_CENTERS_KEY: self.routeCenters,
        })
        super(TruckDates, self).save(TruckDates.tblKey)
class Stop(DBItem):
    """A stop location (id plus lat/lon) persisted under ``Stop.tblKey``."""
    tblKey = STOPS_KEY
    __slots__ = [ID_KEY, LAT_KEY, LON_KEY]

    def __init__(self, item, db):
        DBItem.__init__(self, item, db)
        self.id = item[ID_KEY]
        self.lat = item[LAT_KEY]
        self.lon = item[LON_KEY]

    def save(self):
        """Copy the attributes back into the raw item and persist it."""
        self.item.update({
            ID_KEY: self.id,
            LAT_KEY: self.lat,
            LON_KEY: self.lon,
        })
        super(Stop, self).save(Stop.tblKey)
class StopProperties(DBItem):
    """Detailed properties of a detected stop (position, duration, radius...)."""
    tblKey = STOP_PROPS_KEY
    __slots__ = [ID_KEY, STOP_PROP_ID_KEY, DATE_NUM_KEY, LAT_KEY, LON_KEY, DURATION_KEY,
                 TIME_KEY, RADIUS_KEY, TRUCK_ID_KEY]

    def __init__(self, item, db):
        DBItem.__init__(self, item, db)
        self.id = item[ID_KEY]
        self.dateNum = item[DATE_NUM_KEY]
        self.lat = item[LAT_KEY]
        self.lon = item[LON_KEY]
        self.duration = item[DURATION_KEY]
        self.time = item[TIME_KEY]
        self.radius = item[RADIUS_KEY]
        self.truckId = item[TRUCK_ID_KEY]
        self.stopPropId = item[STOP_PROP_ID_KEY]

    def save(self):
        """Copy the attributes back into the raw item and persist it."""
        self.item.update({
            ID_KEY: self.id,
            DATE_NUM_KEY: self.dateNum,
            LAT_KEY: self.lat,
            LON_KEY: self.lon,
            DURATION_KEY: self.duration,
            TIME_KEY: self.time,
            RADIUS_KEY: self.radius,
            TRUCK_ID_KEY: self.truckId,
            STOP_PROP_ID_KEY: self.stopPropId,
        })
        super(StopProperties, self).save(StopProperties.tblKey)
class Input(DBItem):
    """A raw input sample (time + coordinates) read from a numbered file."""
    tblKey = INPUT_KEY
    __slots__ = [TIME_KEY, LAT_KEY, LON_KEY, FILE_NUM_KEY, M_LAT_KEY, M_LON_KEY]

    def __init__(self, item, db):
        DBItem.__init__(self, item, db)
        self.time = item[TIME_KEY]
        self.lat = item[LAT_KEY]
        self.lon = item[LON_KEY]
        # meter projections are derived, not read from the stored item
        self.mLat = getMeters(self.lat)
        self.mLon = getMeters(self.lon)
        self.fileNum = item[FILE_NUM_KEY]

    def __str__(self):
        return str({TIME_KEY: self.time, LAT_KEY: self.lat, LON_KEY: self.lon})

    def __repr__(self):
        return self.__str__()

    def save(self):
        """Copy the attributes back into the raw item and persist it."""
        self.item.update({
            TIME_KEY: self.time,
            LAT_KEY: self.lat,
            LON_KEY: self.lon,
            FILE_NUM_KEY: self.fileNum,
        })
        super(Input, self).save(Input.tblKey)
class Candidate(DBItem):
    # Read-only model: candidate map-matching edges for one input sample.
    # Note: unlike the other models it defines no save() method.
    tblKey = CANDIDATE_KEY
    __slots__ = [EDGES_KEY, INPUT_ID_KEY]
    def __init__(self, item, db):
        DBItem.__init__(self, item, db)
        edges = item[EDGES_KEY]
        # wrap each raw edge document in an Edge model
        self.edges = [Edge(edge, db) for edge in edges]
        self.inputId = item[INPUT_ID_KEY]
class Output(DBItem):
    """A map-matching output row: the edge chosen for a timestamped sample."""
    tblKey = OUTPUT_KEY
    __slots__ = [TIME_KEY, EDGE_ID_KEY, CONF_KEY, FILE_NUM_KEY]

    def __init__(self, item, db):
        DBItem.__init__(self, item, db)
        self.time = item[TIME_KEY]
        self.edgeId = item[EDGE_ID_KEY]
        self.conf = item[CONF_KEY]
        self.fileNum = item[FILE_NUM_KEY]

    def save(self):
        """Copy the attributes back into the raw item and persist it."""
        self.item[TIME_KEY] = self.time
        self.item[EDGE_ID_KEY] = self.edgeId
        # FIX: the original read ``self.self.item[CONF_KEY]`` which raised
        # AttributeError on every save.
        self.item[CONF_KEY] = self.conf
        self.item[FILE_NUM_KEY] = self.fileNum
        super(Output, self).save(Output.tblKey)
| |
#!/usr/bin/env python
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# Based on openvswitch agent.
#
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Isaku Yamahata
import httplib
import socket
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo.config import cfg
from ryu.app import client
from ryu.app import conf_switch_key
from ryu.app import rest_nw_id
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron import context as q_context
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import log
from neutron.plugins.ryu.common import config # noqa
LOG = log.getLogger(__name__)
# This is copied of nova.flags._get_my_ip()
# Agent shouldn't depend on nova module
def _get_my_ip():
    """Return the actual ip of the local machine.

    This code figures out what source address would be used if some traffic
    were to be sent out to some well known address on the Internet. In this
    case, a Google DNS server is used, but the specific address does not
    matter much. No traffic is actually sent.
    """
    csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # connect() on a UDP socket only performs a route lookup; no packets
        # are actually transmitted.
        csock.connect(('8.8.8.8', 80))
        (addr, _port) = csock.getsockname()
    finally:
        # Previously the socket leaked if connect()/getsockname() raised.
        csock.close()
    return addr
def _get_ip_from_nic(nic):
    """Return the first global-scope IPv4 address configured on *nic*.

    Returns None implicitly when the device has no global IPv4 address.
    """
    device = ip_lib.IPWrapper().device(nic)
    for addr in device.addr.list(scope='global'):
        if addr['ip_version'] == 4:
            # 'cidr' looks like 'a.b.c.d/nn'; keep only the address part.
            return addr['cidr'].split('/')[0]
def _get_ip(cfg_ip_str, cfg_interface_str):
    """Resolve a local IP address with a three-step fallback.

    Lookup order:
      1. the explicit ``cfg.CONF.OVS.<cfg_ip_str>`` option,
      2. the IPv4 address of the NIC named by ``cfg.CONF.OVS.<cfg_interface_str>``,
      3. the machine's outbound IP via _get_my_ip().
    """
    def _conf_value(option_name):
        # Missing option/group is treated the same as an unset value.
        try:
            return getattr(cfg.CONF.OVS, option_name)
        except (cfg.NoSuchOptError, cfg.NoSuchGroupError):
            return None

    ip = _conf_value(cfg_ip_str)
    if ip:
        return ip
    iface = _conf_value(cfg_interface_str)
    if iface:
        ip = _get_ip_from_nic(iface)
        if ip:
            return ip
        LOG.warning(_('Could not get IPv4 address from %(nic)s: %(cfg)s'),
                    {'nic': iface, 'cfg': cfg_interface_str})
    return _get_my_ip()
def _get_tunnel_ip():
    """Return the local tunnel endpoint IP (config option, NIC, or fallback)."""
    return _get_ip('tunnel_ip', 'tunnel_interface')
def _get_ovsdb_ip():
    """Return the IP the local ovsdb-server should listen on."""
    return _get_ip('ovsdb_ip', 'ovsdb_interface')
class OVSBridge(ovs_lib.OVSBridge):
    """ovs_lib.OVSBridge extended with datapath-id caching and port queries."""

    def __init__(self, br_name, root_helper):
        ovs_lib.OVSBridge.__init__(self, br_name, root_helper)
        # Cached OpenFlow datapath id; populated by find_datapath_id().
        self.datapath_id = None

    def find_datapath_id(self):
        """Look up and cache this bridge's datapath id."""
        self.datapath_id = self.get_datapath_id()

    def set_manager(self, target):
        """Point the local ovsdb-server at *target* (e.g. 'ptcp:<port>')."""
        self.run_vsctl(["set-manager", target])

    def get_ofport(self, name):
        """Return the OpenFlow port number of interface *name*."""
        return self.db_get_val("Interface", name, "ofport")

    def _get_ports(self, get_port):
        """Collect the ports accepted by *get_port*, skipping invalid ofports."""
        collected = []
        for name in self.get_port_name_list():
            # A negative ofport means the interface is not usable.
            if self.get_ofport(name) < 0:
                continue
            port = get_port(name)
            if port:
                collected.append(port)
        return collected

    def _get_external_port(self, name):
        """Return a VifPort for *name*, or None for VIF and tunnel ports."""
        # VIF ports carry external_ids; exclude them.
        if self.db_get_map("Interface", name, "external_ids"):
            return
        # Tunnel ports carry a remote_ip option; exclude them too.
        if "remote_ip" in self.db_get_map("Interface", name, "options"):
            return
        return ovs_lib.VifPort(name, self.get_ofport(name), None, None, self)

    def get_external_ports(self):
        """Return every non-VIF, non-tunnel port on this bridge."""
        return self._get_ports(self._get_external_port)
class VifPortSet(object):
    """Registers the integration bridge's external ports with Ryu."""

    def __init__(self, int_br, ryu_rest_client):
        super(VifPortSet, self).__init__()
        self.int_br = int_br
        self.api = ryu_rest_client

    def setup(self):
        """Tell Ryu about every external port on the integration bridge."""
        for external_port in self.int_br.get_external_ports():
            LOG.debug(_('External port %s'), external_port)
            self.api.update_port(rest_nw_id.NW_ID_EXTERNAL,
                                 external_port.switch.datapath_id,
                                 external_port.ofport)
class RyuPluginApi(agent_rpc.PluginApi,
                   sg_rpc.SecurityGroupServerRpcApiMixin):
    # Agent-side RPC proxy to the Ryu plugin, with security-group RPC mixed in.

    def get_ofp_rest_api_addr(self, context):
        """Ask the plugin for the address of Ryu's OpenFlow REST API."""
        LOG.debug(_("Get Ryu rest API address"))
        return self.call(context,
                         self.make_msg('get_ofp_rest_api'))
class RyuSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin):
    # Thin wrapper wiring the security-group RPC mixin to this agent.

    def __init__(self, context, plugin_rpc, root_helper):
        self.context = context
        self.plugin_rpc = plugin_rpc
        self.root_helper = root_helper
        # init_firewall() is provided by SecurityGroupAgentRpcMixin and reads
        # the three attributes set above.
        self.init_firewall()
class OVSNeutronOFPRyuAgent(n_rpc.RpcCallback,
                            sg_rpc.SecurityGroupAgentRpcCallbackMixin):
    """OVS agent that registers the bridge with Ryu and polls for port changes."""

    RPC_API_VERSION = '1.1'

    def __init__(self, integ_br, tunnel_ip, ovsdb_ip, ovsdb_port,
                 polling_interval, root_helper):
        super(OVSNeutronOFPRyuAgent, self).__init__()
        self.polling_interval = polling_interval
        # RPC must be set up first: the security-group agent and the bridge
        # setup below both need self.context / self.plugin_rpc.
        self._setup_rpc()
        self.sg_agent = RyuSecurityGroupAgent(self.context,
                                              self.plugin_rpc,
                                              root_helper)
        self._setup_integration_br(root_helper, integ_br, tunnel_ip,
                                   ovsdb_port, ovsdb_ip)

    def _setup_rpc(self):
        """Create the plugin RPC proxy and subscribe to update topics."""
        self.topic = topics.AGENT
        self.plugin_rpc = RyuPluginApi(topics.PLUGIN)
        self.context = q_context.get_admin_context_without_session()
        self.endpoints = [self]
        # Listen for port and security-group updates.
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.SECURITY_GROUP, topics.UPDATE]]
        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic,
                                                     consumers)

    def _setup_integration_br(self, root_helper, integ_br,
                              tunnel_ip, ovsdb_port, ovsdb_ip):
        """Wire the integration bridge to the Ryu controller via its REST API."""
        self.int_br = OVSBridge(integ_br, root_helper)
        self.int_br.find_datapath_id()
        rest_api_addr = self.plugin_rpc.get_ofp_rest_api_addr(self.context)
        if not rest_api_addr:
            raise n_exc.Invalid(_("Ryu rest API port isn't specified"))
        LOG.debug(_("Going to ofp controller mode %s"), rest_api_addr)
        ryu_rest_client = client.OFPClient(rest_api_addr)
        # Advertise the bridge's existing external ports to Ryu.
        self.vif_ports = VifPortSet(self.int_br, ryu_rest_client)
        self.vif_ports.setup()
        sc_client = client.SwitchConfClient(rest_api_addr)
        sc_client.set_key(self.int_br.datapath_id,
                          conf_switch_key.OVS_TUNNEL_ADDR, tunnel_ip)
        # Currently Ryu supports only tcp methods. (ssl isn't supported yet)
        self.int_br.set_manager('ptcp:%d' % ovsdb_port)
        sc_client.set_key(self.int_br.datapath_id, conf_switch_key.OVSDB_ADDR,
                          'tcp:%s:%d' % (ovsdb_ip, ovsdb_port))

    def port_update(self, context, **kwargs):
        """RPC callback: refresh the firewall when a local port's SGs change."""
        LOG.debug(_("Port update received"))
        port = kwargs.get('port')
        vif_port = self.int_br.get_vif_port_by_id(port['id'])
        if not vif_port:
            # The port does not live on this agent's bridge; nothing to do.
            return
        if ext_sg.SECURITYGROUPS in port:
            self.sg_agent.refresh_firewall()

    def _update_ports(self, registered_ports):
        """Diff current VIF ports against *registered_ports*.

        Returns None when nothing changed, otherwise a dict with the
        'current', 'added' and 'removed' port sets.
        """
        ports = self.int_br.get_vif_port_set()
        if ports == registered_ports:
            return
        added = ports - registered_ports
        removed = registered_ports - ports
        return {'current': ports,
                'added': added,
                'removed': removed}

    def _process_devices_filter(self, port_info):
        # Apply/remove security-group filtering for the changed devices.
        if 'added' in port_info:
            self.sg_agent.prepare_devices_filter(port_info['added'])
        if 'removed' in port_info:
            self.sg_agent.remove_devices_filter(port_info['removed'])

    def daemon_loop(self):
        """Main loop: poll for port changes every polling_interval seconds."""
        ports = set()
        while True:
            start = time.time()
            try:
                port_info = self._update_ports(ports)
                if port_info:
                    LOG.debug(_("Agent loop has new device"))
                    self._process_devices_filter(port_info)
                    ports = port_info['current']
            except Exception:
                # Keep the agent alive on any failure; retry next iteration.
                LOG.exception(_("Error in agent event loop"))
            # Sleep off the remainder of the interval, if any time is left.
            elapsed = max(time.time() - start, 0)
            if (elapsed < self.polling_interval):
                time.sleep(self.polling_interval - elapsed)
            else:
                LOG.debug(_("Loop iteration exceeded interval "
                            "(%(polling_interval)s vs. %(elapsed)s)!"),
                          {'polling_interval': self.polling_interval,
                           'elapsed': elapsed})
def main():
    """Entry point: read config, build the agent, and run its polling loop."""
    common_config.init(sys.argv[1:])
    common_config.setup_logging(cfg.CONF)
    integ_br = cfg.CONF.OVS.integration_bridge
    polling_interval = cfg.CONF.AGENT.polling_interval
    root_helper = cfg.CONF.AGENT.root_helper
    tunnel_ip = _get_tunnel_ip()
    LOG.debug(_('tunnel_ip %s'), tunnel_ip)
    ovsdb_port = cfg.CONF.OVS.ovsdb_port
    LOG.debug(_('ovsdb_port %s'), ovsdb_port)
    ovsdb_ip = _get_ovsdb_ip()
    LOG.debug(_('ovsdb_ip %s'), ovsdb_ip)
    try:
        agent = OVSNeutronOFPRyuAgent(integ_br, tunnel_ip, ovsdb_ip,
                                      ovsdb_port, polling_interval,
                                      root_helper)
    except httplib.HTTPException as e:
        # Ryu's REST API was unreachable or errored during setup.
        LOG.error(_("Initialization failed: %s"), e)
        sys.exit(1)
    LOG.info(_("Ryu initialization on the node is done. "
               "Agent initialized successfully, now running..."))
    # daemon_loop() never returns under normal operation.
    agent.daemon_loop()
    sys.exit(0)
if __name__ == "__main__":
main()
| |
import shlex
from abc import ABCMeta, abstractmethod
"""The SCHEMA defines the argument format the .ingest_*() and .emit_*()
methods should produce and accept (respectively)"""
SCHEMA = {
    'image': str,
    'name': str,
    'cpu': int,  # out of 1024
    'memory': int,  # in bytes
    'links': list,  # This is universal across formats
    'logging': {
        # See compose options
    },
    'port_mappings': [{
        'host_ip': str,
        'host_port': int,  # 0 is a valid, non-false value
        'container_ip': str,
        'container_port': int,
        # NOTE(review): `'tcp' or 'udp'` evaluates to just 'tcp'; this entry
        # is descriptive only — the intent is "either 'tcp' or 'udp'".
        'protocol': 'tcp' or 'udp',
        'name': str,
    }],
    'environment': dict,  # A simple key: value dictionary
    'entrypoint': str,  # An unsplit string
    'command': str,  # An unsplit string
    'volumes_from': list,  # A list of containers
    'volumes': list,  # A list of dict {'host': '/path', 'container': '/path', 'readonly': True}
    'dns': list,
    'domain': list,
    'labels': dict,
    'network': list,
    'env-file': list,
    'pid': str,
    'fetch': [dict],
}
class BaseTransformer(object, metaclass=ABCMeta):
"""
The base class for Transformer classes to inherit from.
Basic usage should look like
.. code-block:: python
transformer = MyTransformer('./my-file.txt')
normalized_keys = transformer.ingest_containers()
"""
@staticmethod
def _list2cmdline(commands):
def quote(cmd):
"""
Make sure that each cmd in command list will be treated as a single token
:param cmd: str
:return:
"""
if len(shlex.split(cmd)) == 1:
# Already a single token, do nothing
return cmd
else:
return shlex.quote(cmd)
return ' '.join(quote(cmd) for cmd in commands)
def _read_file(self, filename):
"""
:param filename: The location of the file to read
:type filename: str
"""
with open(filename, 'r') as stream:
return self._read_stream(stream=stream)
@abstractmethod
def _read_stream(self, stream):
"""
Override this method and parse the stream to be passed to
``self.transform()``
:param stream: A file-like object
:type stream: file
"""
raise NotImplementedError
@abstractmethod
def ingest_containers(self, containers=None):
"""
Ingest self.stream and return a list of un-converted container
definitions dictionaries.
This is to normalize `where` all the container information is.
For example, Compose v1 places the container name outside the rest of the
container definition. We need to have a 'name' key in the container
definition.
:rtype: list of dict
"""
raise NotImplementedError
@abstractmethod
def emit_containers(self, containers, verbose=True):
raise NotImplementedError
@staticmethod
@abstractmethod
def validate(container):
"""
Validate that the container has all essential parameters and add any if
possible
:param container: The converted container
:type container: dict
:return: The container with all valid parameters
:rtype: dict
"""
raise NotImplementedError
def ingest_name(self, name):
return name
def emit_name(self, name):
return name
def ingest_image(self, image):
return image
def emit_image(self, image):
return image
def ingest_links(self, image):
return image
def emit_links(self, image):
return image
def ingest_user(self, user):
return user
def emit_user(self, user):
return user
def ingest_net_mode(self, net_mode):
return net_mode
def emit_net_mode(self, net_mode):
return net_mode
def ingest_network(self, network):
if not isinstance(network, list) and network is not None:
network = [network]
return network
def emit_network(self, network):
return network
def ingest_domain(self, domain):
if not isinstance(domain, list) and domain is not None:
domain = [domain]
return domain
def emit_domain(self, domain):
return domain
def ingest_dns(self, dns):
if not isinstance(dns, list) and dns is not None:
dns = [dns]
return dns
def emit_dns(self, dns):
return dns
def ingest_work_dir(self, work_dir):
return work_dir
def emit_work_dir(self, work_dir):
return work_dir
def ingest_labels(self, labels):
return labels
def emit_labels(self, labels):
return labels
def ingest_pid(self, pid):
return pid
def emit_pid(self, pid):
return pid
def ingest_env_file(self, env_file):
if not isinstance(env_file, list) and env_file is not None:
env_file = [env_file]
return env_file
def emit_env_file(self, env_file):
return env_file
def ingest_expose(self, expose):
if not isinstance(expose, list) and expose is not None:
expose = [expose]
return expose
def emit_expose(self, expose):
return expose
def ingest_privileged(self, privileged):
return privileged
def emit_privileged(self, privileged):
return privileged
def ingest_fetch(self, fetch):
return fetch
def emit_fetch(self, fetch):
return fetch
@abstractmethod
def ingest_port_mappings(self, port_mappings):
raise NotImplementedError
@abstractmethod
def emit_port_mappings(self, port_mappings):
raise NotImplementedError
@abstractmethod
def ingest_cpu(self, cpu):
raise NotImplementedError
@abstractmethod
def emit_cpu(self, cpu):
raise NotImplementedError
@abstractmethod
def ingest_memory(self, memory):
raise NotImplementedError
@abstractmethod
def emit_memory(self, memory):
raise NotImplementedError
@abstractmethod
def ingest_environment(self, environment):
raise NotImplementedError
@abstractmethod
def emit_environment(self, environment):
raise NotImplementedError
@abstractmethod
def ingest_command(self, command):
raise NotImplementedError
@abstractmethod
def emit_command(self, command):
raise NotImplementedError
@abstractmethod
def ingest_entrypoint(self, entrypoint):
raise NotImplementedError
@abstractmethod
def emit_entrypoint(self, entrypoint):
raise NotImplementedError
@abstractmethod
def ingest_volumes(self, volumes):
raise NotImplementedError
@abstractmethod
def emit_volumes(self, volumes):
raise NotImplementedError
| |
"""
2016 Gregory Way
scripts/visualize_gc_and_divergence.py
Description:
Observe GC Content distributions and locations across TADs
Usage:
Is called by 'scripts/visualize.sh' which is run inside of
'scripts/run_pipeline.sh':
python gc_content_distribution.py --TAD-Boundary 'hESC'
Output:
GC Content distribution and histogram across TADs as a .pdf file
"""
import os
import random
import argparse
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from Bio import SeqIO
from tad_util.util import assign_bin, load_tad
# NOTE(review): this assigns an attribute on the plt.figure *function*; the
# matplotlib setting is rcParams['figure.max_open_warning'] — likely a no-op.
plt.figure.max_open_warning = 0
sns.set_style("whitegrid")
sns.set_style("ticks")
sns.set_context("paper", rc={"font.size": 20, "axes.titlesize": 20,
                             "axes.labelsize": 20, "xtick.labelsize": 12,
                             "ytick.labelsize": 12})
# Fixed seed so the random remainder distribution in split_TAD_bins is
# reproducible across runs.
random.seed(123)
# Load Command Arguments
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--TAD-Boundary', help='boundary cell type. The'
                    'options can be "hESC", "IMR90", "mESC", or "cortex"')
args = parser.parse_args()
# Define Constants
num_bins = 50
# x tick labels: label every 10th bin, blank elsewhere.
xlab = [''] * num_bins
for x in range(0, 50, 10):
    xlab[x] = x
tad_cell = args.TAD_Boundary
# Generate file names depending on input
if tad_cell in ['hESC', 'IMR90']:
    genome = 'hg19'
    base_dir = os.path.join('data', 'hg')
elif tad_cell in ['mESC', 'cortex']:
    genome = 'mm9'
    base_dir = os.path.join('data', 'mm')
else:
    raise ValueError('Please input: "hESC", "IMR90", "mESC", or "cortex"')
base_file = '{}_{}'.format(genome, tad_cell)
repeat_index = os.path.join('index',
                            'REPEATS_index_{}.tsv.bz2'.format(base_file))
gc_fig_file = os.path.join('figures', genome,
                           'gc_distribution_{}.pdf'.format(base_file))
div_fig_file = os.path.join('figures', genome,
                            'repeat_divergence_{}.pdf'.format(base_file))
alu_fig_file = os.path.join('figures', genome,
                            'alu_divergence_{}.pdf'.format(base_file))
tad_loc = os.path.join(base_dir, '{}_domains_{}.bed'.format(tad_cell, genome))
if genome == 'hg19':
    fasta_loc = os.path.join('data', 'hg', 'hg19_fasta')
elif genome == 'mm9':
    fasta_loc = os.path.join('data', 'mm', 'mm9_fasta')
def load_fasta(chrom, fasta_loc):
    """
    Retrieve fasta file

    Arguments:
    :param chrom: the chromosome of interest (format 'chr#')
    :param fasta_loc: the location that stores the hg19 fasta files

    Output:
    the uppercased nucleotide string for the given chromosome

    Fix: the file handle from ``open()`` was never closed; use a context
    manager so it is released deterministically.
    """
    chrom_fa = os.path.join(fasta_loc, 'chr{}.fa'.format(chrom))
    with open(chrom_fa) as handle:
        record = SeqIO.read(handle, 'fasta')
    nucleotides = str(record.seq)
    # FASTA file has upper and lowercase letters
    # lower case = repetative elements; normalize so counting is uniform.
    nucleotides = nucleotides.upper()
    return nucleotides
def split_TAD_bins(tadlength, num_bins):
    """
    Return a list of coordinates to partition the TAD

    Arguments:
    :param tadlength: how long the TAD is (int, base pairs)
    :param num_bins: how many bins to split

    Output:
    a list of (start, end) tuples of integer coordinates; the remainder of
    tadlength / num_bins is distributed by giving one extra base to
    `remainder` randomly chosen bins.

    Fix: under Python 3, ``tadlength / num_bins`` is float division, which
    produced float coordinates AND over-counted the remainder (the float-width
    bins already spanned the full length before the +1 adjustments), so the
    final bin extended past the TAD. Integer division restores the intended
    behavior.
    """
    avgbin, remainder = divmod(tadlength, num_bins)
    if remainder > 0:
        randadd = random.sample(range(0, num_bins), remainder)
    else:
        randadd = []
    return_list = []
    current_idx = 0
    for binID in range(0, num_bins):
        next_idx = current_idx + avgbin
        if binID in randadd:
            # This bin absorbs one extra base of the remainder.
            return_list.append((current_idx + 1, next_idx + 1))
            current_idx = next_idx + 1
        else:
            return_list.append((current_idx + 1, next_idx))
            current_idx = next_idx
    return return_list
def determine_gc_content(seq, start, end):
    """
    Determine the gc content for a given sequence given bin coordinates

    Arguments:
    :param seq: a nucleotide sequence
    :param start: where to subset the sequence
    :param end: where to subset the sequence

    Output:
    The GC fraction within the specified coordinates
    """
    import collections

    counts = collections.Counter(seq[int(start):int(end)])
    # Only A/C/T/G count as "known" bases; anything else (e.g. 'N') is ignored.
    known = sum(counts[base] for base in 'ACTG')
    if not known:
        # Entire window is unknown sequence: fall back to neutral 0.5.
        return 0.5
    return (counts['G'] + counts['C']) / known
def get_gc_content(tad, seq, bins):
    """
    Determine the gc content of all TADs across TAD bins

    Arguments:
    :param tad: a row in a TAD boundary DataFrame
    :param seq: the chromosome nucleotide string
    :param bins: int, number of bins to distribute gc content of TAD sequence

    Output:
    A pandas series of gc content across the TAD's bins
    """
    tad_start = int(tad['start'])
    tad_end = int(tad['end'])
    # Partition this TAD into `bins` coordinate windows.
    windows = split_TAD_bins(tad_end - tad_start, bins)
    tad_sequence = seq[tad_start:tad_end]
    # GC fraction per window, preserving bin order.
    gc_values = [determine_gc_content(tad_sequence, win_start, win_end)
                 for win_start, win_end in windows]
    return pd.Series(gc_values)
# Load Data
tad_df = load_tad(tad_loc)
repeat_df = pd.read_table(repeat_index, index_col=0)
# `.ix` was removed from pandas (>=1.0); `.loc` with a boolean mask is the
# equivalent for keeping rows whose TAD_id is not null.
repeat_df = repeat_df.loc[~pd.isnull(repeat_df['TAD_id'])]
bin_r = repeat_df.apply(lambda x: assign_bin(x, bins=num_bins, ID='repeat'),
                        axis=1)
repeat_df = repeat_df.assign(tad_bin=bin_r)
# Drop repeats that fell outside any TAD bin.
repeat_df = repeat_df[repeat_df['tad_bin'] != -1]
alu_df = repeat_df[repeat_df['repeat'] == 'SINE/Alu']
# Plot divergence
p = sns.boxplot(x=repeat_df['tad_bin'], y=repeat_df['div'],
                color=sns.xkcd_rgb['medium green'])
sns.despine()
p.set(xticklabels=xlab)
p.set(ylabel='Repeat Divergence', xlabel='TAD Bins')
p.set_title('')
plt.tight_layout()
plt.savefig(div_fig_file)
plt.close()
# ALU divergence
p = sns.boxplot(x=alu_df['tad_bin'], y=alu_df['div'],
                color=sns.xkcd_rgb['medium green'])
sns.despine()
p.set(xticklabels=xlab)
p.set(ylabel='ALU Repeat Divergence', xlabel='TAD Bins')
p.set_title('')
plt.tight_layout()
plt.savefig(alu_fig_file)
plt.close()
# Plot GC content
# DataFrame.append (removed in pandas 2.0) replaced by collecting per-chrom
# frames and concatenating once.
gc_chunks = []
for chrom in tad_df['chromosome'].unique():
    tad_sub = tad_df[tad_df['chromosome'] == chrom]
    fasta = load_fasta(str(chrom), fasta_loc)
    gc_content = tad_sub.apply(lambda x: get_gc_content(x, seq=fasta,
                                                        bins=num_bins), axis=1)
    gc_chunks.append(gc_content)
gc_content_df = pd.concat(gc_chunks, ignore_index=True)
p = sns.boxplot(data=gc_content_df, color=sns.xkcd_rgb['medium green'])
sns.despine()
p.set(xticklabels=xlab)
p.set(ylabel='GC Content', xlabel='TAD Bins')
p.set_title('')
plt.tight_layout()
plt.savefig(gc_fig_file)
plt.close()
| |
import numpy as np
import warnings
from theano import config
from theano import function
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from pylearn2.expr.probabilistic_max_pooling import max_pool_python
from pylearn2.expr.probabilistic_max_pooling import max_pool_channels_python
from pylearn2.expr.probabilistic_max_pooling import max_pool
from pylearn2.expr.probabilistic_max_pooling import max_pool_channels
from pylearn2.expr.probabilistic_max_pooling import max_pool_b01c
from pylearn2.expr.probabilistic_max_pooling import max_pool_c01b
from pylearn2.expr.probabilistic_max_pooling import max_pool_unstable
from pylearn2.expr.probabilistic_max_pooling import max_pool_softmax_op
from pylearn2.expr.probabilistic_max_pooling import \
max_pool_softmax_with_bias_op
from pylearn2.testing import no_debug_mode
def check_correctness_channelwise(f):
    """
    Tests that the theano expression emitted by f computes the same values
    as the ground truth python function

    Note: to keep the python version as dead simple as possible (i.e., to make
    sure there are not bugs in the ground truth) it uses the numerically
    unstable verison of softmax. So this test does not work with too big of
    numbers.
    """
    rng = np.random.RandomState([2012, 7, 19])
    batch_size = 5
    pool_size = 4
    n = 3 * pool_size
    # Scale/shift keeps values small enough for the unstable softmax reference.
    zv = rng.randn(batch_size, n).astype(config.floatX) * 1. - 1.5
    # NOTE(review): `n / pool_size` relies on Python 2 integer division.
    top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)
    # Ground truth from the pure-python reference implementation.
    p_np, h_np = max_pool_channels_python(zv, pool_size, top_down_v)
    z_th = T.matrix()
    z_th.name = 'z_th'
    top_down_th = T.matrix()
    top_down_th.name = 'top_down_th'
    p_th, h_th = f(z_th, pool_size, top_down_th)
    func = function([z_th, top_down_th], [p_th, h_th])
    pv, hv = func(zv, top_down_v)
    assert p_np.shape == pv.shape
    assert h_np.shape == hv.shape
    # Print diagnostics before failing so mismatches are debuggable.
    if not np.allclose(h_np, hv):
        print (h_np.min(), h_np.max())
        print (hv.min(), hv.max())
        assert False
    if not np.allclose(p_np, pv):
        diff = abs(p_np - pv)
        print 'max diff ', diff.max()
        print 'min diff ', diff.min()
        print 'ave diff ', diff.mean()
        assert False
def check_correctness_sigmoid_channelwise(f):
    """
    Tests that f is equivalent to the sigmoid function when the pool size is 1
    """
    rng = np.random.RandomState([2012, 7, 19])
    batch_size = 5
    pool_size = 1
    n = 3 * pool_size
    zv = rng.randn(batch_size, n).astype(config.floatX) * 1. - 1.5
    # NOTE(review): `n / pool_size` relies on Python 2 integer division.
    top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)
    z_th = T.matrix()
    z_th.name = 'z_th'
    top_down_th = T.matrix()
    top_down_th.name = 'top_down_th'
    p_th, h_th = f(z_th, pool_size, top_down_th)
    # With pool_size == 1, pooling must reduce to an elementwise sigmoid.
    h_s = T.nnet.sigmoid(z_th + top_down_th)
    func = function([z_th, top_down_th], [p_th, h_th, h_s])
    pv, hv, h_s = func(zv, top_down_v)
    # For a single-unit pool, p and h coincide.
    p_s = h_s
    assert p_s.shape == pv.shape
    assert h_s.shape == hv.shape
    if not np.allclose(h_s, hv):
        print (h_s.min(), h_s.max())
        print (hv.min(), hv.max())
        assert False
    if not np.allclose(p_s, pv):
        diff = abs(p_s - pv)
        print 'max diff ', diff.max()
        print 'min diff ', diff.min()
        print 'ave diff ', diff.mean()
        assert False
def check_correctness(f):
    """Check f against the python ground truth (no top-down term)."""
    rng = np.random.RandomState([2012, 7, 19])
    batch_size = 5
    rows = 32
    cols = 30
    channels = 3
    pool_rows = 2
    pool_cols = 3
    zv = rng.randn(batch_size, rows, cols,
                   channels).astype(config.floatX) * 2. - 3.
    # Ground truth from the pure-python reference implementation.
    p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols))
    z_th = T.TensorType(broadcastable=(False, False, False, False),
                        dtype=config.floatX)()
    z_th.name = 'z_th'
    p_th, h_th = f(z_th, (pool_rows, pool_cols))
    func = function([z_th], [p_th, h_th])
    pv, hv = func(zv)
    assert p_np.shape == pv.shape
    assert h_np.shape == hv.shape
    # Print diagnostics before failing so mismatches are debuggable.
    if not np.allclose(h_np, hv):
        print (h_np.min(), h_np.max())
        print (hv.min(), hv.max())
        assert False
    assert np.allclose(p_np, pv)
def check_correctness_bc01(f):
    """
    Tests that the theano expression emitted by f computes the same values
    as the ground truth python function

    Note: to keep the python version as dead simple as possible (i.e., to make
    sure there are not bugs in the ground truth) it uses the numerically
    unstable verison of softmax. So this test does not work with too big of
    numbers.
    """
    rng = np.random.RandomState([2012, 7, 19])
    batch_size = 5
    rows = 32
    cols = 30
    channels = 3
    pool_rows = 2
    pool_cols = 3
    zv = rng.randn(batch_size, rows, cols,
                   channels).astype(config.floatX) * 1. - 1.5
    # NOTE(review): Python 2 integer division gives the pooled map shape.
    top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,
                           channels).astype(config.floatX)
    p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)
    z_th = T.TensorType(broadcastable=(False, False, False, False),
                        dtype = config.floatX)()
    z_th.name = 'z_th'
    # Reference data is b01c; shuffle inputs to bc01 for the theano op.
    zr = z_th.dimshuffle(0, 3, 1, 2)
    top_down_th = T.TensorType(broadcastable=(False, False, False, False),
                               dtype = config.floatX)()
    top_down_th.name = 'top_down_th'
    top_down_r = top_down_th.dimshuffle(0, 3, 1, 2)
    p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)
    # Shuffle outputs back to b01c before comparing with the reference.
    func = function([z_th, top_down_th], [p_th.dimshuffle(0, 2, 3, 1),
                                          h_th.dimshuffle(0, 2, 3, 1)])
    pv, hv = func(zv, top_down_v)
    assert p_np.shape == pv.shape
    assert h_np.shape == hv.shape
    if not np.allclose(h_np, hv):
        print (h_np.min(), h_np.max())
        print (hv.min(), hv.max())
        assert False
    if not np.allclose(p_np, pv):
        diff = abs(p_np - pv)
        print 'max diff ', diff.max()
        print 'min diff ', diff.min()
        print 'ave diff ', diff.mean()
        assert False
def check_correctness_c01b(f):
    """
    Tests that the theano expression emitted by f computes the same values
    as the ground truth python function

    Note: to keep the python version as dead simple as possible (i.e., to make
    sure there are not bugs in the ground truth) it uses the numerically
    unstable version of softmax. So this test does not work with too big of
    numbers.
    """
    rng = np.random.RandomState([2013, 5, 6])
    batch_size = 5
    rows = 32
    cols = 30
    channels = 3
    pool_rows = 2
    pool_cols = 3
    # Do the python ground truth in b01c format
    zv = rng.randn(batch_size, rows, cols,
                   channels).astype(config.floatX) * 1. - 1.5
    # NOTE(review): Python 2 integer division gives the pooled map shape.
    top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,
                           channels).astype(config.floatX)
    p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)
    # Dimshuffle the inputs into c01b for the theano implementation
    z_th = T.TensorType(broadcastable=(False, False, False, False),
                        dtype = config.floatX)()
    z_th.tag.test_value = zv
    z_th.name = 'z_th'
    zr = z_th.dimshuffle(3, 1, 2, 0)
    top_down_th = T.TensorType(broadcastable=(False, False, False, False),
                               dtype = config.floatX)()
    top_down_th.name = 'top_down_th'
    top_down_th.tag.test_value = top_down_v
    top_down_r = top_down_th.dimshuffle(3, 1, 2, 0)
    p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)
    # Shuffle outputs back to b01c before comparing with the reference.
    func = function([z_th, top_down_th], [p_th.dimshuffle(3, 1, 2, 0),
                                          h_th.dimshuffle(3, 1, 2, 0)])
    pv, hv = func(zv, top_down_v)
    if not p_np.shape == pv.shape:
        raise AssertionError(str((p_np.shape, pv.shape)))
    assert h_np.shape == hv.shape
    if not np.allclose(h_np, hv):
        print (h_np.min(), h_np.max())
        print (hv.min(), hv.max())
        assert False
    if not np.allclose(p_np, pv):
        diff = abs(p_np - pv)
        print 'max diff ', diff.max()
        print 'min diff ', diff.min()
        print 'ave diff ', diff.mean()
        assert False
    warnings.warn("TODO: make sampling tests run on c01b format of pooling.")
@no_debug_mode
def check_sample_correctishness_b01c(f):
    """Sampling sanity check (b01c): empirical means approach probabilities.

    Draws many samples from f's sampling outputs and checks that (a) the
    sample mean converges near the analytic probabilities and (b) each
    individual sample respects the pooling constraint (p equals max of h).
    """
    batch_size = 5
    rows = 32
    cols = 30
    channels = 3
    pool_rows = 2
    pool_cols = 3
    rng = np.random.RandomState([2012, 9, 26])
    zv = rng.randn(batch_size, rows, cols,
                   channels).astype(config.floatX) * 2. - 3.
    # NOTE(review): Python 2 integer division gives the pooled map shape.
    top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,
                           channels).astype(config.floatX)
    z_th = T.TensorType(broadcastable=(False, False, False, False),
                        dtype = config.floatX)()
    z_th.name = 'z_th'
    top_down_th = T.TensorType(broadcastable=(False, False, False, False),
                               dtype = config.floatX)()
    top_down_th.name = 'top_down_th'
    theano_rng = MRG_RandomStreams(rng.randint(2147462579))
    # f returns probabilities (p, h) and their stochastic samples (p_s, h_s).
    p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,
                                 theano_rng)
    prob_func = function([z_th, top_down_th], [p_th, h_th])
    pv, hv = prob_func(zv, top_down_v)
    sample_func = function([z_th, top_down_th], [p_sth, h_sth])
    acc_p = 0. * pv
    acc_h = 0. * hv
    # make sure the test gets good coverage, ie, that it includes many
    # different activation probs for both detector and pooling layer
    buckets = 10
    bucket_width = 1. / float(buckets)
    for i in xrange(buckets):
        lower_lim = i * bucket_width
        upper_lim = (i+1) * bucket_width
        assert np.any((pv >= lower_lim) * (pv < upper_lim))
        assert np.any((hv >= lower_lim) * (hv < upper_lim))
    assert upper_lim == 1.
    # Accumulate samples; the running mean should approach pv/hv.
    for i in xrange(10000):
        ps, hs = sample_func(zv, top_down_v)
        assert ps.shape == pv.shape
        assert hs.shape == hv.shape
        acc_p += ps
        acc_h += hs
    est_p = acc_p / float(i+1)
    est_h = acc_h / float(i+1)
    pd = np.abs(est_p-pv)
    hd = np.abs(est_h-hv)
    """
    # plot maps of the estimation error, this is to see if it has
    # some spatial pattern this is useful for detecting bugs like
    # not handling the border correctly, etc.
    from pylearn2.gui.patch_viewer import PatchViewer

    pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),
            is_color = False)
    for i in xrange(pd.shape[0]):
        for j in xrange(pd.shape[3]):
            pv.add_patch((pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)
    pv.show()

    pv = PatchViewer((hd.shape[0],hd.shape[3]),(hd.shape[1],hd.shape[2]),
            is_color = False)
    for i in xrange(hd.shape[0]):
        for j in xrange(hd.shape[3]):
            pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)
    pv.show()
    """
    """
    plot expectation to estimate versus error in estimation
    expect bigger errors for values closer to 0.5

    from matplotlib import pyplot as plt

    #nelem = reduce( lambda x, y : x*y, pd.shape)
    #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))
    #plt.show()

    nelem = reduce( lambda x, y : x*y, hd.shape)
    plt.scatter( hv.reshape(nelem), hd.reshape(nelem))
    plt.show()
    """
    # don't really know how tight this should be
    # but you can try to pose an equivalent problem
    # and implement it in another way
    # using a numpy implementation in softmax_acc.py
    # I got a max error of .17
    assert max(pd.max(), hd.max()) < .17
    # Do exhaustive checks on just the last sample
    assert np.all((ps == 0) + (ps == 1))
    assert np.all((hs == 0) + (hs == 1))
    # Each pooled unit must equal the max of its detector-unit block.
    for k in xrange(batch_size):
        for i in xrange(ps.shape[1]):
            for j in xrange(ps.shape[2]):
                for l in xrange(channels):
                    p = ps[k, i, j, l]
                    h = hs[k, i*pool_rows:(i+1)*pool_rows,
                           j*pool_cols:(j+1)*pool_cols, l]
                    assert h.shape == (pool_rows, pool_cols)
                    assert p == h.max()
    """ If you made it to here, it's correctish
    (cant tell if samples are perfectly "correct") """
@no_debug_mode
def check_sample_correctishness_c01b(f):
    """Check a c01b-ordered pooling sampler for statistical correctness.

    Verifies that the sample mean of ``f``'s stochastic outputs converges
    to the probabilities it reports, and that each pooling unit equals the
    max of its detector units in the last sample.

    Parameters
    ----------
    f : callable
        Called as ``f(z, (pool_rows, pool_cols), top_down, theano_rng)``;
        returns ``(p, h, p_sample, h_sample)`` with axes in c01b order
        (channels, rows, cols, batch), as established by the shapes below.
    """
    batch_size = 5
    rows = 32
    cols = 30
    channels = 3
    pool_rows = 2
    pool_cols = 3
    rng = np.random.RandomState([2012, 9, 26])
    # Scale/shift the raw gaussians so activation probabilities spread out.
    zv = rng.randn(channels, rows, cols,
                   batch_size).astype(config.floatX) * 2. - 3.
    # NOTE: Python 2 integer division is intended here (rows / pool_rows).
    top_down_v = rng.randn(channels, rows / pool_rows, cols / pool_cols,
                           batch_size).astype(config.floatX)
    z_th = T.TensorType(broadcastable=(False, False, False, False),
                        dtype = config.floatX)()
    z_th.name = 'z_th'
    z_th.tag.test_value = zv
    top_down_th = T.TensorType(broadcastable=(False, False, False, False),
                               dtype = config.floatX)()
    top_down_th.name = 'top_down_th'
    top_down_th.tag.test_value = top_down_v
    theano_rng = MRG_RandomStreams(rng.randint(2147462579))
    p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,
                                 theano_rng)
    # Compile one function for probabilities and one for samples.
    prob_func = function([z_th, top_down_th], [p_th, h_th])
    pv, hv = prob_func(zv, top_down_v)
    sample_func = function([z_th, top_down_th], [p_sth, h_sth])
    acc_p = 0. * pv
    acc_h = 0. * hv
    # make sure the test gets good coverage, ie, that it includes
    # many different activation probs for both detector and pooling layer
    buckets = 10
    bucket_width = 1. / float(buckets)
    for i in xrange(buckets):
        lower_lim = i * bucket_width
        upper_lim = (i+1) * bucket_width
        assert np.any((pv >= lower_lim) * (pv < upper_lim))
        assert np.any((hv >= lower_lim) * (hv < upper_lim))
    # Sanity check: the final bucket must end exactly at probability 1.
    assert upper_lim == 1.
    # Monte Carlo estimate of the activation probabilities from samples.
    for i in xrange(10000):
        ps, hs = sample_func(zv, top_down_v)
        assert ps.shape == pv.shape
        assert hs.shape == hv.shape
        acc_p += ps
        acc_h += hs
    est_p = acc_p / float(i+1)
    est_h = acc_h / float(i+1)
    pd = np.abs(est_p-pv)
    hd = np.abs(est_h-hv)
    # don't really know how tight this should be
    # but you can try to pose an equivalent problem
    # and implement it in another way
    # using a numpy implementation in softmax_acc.py
    # I got a max error of .17
    assert max(pd.max(), hd.max()) < .17
    # Do exhaustive checks on just the last sample
    assert np.all((ps == 0) + (ps == 1))
    assert np.all((hs == 0) + (hs == 1))
    # c01b layout: index order is (channel l, row i, col j, batch k).
    for k in xrange(batch_size):
        for i in xrange(ps.shape[1]):
            for j in xrange(ps.shape[2]):
                for l in xrange(channels):
                    p = ps[l, i, j, k]
                    h = hs[l, i*pool_rows:(i+1)*pool_rows,
                           j*pool_cols:(j+1)*pool_cols, k]
                    assert h.shape == (pool_rows, pool_cols)
                    assert p == h.max()
    """ If you made it to here, it's correctish
    (cant tell if samples are perfectly "correct") """
@no_debug_mode
def check_sample_correctishness_bc01(f):
    """Check a bc01-ordered pooling sampler for statistical correctness.

    Tests that the sample mean converges to the conditional
    expectation given by the function.
    Tests that p really is the max of the samples.
    Tests that at most one h in a group is on.
    """
    batch_size = 5
    rows = 32
    cols = 30
    channels = 3
    pool_rows = 2
    pool_cols = 3
    rng = np.random.RandomState([2012, 9, 26])
    # Scale/shift the raw gaussians so activation probabilities spread out.
    zv = rng.randn(batch_size, channels, rows,
                   cols).astype(config.floatX) * 2. - 3.
    # NOTE: Python 2 integer division is intended here (rows / pool_rows).
    top_down_v = rng.randn(batch_size, channels, rows / pool_rows,
                           cols / pool_cols).astype(config.floatX)
    z_th = T.TensorType(broadcastable=(False, False, False, False),
                        dtype = config.floatX)()
    z_th.tag.test_value = zv
    z_th.name = 'z_th'
    top_down_th = T.TensorType(broadcastable=(False, False, False, False),
                               dtype = config.floatX)()
    top_down_th.tag.test_value = top_down_v
    top_down_th.name = 'top_down_th'
    theano_rng = MRG_RandomStreams(rng.randint(2147462579))
    p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,
                                 theano_rng)
    # Compile one function for probabilities and one for samples.
    prob_func = function([z_th, top_down_th], [p_th, h_th])
    pv, hv = prob_func(zv, top_down_v)
    sample_func = function([z_th, top_down_th], [p_sth, h_sth])
    acc_p = 0. * pv
    acc_h = 0. * hv
    # make sure the test gets good coverage, ie, that it includes many
    # different activation probs for both detector and pooling layer
    buckets = 10
    bucket_width = 1. / float(buckets)
    for i in xrange(buckets):
        lower_lim = i * bucket_width
        upper_lim = (i+1) * bucket_width
        assert np.any((pv >= lower_lim) * (pv < upper_lim))
        assert np.any((hv >= lower_lim) * (hv < upper_lim))
    # Sanity check: the final bucket must end exactly at probability 1.
    assert upper_lim == 1.
    # Monte Carlo estimate of the activation probabilities from samples.
    for i in xrange(10000):
        ps, hs = sample_func(zv, top_down_v)
        assert ps.shape == pv.shape
        assert hs.shape == hv.shape
        acc_p += ps
        acc_h += hs
    est_p = acc_p / float(i+1)
    est_h = acc_h / float(i+1)
    pd = np.abs(est_p-pv)
    hd = np.abs(est_h-hv)
    """
    # plot maps of the estimation error, this is to see if it has some
    # spatial pattern this is useful for detecting bugs like not handling
    # the border correctly, etc.
    from pylearn2.gui.patch_viewer import PatchViewer
    pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),
                     is_color = False)
    for i in xrange(pd.shape[0]):
        for j in xrange(pd.shape[3]):
            pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)
    pv.show()
    pv = PatchViewer((hd.shape[0],hd.shape[3]), (hd.shape[1],hd.shape[2]),
                     is_color = False)
    for i in xrange(hd.shape[0]):
        for j in xrange(hd.shape[3]):
            pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)
    pv.show()
    """
    """
    plot expectation to estimate versus error in estimation
    expect bigger errors for values closer to 0.5
    from matplotlib import pyplot as plt
    #nelem = reduce( lambda x, y : x*y, pd.shape)
    #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))
    #plt.show()
    nelem = reduce( lambda x, y : x*y, hd.shape)
    plt.scatter( hv.reshape(nelem), hd.reshape(nelem))
    plt.show()
    """
    # don't really know how tight this should be
    # but you can try to pose an equivalent problem
    # and implement it in another way
    # using a numpy implementation in softmax_acc.py
    # I got a max error of .17
    assert max(pd.max(), hd.max()) < .17
    # Do exhaustive checks on just the last sample
    assert np.all((ps == 0) + (ps == 1))
    assert np.all((hs == 0) + (hs == 1))
    # bc01 layout: index order is (batch k, channel l, row i, col j).
    for k in xrange(batch_size):
        for i in xrange(ps.shape[2]):
            for j in xrange(ps.shape[3]):
                for l in xrange(channels):
                    p = ps[k, l, i, j]
                    h = hs[k, l, i*pool_rows:(i+1)*pool_rows,
                           j*pool_cols:(j+1)*pool_cols]
                    assert h.shape == (pool_rows, pool_cols)
                    assert p == h.max()
                    # at most one detector unit per pool may fire
                    assert h.sum() <= 1
    """ If you made it to here, it's correctish
    (cant tell if samples are perfectly "correct") """
@no_debug_mode
def check_sample_correctishness_channelwise(f):
    """Check a channelwise (2-D input) pooling sampler for correctness.

    Tests that the sample mean converges to the conditional expectation given
    by the function, that p really is the max of the samples, and that
    at most one h in a group is on.
    """
    batch_size = 27
    pool_size = 4
    n = pool_size * 21
    rng = np.random.RandomState([2012, 9, 26])
    # Scale/shift the raw gaussians so activation probabilities spread out.
    zv = rng.randn(batch_size, n).astype(config.floatX) * 3.5 - 5.
    # NOTE: Python 2 integer division is intended here (n / pool_size).
    top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)
    z_th = T.matrix()
    z_th.tag.test_value = zv
    z_th.name = 'z_th'
    top_down_th = T.matrix()
    top_down_th.tag.test_value = top_down_v
    top_down_th.name = 'top_down_th'
    theano_rng = MRG_RandomStreams(rng.randint(2147462579))
    p_th, h_th, p_sth, h_sth = f(z_th, pool_size, top_down_th, theano_rng)
    # Compile one function for probabilities and one for samples.
    prob_func = function([z_th, top_down_th], [p_th, h_th])
    pv, hv = prob_func(zv, top_down_v)
    sample_func = function([z_th, top_down_th], [p_sth, h_sth])
    acc_p = 0. * pv
    acc_h = 0. * hv
    # make sure the test gets good coverage, ie, that it includes
    # many different activation probs for both detector and pooling layer
    buckets = 10
    bucket_width = 1. / float(buckets)
    # Debug output left in deliberately (Python 2 print statements).
    print pv.min(), pv.max()
    print hv.min(), hv.max()
    for i in xrange(buckets):
        lower_lim = i * bucket_width
        upper_lim = (i+1) * bucket_width
        print lower_lim, upper_lim
        assert np.any((pv >= lower_lim) * (pv < upper_lim))
        assert np.any((hv >= lower_lim) * (hv < upper_lim))
    # Sanity check: the final bucket must end exactly at probability 1.
    assert upper_lim == 1.
    # Monte Carlo estimate of the activation probabilities from samples.
    for i in xrange(10000):
        ps, hs = sample_func(zv, top_down_v)
        assert ps.shape == pv.shape
        assert hs.shape == hv.shape
        acc_p += ps
        acc_h += hs
    est_p = acc_p / float(i+1)
    est_h = acc_h / float(i+1)
    pd = np.abs(est_p-pv)
    hd = np.abs(est_h-hv)
    """
    # plot maps of the estimation error, this is to see if it has some
    # spatial pattern this is useful for detecting bugs like not handling
    # the border correctly, etc.
    # from pylearn2.gui.patch_viewer import PatchViewer
    pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),
                     is_color = False)
    for i in xrange(pd.shape[0]):
        for j in xrange(pd.shape[3]):
            pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)
    pv.show()
    pv = PatchViewer((hd.shape[0],hd.shape[3]),(hd.shape[1],hd.shape[2]),
                     is_color = False)
    for i in xrange(hd.shape[0]):
        for j in xrange(hd.shape[3]):
            pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)
    pv.show()
    """
    """
    plot expectation to estimate versus error in estimation
    expect bigger errors for values closer to 0.5
    from matplotlib import pyplot as plt
    #nelem = reduce( lambda x, y : x*y, pd.shape)
    #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))
    #plt.show()
    nelem = reduce( lambda x, y : x*y, hd.shape)
    plt.scatter( hv.reshape(nelem), hd.reshape(nelem))
    plt.show()
    """
    # don't really know how tight this should be
    # but you can try to pose an equivalent problem
    # and implement it in another way
    # using a numpy implementation in softmax_acc.py
    # I got a max error of .17
    assert max(pd.max(), hd.max()) < .17
    # Do exhaustive checks on just the last sample
    assert np.all((ps == 0) + (ps == 1))
    assert np.all((hs == 0) + (hs == 1))
    # 2-D layout: index order is (batch k, pool unit i).
    for k in xrange(batch_size):
        for i in xrange(ps.shape[1]):
            p = ps[k, i]
            h = hs[k, i*pool_size:(i+1)*pool_size]
            assert h.shape == (pool_size,)
            assert p == h.max()
            # at most one detector unit per pool may fire
            assert h.sum() <= 1
    """ If you made it to here, it's correctish
    (cant tell if samples are perfectly "correct") """
# Test entry points: each pairs one pooling implementation with the
# checker (defined above) that matches its data layout.
def test_max_pool_channels():
    # deterministic correctness of channelwise pooling
    check_correctness_channelwise(max_pool_channels)


def test_max_pool_channels_sigmoid():
    # degenerate pool_size=1 case should reduce to a sigmoid
    check_correctness_sigmoid_channelwise(max_pool_channels)


def test_max_pool_channels_samples():
    # stochastic sampling correctness of channelwise pooling
    check_sample_correctishness_channelwise(max_pool_channels)


def test_max_pool():
    check_correctness_bc01(max_pool)


def test_max_pool_c01b():
    check_correctness_c01b(max_pool_c01b)


def test_max_pool_samples():
    check_sample_correctishness_bc01(max_pool)


def test_max_pool_b01c_samples():
    check_sample_correctishness_b01c(max_pool_b01c)


def test_max_pool_c01b_samples():
    check_sample_correctishness_c01b(max_pool_c01b)


def test_max_pool_b01c():
    check_correctness(max_pool_b01c)


def test_max_pool_unstable():
    # numerically unstable reference implementation must still agree
    check_correctness(max_pool_unstable)


def test_max_pool_softmax_op():
    check_correctness(max_pool_softmax_op)


def test_max_pool_softmax_with_bias_op():
    check_correctness(max_pool_softmax_with_bias_op)
| |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import distutils.spawn
import errno
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
from six import moves
import grpc
import grpc.experimental
from tests.unit import test_common
from tests.unit.framework.common import test_constants
import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2
import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2
import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2
import tests.protoc_plugin.protos.service.test_service_pb2_grpc as service_pb2_grpc
# Identifiers of entities we expect to find in the generated module.
# Kept as strings and resolved via getattr() so the tests can report a
# clean failure if protoc code generation produced the wrong names.
STUB_IDENTIFIER = 'TestServiceStub'
SERVICER_IDENTIFIER = 'TestServiceServicer'
ADD_SERVICER_TO_SERVER_IDENTIFIER = 'add_TestServiceServicer_to_server'
class _ServicerMethods(object):
    """Deterministic implementations of the TestService RPC methods,
    with context-manager hooks to pause or fail in-flight RPCs."""

    def __init__(self):
        # One condition variable guards both test-control flags.
        self._condition = threading.Condition()
        self._paused = False
        self._fail = False

    @contextlib.contextmanager
    def pause(self):  # pylint: disable=invalid-name
        """While active, RPC bodies block in _control() until exit."""
        with self._condition:
            self._paused = True
        yield
        with self._condition:
            self._paused = False
            # wake any RPC threads waiting in _control()
            self._condition.notify_all()

    @contextlib.contextmanager
    def fail(self):  # pylint: disable=invalid-name
        """While active, RPC bodies raise ValueError from _control()."""
        with self._condition:
            self._fail = True
        yield
        with self._condition:
            self._fail = False

    def _control(self):  # pylint: disable=invalid-name
        # Called by every RPC body just before producing a response:
        # honors the fail() and pause() test hooks.
        with self._condition:
            if self._fail:
                raise ValueError()
            while self._paused:
                self._condition.wait()

    def UnaryCall(self, request, unused_rpc_context):
        # Response payload is 'a' repeated request.response_size times.
        response = response_pb2.SimpleResponse()
        response.payload.payload_type = payload_pb2.COMPRESSABLE
        response.payload.payload_compressable = 'a' * request.response_size
        self._control()
        return response

    def StreamingOutputCall(self, request, unused_rpc_context):
        # One response per requested response_parameters entry.
        for parameter in request.response_parameters:
            response = response_pb2.StreamingOutputCallResponse()
            response.payload.payload_type = payload_pb2.COMPRESSABLE
            response.payload.payload_compressable = 'a' * parameter.size
            self._control()
            yield response

    def StreamingInputCall(self, request_iter, unused_rpc_context):
        # Sum the payload sizes of the whole request stream.
        response = response_pb2.StreamingInputCallResponse()
        aggregated_payload_size = 0
        for request in request_iter:
            aggregated_payload_size += len(request.payload.payload_compressable)
        response.aggregated_payload_size = aggregated_payload_size
        self._control()
        return response

    def FullDuplexCall(self, request_iter, unused_rpc_context):
        # Respond to each request as it arrives.
        for request in request_iter:
            for parameter in request.response_parameters:
                response = response_pb2.StreamingOutputCallResponse()
                response.payload.payload_type = payload_pb2.COMPRESSABLE
                response.payload.payload_compressable = 'a' * parameter.size
                self._control()
                yield response

    def HalfDuplexCall(self, request_iter, unused_rpc_context):
        # Buffer all responses; emit them only after the request stream ends.
        responses = []
        for request in request_iter:
            for parameter in request.response_parameters:
                response = response_pb2.StreamingOutputCallResponse()
                response.payload.payload_type = payload_pb2.COMPRESSABLE
                response.payload.payload_compressable = 'a' * parameter.size
                self._control()
                responses.append(response)
        for response in responses:
            yield response
# Immutable record bundling everything a test needs to exercise RPCs.
class _Service(
        collections.namedtuple('_Service', (
            'servicer_methods',
            'server',
            'stub',
        ))):
    """A live and running service.

    Attributes:
      servicer_methods: The _ServicerMethods servicing RPCs (None when the
        servicer intentionally implements nothing).
      server: The grpc.Server servicing RPCs.
      stub: A stub on which to invoke RPCs.
    """
def _CreateService():
    """Provides a servicer backend and a stub.

    Returns:
      A _Service with which to test RPCs.
    """
    servicer_methods = _ServicerMethods()

    # Resolve the generated servicer base class by name and delegate every
    # method to the shared _ServicerMethods instance above.
    class Servicer(getattr(service_pb2_grpc, SERVICER_IDENTIFIER)):

        def UnaryCall(self, request, context):
            return servicer_methods.UnaryCall(request, context)

        def StreamingOutputCall(self, request, context):
            return servicer_methods.StreamingOutputCall(request, context)

        def StreamingInputCall(self, request_iterator, context):
            return servicer_methods.StreamingInputCall(request_iterator,
                                                       context)

        def FullDuplexCall(self, request_iterator, context):
            return servicer_methods.FullDuplexCall(request_iterator, context)

        def HalfDuplexCall(self, request_iterator, context):
            return servicer_methods.HalfDuplexCall(request_iterator, context)

    server = test_common.test_server()
    getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(),
                                                                 server)
    # Port 0 lets the OS pick a free port; connect the stub to it.
    port = server.add_insecure_port('[::]:0')
    server.start()
    channel = grpc.insecure_channel('localhost:{}'.format(port))
    stub = getattr(service_pb2_grpc, STUB_IDENTIFIER)(channel)
    return _Service(servicer_methods, server, stub)
def _CreateIncompleteService():
    """Provides a servicer backend that fails to implement methods and its stub.

    Returns:
      A _Service with which to test RPCs. The returned _Service's
      servicer_methods implements none of the methods required of it.
    """

    # Deliberately empty subclass: every RPC falls through to the generated
    # base class, which reports UNIMPLEMENTED.
    class Servicer(getattr(service_pb2_grpc, SERVICER_IDENTIFIER)):
        pass

    server = test_common.test_server()
    getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(),
                                                                 server)
    port = server.add_insecure_port('[::]:0')
    server.start()
    channel = grpc.insecure_channel('localhost:{}'.format(port))
    stub = getattr(service_pb2_grpc, STUB_IDENTIFIER)(channel)
    # No servicer_methods: nothing is implemented behind this stub.
    return _Service(None, server, stub)
def _streaming_input_request_iterator():
    """Yield three fresh one-byte COMPRESSABLE payload requests."""
    produced = 0
    while produced < 3:
        streaming_request = request_pb2.StreamingInputCallRequest()
        streaming_request.payload.payload_type = payload_pb2.COMPRESSABLE
        streaming_request.payload.payload_compressable = 'a'
        produced += 1
        yield streaming_request
def _streaming_output_request():
    """Build one request asking for three responses of sizes 1, 2 and 3."""
    built = request_pb2.StreamingOutputCallRequest()
    for requested_size in (1, 2, 3):
        built.response_parameters.add(size=requested_size, interval_us=0)
    return built
def _full_duplex_request_iterator():
    """Yield two requests: one wanting a single response, one wanting two."""
    for size_group in ((1,), (2, 3)):
        duplex_request = request_pb2.StreamingOutputCallRequest()
        for requested_size in size_group:
            duplex_request.response_parameters.add(size=requested_size,
                                                   interval_us=0)
        yield duplex_request
class PythonPluginTest(unittest.TestCase):
    """Test case for the gRPC Python protoc-plugin.

    While reading these tests, remember that the futures API
    (`stub.method.future()`) only gives futures for the *response-unary*
    methods and does not exist for response-streaming methods.

    Tests compare real RPC responses against the deterministic
    _ServicerMethods implementations invoked directly, and use the
    pause()/fail() hooks to exercise deadline, cancellation and error paths.
    """

    def testImportAttributes(self):
        # check that we can access the generated module and its members.
        self.assertIsNotNone(getattr(service_pb2_grpc, STUB_IDENTIFIER, None))
        self.assertIsNotNone(
            getattr(service_pb2_grpc, SERVICER_IDENTIFIER, None))
        self.assertIsNotNone(
            getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))

    def testUpDown(self):
        # a freshly created service exposes all three _Service fields
        service = _CreateService()
        self.assertIsNotNone(service.servicer_methods)
        self.assertIsNotNone(service.server)
        self.assertIsNotNone(service.stub)
        service.server.stop(None)

    def testIncompleteServicer(self):
        # an unimplemented method must fail with UNIMPLEMENTED
        service = _CreateIncompleteService()
        request = request_pb2.SimpleRequest(response_size=13)
        with self.assertRaises(grpc.RpcError) as exception_context:
            service.stub.UnaryCall(request)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.UNIMPLEMENTED)
        service.server.stop(None)

    def testUnaryCall(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        response = service.stub.UnaryCall(request)
        # servicer methods are deterministic, so a direct call yields the
        # response the RPC should have produced
        expected_response = service.servicer_methods.UnaryCall(
            request, 'not a real context!')
        self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testUnaryCallFuture(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        # Check that the call does not block waiting for the server to respond.
        with service.servicer_methods.pause():
            response_future = service.stub.UnaryCall.future(request)
        # result() only after un-pausing, otherwise it would wait forever
        response = response_future.result()
        expected_response = service.servicer_methods.UnaryCall(
            request, 'not a real RpcContext!')
        self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testUnaryCallFutureExpired(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        # pause the server so the short deadline is guaranteed to expire
        with service.servicer_methods.pause():
            response_future = service.stub.UnaryCall.future(
                request, timeout=test_constants.SHORT_TIMEOUT)
            with self.assertRaises(grpc.RpcError) as exception_context:
                response_future.result()
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)
        self.assertIs(response_future.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
        service.server.stop(None)

    def testUnaryCallFutureCancelled(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        # pause so the RPC is still in flight when we cancel it
        with service.servicer_methods.pause():
            response_future = service.stub.UnaryCall.future(request)
            response_future.cancel()
            self.assertTrue(response_future.cancelled())
        self.assertIs(response_future.code(), grpc.StatusCode.CANCELLED)
        service.server.stop(None)

    def testUnaryCallFutureFailed(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        # a servicer-side ValueError surfaces as status UNKNOWN
        with service.servicer_methods.fail():
            response_future = service.stub.UnaryCall.future(request)
            self.assertIsNotNone(response_future.exception())
        self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
        service.server.stop(None)

    def testStreamingOutputCall(self):
        service = _CreateService()
        request = _streaming_output_request()
        responses = service.stub.StreamingOutputCall(request)
        expected_responses = service.servicer_methods.StreamingOutputCall(
            request, 'not a real RpcContext!')
        # zip_longest makes a length mismatch fail the equality check
        for expected_response, response in moves.zip_longest(
                expected_responses, responses):
            self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testStreamingOutputCallExpired(self):
        service = _CreateService()
        request = _streaming_output_request()
        with service.servicer_methods.pause():
            responses = service.stub.StreamingOutputCall(
                request, timeout=test_constants.SHORT_TIMEOUT)
            # draining the paused stream must hit the deadline
            with self.assertRaises(grpc.RpcError) as exception_context:
                list(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)
        service.server.stop(None)

    def testStreamingOutputCallCancelled(self):
        service = _CreateService()
        request = _streaming_output_request()
        responses = service.stub.StreamingOutputCall(request)
        # consume one response, then cancel mid-stream
        next(responses)
        responses.cancel()
        with self.assertRaises(grpc.RpcError) as exception_context:
            next(responses)
        self.assertIs(responses.code(), grpc.StatusCode.CANCELLED)
        service.server.stop(None)

    def testStreamingOutputCallFailed(self):
        service = _CreateService()
        request = _streaming_output_request()
        with service.servicer_methods.fail():
            responses = service.stub.StreamingOutputCall(request)
            self.assertIsNotNone(responses)
            with self.assertRaises(grpc.RpcError) as exception_context:
                next(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.UNKNOWN)
        service.server.stop(None)

    def testStreamingInputCall(self):
        service = _CreateService()
        response = service.stub.StreamingInputCall(
            _streaming_input_request_iterator())
        expected_response = service.servicer_methods.StreamingInputCall(
            _streaming_input_request_iterator(), 'not a real RpcContext!')
        self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testStreamingInputCallFuture(self):
        service = _CreateService()
        with service.servicer_methods.pause():
            response_future = service.stub.StreamingInputCall.future(
                _streaming_input_request_iterator())
        # result() only after un-pausing, otherwise it would wait forever
        response = response_future.result()
        expected_response = service.servicer_methods.StreamingInputCall(
            _streaming_input_request_iterator(), 'not a real RpcContext!')
        self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testStreamingInputCallFutureExpired(self):
        service = _CreateService()
        with service.servicer_methods.pause():
            response_future = service.stub.StreamingInputCall.future(
                _streaming_input_request_iterator(),
                timeout=test_constants.SHORT_TIMEOUT)
            with self.assertRaises(grpc.RpcError) as exception_context:
                response_future.result()
            # the deadline error is visible both from result() and exception()
            self.assertIsInstance(response_future.exception(), grpc.RpcError)
            self.assertIs(response_future.exception().code(),
                          grpc.StatusCode.DEADLINE_EXCEEDED)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)
        service.server.stop(None)

    def testStreamingInputCallFutureCancelled(self):
        service = _CreateService()
        with service.servicer_methods.pause():
            response_future = service.stub.StreamingInputCall.future(
                _streaming_input_request_iterator())
            response_future.cancel()
            self.assertTrue(response_future.cancelled())
        # a cancelled future must raise rather than return a result
        with self.assertRaises(grpc.FutureCancelledError):
            response_future.result()
        service.server.stop(None)

    def testStreamingInputCallFutureFailed(self):
        service = _CreateService()
        with service.servicer_methods.fail():
            response_future = service.stub.StreamingInputCall.future(
                _streaming_input_request_iterator())
            self.assertIsNotNone(response_future.exception())
        self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
        service.server.stop(None)

    def testFullDuplexCall(self):
        service = _CreateService()
        responses = service.stub.FullDuplexCall(_full_duplex_request_iterator())
        expected_responses = service.servicer_methods.FullDuplexCall(
            _full_duplex_request_iterator(), 'not a real RpcContext!')
        for expected_response, response in moves.zip_longest(
                expected_responses, responses):
            self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testFullDuplexCallExpired(self):
        request_iterator = _full_duplex_request_iterator()
        service = _CreateService()
        with service.servicer_methods.pause():
            responses = service.stub.FullDuplexCall(
                request_iterator, timeout=test_constants.SHORT_TIMEOUT)
            with self.assertRaises(grpc.RpcError) as exception_context:
                list(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)
        service.server.stop(None)

    def testFullDuplexCallCancelled(self):
        service = _CreateService()
        request_iterator = _full_duplex_request_iterator()
        responses = service.stub.FullDuplexCall(request_iterator)
        # consume one response, then cancel mid-stream
        next(responses)
        responses.cancel()
        with self.assertRaises(grpc.RpcError) as exception_context:
            next(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.CANCELLED)
        service.server.stop(None)

    def testFullDuplexCallFailed(self):
        request_iterator = _full_duplex_request_iterator()
        service = _CreateService()
        with service.servicer_methods.fail():
            responses = service.stub.FullDuplexCall(request_iterator)
            with self.assertRaises(grpc.RpcError) as exception_context:
                next(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.UNKNOWN)
        service.server.stop(None)

    def testHalfDuplexCall(self):
        service = _CreateService()

        def half_duplex_request_iterator():
            request = request_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=1, interval_us=0)
            yield request
            request = request_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=2, interval_us=0)
            request.response_parameters.add(size=3, interval_us=0)
            yield request

        responses = service.stub.HalfDuplexCall(half_duplex_request_iterator())
        expected_responses = service.servicer_methods.HalfDuplexCall(
            half_duplex_request_iterator(), 'not a real RpcContext!')
        for expected_response, response in moves.zip_longest(
                expected_responses, responses):
            self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testHalfDuplexCallWedged(self):
        condition = threading.Condition()
        wait_cell = [False]

        # wait() wedges the request iterator (below) so the client never
        # finishes sending; the half-duplex server therefore never responds.
        @contextlib.contextmanager
        def wait():  # pylint: disable=invalid-name
            # Where's Python 3's 'nonlocal' statement when you need it?
            with condition:
                wait_cell[0] = True
            yield
            with condition:
                wait_cell[0] = False
                condition.notify_all()

        def half_duplex_request_iterator():
            request = request_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=1, interval_us=0)
            yield request
            with condition:
                while wait_cell[0]:
                    condition.wait()

        service = _CreateService()
        with wait():
            responses = service.stub.HalfDuplexCall(
                half_duplex_request_iterator(),
                timeout=test_constants.SHORT_TIMEOUT)
            # half-duplex waits for the client to send all info
            with self.assertRaises(grpc.RpcError) as exception_context:
                next(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)
        service.server.stop(None)
# Fix: the original condition `sys.version_info[0] < 3 or
# sys.version_info[1] < 6` also skips any future major version whose minor
# is below 6 (e.g. 4.0); tuple comparison expresses ">= 3.6" correctly.
@unittest.skipIf(sys.version_info < (3, 6), "Unsupported on Python 2.")
class SimpleStubsPluginTest(unittest.TestCase):
    """Test case for the gRPC Python protoc-plugin's module-level call API.

    Exercises the `service_pb2_grpc.TestService.*` simple-stub sugar
    (grpc.experimental) against a real in-process server started in setUp.
    """

    # Shared deterministic backend; class-level so the nested Servicer and
    # the expected-response computations use the same instance.
    servicer_methods = _ServicerMethods()

    class Servicer(service_pb2_grpc.TestServiceServicer):
        """Delegates every RPC to the shared _ServicerMethods instance."""

        def UnaryCall(self, request, context):
            return SimpleStubsPluginTest.servicer_methods.UnaryCall(
                request, context)

        def StreamingOutputCall(self, request, context):
            return SimpleStubsPluginTest.servicer_methods.StreamingOutputCall(
                request, context)

        def StreamingInputCall(self, request_iterator, context):
            return SimpleStubsPluginTest.servicer_methods.StreamingInputCall(
                request_iterator, context)

        def FullDuplexCall(self, request_iterator, context):
            return SimpleStubsPluginTest.servicer_methods.FullDuplexCall(
                request_iterator, context)

        def HalfDuplexCall(self, request_iterator, context):
            return SimpleStubsPluginTest.servicer_methods.HalfDuplexCall(
                request_iterator, context)

    def setUp(self):
        """Start a fresh insecure server on an OS-chosen port."""
        super(SimpleStubsPluginTest, self).setUp()
        self._server = test_common.test_server()
        service_pb2_grpc.add_TestServiceServicer_to_server(
            self.Servicer(), self._server)
        self._port = self._server.add_insecure_port('[::]:0')
        self._server.start()
        self._target = 'localhost:{}'.format(self._port)

    def tearDown(self):
        self._server.stop(None)
        super(SimpleStubsPluginTest, self).tearDown()

    def testUnaryCall(self):
        request = request_pb2.SimpleRequest(response_size=13)
        response = service_pb2_grpc.TestService.UnaryCall(
            request,
            self._target,
            channel_credentials=grpc.experimental.insecure_channel_credentials(),
            wait_for_ready=True)
        expected_response = self.servicer_methods.UnaryCall(
            request, 'not a real context!')
        self.assertEqual(expected_response, response)

    def testUnaryCallInsecureSugar(self):
        # insecure=True is shorthand for explicit insecure channel credentials
        request = request_pb2.SimpleRequest(response_size=13)
        response = service_pb2_grpc.TestService.UnaryCall(request,
                                                          self._target,
                                                          insecure=True,
                                                          wait_for_ready=True)
        expected_response = self.servicer_methods.UnaryCall(
            request, 'not a real context!')
        self.assertEqual(expected_response, response)

    def testStreamingOutputCall(self):
        request = _streaming_output_request()
        expected_responses = self.servicer_methods.StreamingOutputCall(
            request, 'not a real RpcContext!')
        responses = service_pb2_grpc.TestService.StreamingOutputCall(
            request,
            self._target,
            channel_credentials=grpc.experimental.insecure_channel_credentials(),
            wait_for_ready=True)
        # zip_longest makes a length mismatch fail the equality check
        for expected_response, response in moves.zip_longest(
                expected_responses, responses):
            self.assertEqual(expected_response, response)

    def testStreamingInputCall(self):
        response = service_pb2_grpc.TestService.StreamingInputCall(
            _streaming_input_request_iterator(),
            self._target,
            channel_credentials=grpc.experimental.insecure_channel_credentials(),
            wait_for_ready=True)
        expected_response = self.servicer_methods.StreamingInputCall(
            _streaming_input_request_iterator(), 'not a real RpcContext!')
        self.assertEqual(expected_response, response)

    def testFullDuplexCall(self):
        responses = service_pb2_grpc.TestService.FullDuplexCall(
            _full_duplex_request_iterator(),
            self._target,
            channel_credentials=grpc.experimental.insecure_channel_credentials(),
            wait_for_ready=True)
        expected_responses = self.servicer_methods.FullDuplexCall(
            _full_duplex_request_iterator(), 'not a real RpcContext!')
        for expected_response, response in moves.zip_longest(
                expected_responses, responses):
            self.assertEqual(expected_response, response)

    def testHalfDuplexCall(self):

        def half_duplex_request_iterator():
            request = request_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=1, interval_us=0)
            yield request
            request = request_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=2, interval_us=0)
            request.response_parameters.add(size=3, interval_us=0)
            yield request

        responses = service_pb2_grpc.TestService.HalfDuplexCall(
            half_duplex_request_iterator(),
            self._target,
            channel_credentials=grpc.experimental.insecure_channel_credentials(),
            wait_for_ready=True)
        expected_responses = self.servicer_methods.HalfDuplexCall(
            half_duplex_request_iterator(), 'not a real RpcContext!')
        for expected_response, response in moves.zip_longest(
                expected_responses, responses):
            self.assertEqual(expected_response, response)
class ModuleMainTest(unittest.TestCase):
    """Test case for running `python -m grpc_tools.protoc`."""

    def test_clean_output(self):
        """protoc must succeed on a valid proto with no stdout/stderr noise."""
        if sys.executable is None:
            raise unittest.SkipTest(
                "Running on a interpreter that cannot be invoked from the CLI.")
        proto_dir_path = os.path.join("src", "proto")
        test_proto_path = os.path.join(proto_dir_path, "grpc", "testing",
                                       "empty.proto")
        # one TemporaryFile each for the subprocess's stdout and stderr
        streams = tuple(tempfile.TemporaryFile() for _ in range(2))
        work_dir = tempfile.mkdtemp()
        try:
            invocation = (sys.executable, "-m", "grpc_tools.protoc",
                          "--proto_path", proto_dir_path, "--python_out",
                          work_dir, "--grpc_python_out", work_dir,
                          test_proto_path)
            proc = subprocess.Popen(invocation,
                                    stdout=streams[0],
                                    stderr=streams[1])
            proc.wait()
            # both output streams must be completely empty
            for stream in streams:
                stream.seek(0)
                self.assertEqual(0, len(stream.read()))
            self.assertEqual(0, proc.returncode)
        finally:
            # Fix: the original removed work_dir only in an `except Exception`
            # branch, leaking the temp dir on success and silently swallowing
            # any failure; `finally` cleans up on every path and lets
            # assertion errors propagate. (mkdtemp callers must delete the
            # directory themselves.)
            shutil.rmtree(work_dir)
# Allow running this test module directly; verbosity=2 names each test.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| |
import itertools
import numpy as np
import operator
from numba.core import types, errors
from numba import prange
from numba.parfors.parfor import internal_prange
from numba.core.typing.templates import (AttributeTemplate, ConcreteTemplate,
AbstractTemplate, infer_global, infer,
infer_getattr, signature,
bound_function, make_callable_template)
from numba.cpython.builtins import get_type_min_value, get_type_max_value
from numba.core.extending import (
typeof_impl, type_callable, models, register_model, make_attribute_wrapper,
)
@infer_global(print)
class Print(AbstractTemplate):
    """Typing for the builtin print(): every argument must itself be
    typeable through the "print_item" intrinsic registered below."""
    def generic(self, args, kws):
        for a in args:
            # Each argument resolves independently; None means "no match".
            sig = self.context.resolve_function_type("print_item", (a,), {})
            if sig is None:
                raise TypeError("Type %s is not printable." % a)
            assert sig.return_type is types.none
        # print() itself returns None.
        return signature(types.none, *args)
@infer
class PrintItem(AbstractTemplate):
    """Typing for printing one single value (the "print_item" intrinsic)."""
    key = "print_item"
    def generic(self, args, kws):
        # The unpacking enforces exactly one argument.
        arg, = args
        return signature(types.none, *args)
@infer_global(abs)
class Abs(ConcreteTemplate):
    # abs() preserves the input type, except for complex numbers where it
    # returns the magnitude as the matching float type.
    int_cases = [signature(ty, ty) for ty in sorted(types.signed_domain)]
    uint_cases = [signature(ty, ty) for ty in sorted(types.unsigned_domain)]
    real_cases = [signature(ty, ty) for ty in sorted(types.real_domain)]
    complex_cases = [signature(ty.underlying_float, ty)
                     for ty in sorted(types.complex_domain)]
    cases = int_cases + uint_cases + real_cases + complex_cases
@infer_global(slice)
class Slice(ConcreteTemplate):
    # slice() takes 1-3 arguments, each either intp or None; the
    # 1/2-argument forms yield slice2, the 3-argument forms slice3.
    cases = [
        signature(types.slice2_type, types.intp),
        signature(types.slice2_type, types.none),
        signature(types.slice2_type, types.none, types.none),
        signature(types.slice2_type, types.none, types.intp),
        signature(types.slice2_type, types.intp, types.none),
        signature(types.slice2_type, types.intp, types.intp),
        signature(types.slice3_type, types.intp, types.intp, types.intp),
        signature(types.slice3_type, types.none, types.intp, types.intp),
        signature(types.slice3_type, types.intp, types.none, types.intp),
        signature(types.slice3_type, types.intp, types.intp, types.none),
        signature(types.slice3_type, types.intp, types.none, types.none),
        signature(types.slice3_type, types.none, types.intp, types.none),
        signature(types.slice3_type, types.none, types.none, types.intp),
        signature(types.slice3_type, types.none, types.none, types.none),
    ]
# range(), prange() and the internal parallel range all share the same
# typing: 1-3 integer arguments, signedness selecting the range state type.
@infer_global(range, typing_key=range)
@infer_global(prange, typing_key=prange)
@infer_global(internal_prange, typing_key=internal_prange)
class Range(ConcreteTemplate):
    cases = [
        signature(types.range_state32_type, types.int32),
        signature(types.range_state32_type, types.int32, types.int32),
        signature(types.range_state32_type, types.int32, types.int32,
                  types.int32),
        signature(types.range_state64_type, types.int64),
        signature(types.range_state64_type, types.int64, types.int64),
        signature(types.range_state64_type, types.int64, types.int64,
                  types.int64),
        signature(types.unsigned_range_state64_type, types.uint64),
        signature(types.unsigned_range_state64_type, types.uint64, types.uint64),
        signature(types.unsigned_range_state64_type, types.uint64, types.uint64,
                  types.uint64),
    ]
@infer
class GetIter(AbstractTemplate):
    # Typing for the "getiter" intrinsic: an IterableType yields its
    # declared iterator type; anything else returns None (not iterable).
    key = "getiter"
    def generic(self, args, kws):
        assert not kws
        [obj] = args
        if isinstance(obj, types.IterableType):
            return signature(obj.iterator_type, obj)
@infer
class IterNext(AbstractTemplate):
    # Typing for the "iternext" intrinsic: produces a (value, is_valid)
    # pair so lowering can test for exhaustion without exceptions.
    key = "iternext"
    def generic(self, args, kws):
        assert not kws
        [it] = args
        if isinstance(it, types.IteratorType):
            return signature(types.Pair(it.yield_type, types.boolean), it)
@infer
class PairFirst(AbstractTemplate):
    """
    Given a heterogeneous pair, return the first element.
    """
    key = "pair_first"
    def generic(self, args, kws):
        assert not kws
        [pair] = args
        if isinstance(pair, types.Pair):
            return signature(pair.first_type, pair)
@infer
class PairSecond(AbstractTemplate):
    """
    Given a heterogeneous pair, return the second element.
    """
    key = "pair_second"
    def generic(self, args, kws):
        assert not kws
        [pair] = args
        if isinstance(pair, types.Pair):
            return signature(pair.second_type, pair)
def choose_result_bitwidth(*inputs):
    """Return the bitwidth an integer result should have: the widest of
    the operands, but never narrower than the native ``intp``."""
    widths = [tp.bitwidth for tp in inputs]
    widths.append(types.intp.bitwidth)
    return max(widths)
def choose_result_int(*inputs):
    """
    Choose the integer result type for an operation on integer inputs,
    according to the integer typing NBEP.
    """
    # The result is signed as soon as any operand is signed.
    is_signed = any(tp.signed for tp in inputs)
    width = choose_result_bitwidth(*inputs)
    return types.Integer.from_bitwidth(width, is_signed)
# The "machine" integer types to take into consideration for operator typing
# (according to the integer typing NBEP)
# NOTE(review): the set() calls collapse duplicates — presumably on
# platforms where intp/uintp are already 64-bit; confirm on 32-bit builds.
machine_ints = (
    sorted(set((types.intp, types.int64))) +
    sorted(set((types.uintp, types.uint64)))
)
# Explicit integer rules for binary operators; smaller ints will be
# automatically upcast.
integer_binop_cases = tuple(
    signature(choose_result_int(op1, op2), op1, op2)
    for op1, op2 in itertools.product(machine_ints, machine_ints)
)
# Base template for the arithmetic binary operators: machine-int
# combinations plus homogeneous float and complex cases.
class BinOp(ConcreteTemplate):
    cases = list(integer_binop_cases)
    cases += [signature(op, op, op) for op in sorted(types.real_domain)]
    cases += [signature(op, op, op) for op in sorted(types.complex_domain)]
# NOTE(review): several classes below intentionally reuse one class name
# for both the plain and the in-place operator variant (e.g. BinOpAdd for
# operator.add and operator.iadd).  The later definition shadows the
# earlier module attribute, but registration happens at decoration time,
# so both templates are still registered.
@infer_global(operator.add)
class BinOpAdd(BinOp):
    pass
@infer_global(operator.iadd)
class BinOpAdd(BinOp):
    pass
@infer_global(operator.sub)
class BinOpSub(BinOp):
    pass
@infer_global(operator.isub)
class BinOpSub(BinOp):
    pass
@infer_global(operator.mul)
class BinOpMul(BinOp):
    pass
@infer_global(operator.imul)
class BinOpMul(BinOp):
    pass
# Modulo: integers and reals only (no complex cases).
@infer_global(operator.mod)
class BinOpMod(ConcreteTemplate):
    cases = list(integer_binop_cases)
    cases += [signature(op, op, op) for op in sorted(types.real_domain)]
@infer_global(operator.imod)
class BinOpMod(ConcreteTemplate):
    cases = list(integer_binop_cases)
    cases += [signature(op, op, op) for op in sorted(types.real_domain)]
# True division always yields float64 for integer operands.
@infer_global(operator.truediv)
class BinOpTrueDiv(ConcreteTemplate):
    cases = [signature(types.float64, op1, op2)
             for op1, op2 in itertools.product(machine_ints, machine_ints)]
    cases += [signature(op, op, op) for op in sorted(types.real_domain)]
    cases += [signature(op, op, op) for op in sorted(types.complex_domain)]
@infer_global(operator.itruediv)
class BinOpTrueDiv(ConcreteTemplate):
    cases = [signature(types.float64, op1, op2)
             for op1, op2 in itertools.product(machine_ints, machine_ints)]
    cases += [signature(op, op, op) for op in sorted(types.real_domain)]
    cases += [signature(op, op, op) for op in sorted(types.complex_domain)]
# Floor division: integers and reals only.
@infer_global(operator.floordiv)
class BinOpFloorDiv(ConcreteTemplate):
    cases = list(integer_binop_cases)
    cases += [signature(op, op, op) for op in sorted(types.real_domain)]
@infer_global(operator.ifloordiv)
class BinOpFloorDiv(ConcreteTemplate):
    cases = list(integer_binop_cases)
    cases += [signature(op, op, op) for op in sorted(types.real_domain)]
# divmod() returns a homogeneous 2-tuple of its (homogeneous) operand type.
@infer_global(divmod)
class DivMod(ConcreteTemplate):
    _tys = machine_ints + sorted(types.real_domain)
    cases = [signature(types.UniTuple(ty, 2), ty, ty) for ty in _tys]
@infer_global(operator.pow)
class BinOpPower(ConcreteTemplate):
    cases = list(integer_binop_cases)
    # Ensure that float32 ** int doesn't go through DP computations
    cases += [signature(types.float32, types.float32, op)
              for op in (types.int32, types.int64, types.uint64)]
    cases += [signature(types.float64, types.float64, op)
              for op in (types.int32, types.int64, types.uint64)]
    cases += [signature(op, op, op)
              for op in sorted(types.real_domain)]
    cases += [signature(op, op, op)
              for op in sorted(types.complex_domain)]
@infer_global(operator.ipow)
class BinOpPower(ConcreteTemplate):
    cases = list(integer_binop_cases)
    # Ensure that float32 ** int doesn't go through DP computations
    cases += [signature(types.float32, types.float32, op)
              for op in (types.int32, types.int64, types.uint64)]
    cases += [signature(types.float64, types.float64, op)
              for op in (types.int32, types.int64, types.uint64)]
    cases += [signature(op, op, op)
              for op in sorted(types.real_domain)]
    cases += [signature(op, op, op)
              for op in sorted(types.complex_domain)]
# The pow() builtin shares the operator.pow cases.
@infer_global(pow)
class PowerBuiltin(BinOpPower):
    # TODO add 3 operand version
    pass
class BitwiseShiftOperation(ConcreteTemplate):
    # For bitshifts, only the first operand's signedness matters
    # to choose the operation's signedness (the second operand
    # should always be positive but will generally be considered
    # signed anyway, since it's often a constant integer).
    # (also, see issue #1995 for right-shifts)
    # The RHS type is fixed to 64-bit signed/unsigned ints.
    # The implementation will always cast the operands to the width of the
    # result type, which is the widest between the LHS type and (u)intp.
    cases = [signature(max(op, types.intp), op, op2)
             for op in sorted(types.signed_domain)
             for op2 in [types.uint64, types.int64]]
    cases += [signature(max(op, types.uintp), op, op2)
              for op in sorted(types.unsigned_domain)
              for op2 in [types.uint64, types.int64]]
    unsafe_casting = False
@infer_global(operator.lshift)
class BitwiseLeftShift(BitwiseShiftOperation):
    pass
@infer_global(operator.ilshift)
class BitwiseLeftShift(BitwiseShiftOperation):
    pass
@infer_global(operator.rshift)
class BitwiseRightShift(BitwiseShiftOperation):
    pass
@infer_global(operator.irshift)
class BitwiseRightShift(BitwiseShiftOperation):
    pass
# Bitwise and/or/xor: booleans stay boolean, integers follow the
# machine-int upcast rules; no implicit unsafe casts allowed.
class BitwiseLogicOperation(BinOp):
    cases = [signature(types.boolean, types.boolean, types.boolean)]
    cases += list(integer_binop_cases)
    unsafe_casting = False
@infer_global(operator.and_)
class BitwiseAnd(BitwiseLogicOperation):
    pass
@infer_global(operator.iand)
class BitwiseAnd(BitwiseLogicOperation):
    pass
@infer_global(operator.or_)
class BitwiseOr(BitwiseLogicOperation):
    pass
@infer_global(operator.ior)
class BitwiseOr(BitwiseLogicOperation):
    pass
@infer_global(operator.xor)
class BitwiseXor(BitwiseLogicOperation):
    pass
@infer_global(operator.ixor)
class BitwiseXor(BitwiseLogicOperation):
    pass
# Bitwise invert and negate are special: we must not upcast the operand
# for unsigned numbers, as that would change the result.
# (i.e. ~np.uint8(0) == 255 but ~np.uint32(0) == 4294967295).
@infer_global(operator.invert)
class BitwiseInvert(ConcreteTemplate):
    # Note Numba follows the Numpy semantics of returning a bool,
    # while Python returns an int. This makes it consistent with
    # np.invert() and makes array expressions correct.
    cases = [signature(types.boolean, types.boolean)]
    cases += [signature(choose_result_int(op), op) for op in sorted(types.unsigned_domain)]
    cases += [signature(choose_result_int(op), op) for op in sorted(types.signed_domain)]
    unsafe_casting = False
# Unary +/-: integers upcast per choose_result_int, floats/complex keep
# their type, booleans promote to intp.
class UnaryOp(ConcreteTemplate):
    cases = [signature(choose_result_int(op), op) for op in sorted(types.unsigned_domain)]
    cases += [signature(choose_result_int(op), op) for op in sorted(types.signed_domain)]
    cases += [signature(op, op) for op in sorted(types.real_domain)]
    cases += [signature(op, op) for op in sorted(types.complex_domain)]
    cases += [signature(types.intp, types.boolean)]
@infer_global(operator.neg)
class UnaryNegate(UnaryOp):
    pass
@infer_global(operator.pos)
class UnaryPositive(UnaryOp):
    pass
# Logical not always yields a boolean, for every numeric domain.
@infer_global(operator.not_)
class UnaryNot(ConcreteTemplate):
    cases = [signature(types.boolean, types.boolean)]
    cases += [signature(types.boolean, op) for op in sorted(types.signed_domain)]
    cases += [signature(types.boolean, op) for op in sorted(types.unsigned_domain)]
    cases += [signature(types.boolean, op) for op in sorted(types.real_domain)]
    cases += [signature(types.boolean, op) for op in sorted(types.complex_domain)]
# Ordered comparisons (<, <=, >, >=) exclude complex numbers.
class OrderedCmpOp(ConcreteTemplate):
    cases = [signature(types.boolean, types.boolean, types.boolean)]
    cases += [signature(types.boolean, op, op) for op in sorted(types.signed_domain)]
    cases += [signature(types.boolean, op, op) for op in sorted(types.unsigned_domain)]
    cases += [signature(types.boolean, op, op) for op in sorted(types.real_domain)]
# Unordered comparisons (==, !=) additionally accept complex numbers.
class UnorderedCmpOp(ConcreteTemplate):
    cases = OrderedCmpOp.cases + [
        signature(types.boolean, op, op) for op in sorted(types.complex_domain)]
@infer_global(operator.lt)
class CmpOpLt(OrderedCmpOp):
    pass
@infer_global(operator.le)
class CmpOpLe(OrderedCmpOp):
    pass
@infer_global(operator.gt)
class CmpOpGt(OrderedCmpOp):
    pass
@infer_global(operator.ge)
class CmpOpGe(OrderedCmpOp):
    pass
# more specific overloads should be registered first
@infer_global(operator.eq)
class ConstOpEq(AbstractTemplate):
    # Equality of two literal types resolves to a boolean.
    def generic(self, args, kws):
        assert not kws
        (arg1, arg2) = args
        if isinstance(arg1, types.Literal) and isinstance(arg2, types.Literal):
            return signature(types.boolean, arg1, arg2)
@infer_global(operator.ne)
class ConstOpNotEq(ConstOpEq):
    pass
@infer_global(operator.eq)
class CmpOpEq(UnorderedCmpOp):
    pass
@infer_global(operator.ne)
class CmpOpNe(UnorderedCmpOp):
    pass
class TupleCompare(AbstractTemplate):
    """Typing for comparing two tuples: valid only if every element pair
    is itself comparable with the same operator (self.key)."""
    def generic(self, args, kws):
        [lhs, rhs] = args
        if isinstance(lhs, types.BaseTuple) and isinstance(rhs, types.BaseTuple):
            for u, v in zip(lhs, rhs):
                # Check element-wise comparability
                res = self.context.resolve_function_type(self.key, (u, v), {})
                if res is None:
                    break
            else:
                # for/else: only reached when no element pair failed.
                return signature(types.boolean, lhs, rhs)
@infer_global(operator.eq)
class TupleEq(TupleCompare):
    pass
@infer_global(operator.ne)
class TupleNe(TupleCompare):
    pass
@infer_global(operator.ge)
class TupleGe(TupleCompare):
    pass
@infer_global(operator.gt)
class TupleGt(TupleCompare):
    pass
@infer_global(operator.le)
class TupleLe(TupleCompare):
    pass
@infer_global(operator.lt)
class TupleLt(TupleCompare):
    pass
@infer_global(operator.add)
class TupleAdd(AbstractTemplate):
    """Typing for tuple concatenation with ``+`` (anonymous tuples only)."""
    def generic(self, args, kws):
        if len(args) != 2:
            return
        lhs, rhs = args
        # Namedtuples are excluded: concatenating them would lose the
        # field names, so only plain (Base)tuples are accepted.
        for operand in (lhs, rhs):
            if not isinstance(operand, types.BaseTuple):
                return
            if isinstance(operand, types.BaseNamedTuple):
                return
        joined = types.BaseTuple.from_types(tuple(lhs) + tuple(rhs))
        return signature(joined, lhs, rhs)
class CmpOpIdentity(AbstractTemplate):
    # Identity tests (is / is not) are typeable for any operand pair and
    # always produce a boolean.
    def generic(self, args, kws):
        [lhs, rhs] = args
        return signature(types.boolean, lhs, rhs)
@infer_global(operator.is_)
class CmpOpIs(CmpOpIdentity):
    pass
@infer_global(operator.is_not)
class CmpOpIsNot(CmpOpIdentity):
    pass
def normalize_1d_index(index):
    """
    Normalize the *index* type (an integer or slice) for indexing a 1D
    sequence.

    Slices pass through unchanged; integers widen to the pointer-sized
    integer of matching signedness.  Other types yield None.
    """
    if isinstance(index, types.SliceType):
        return index
    if isinstance(index, types.Integer):
        if index.signed:
            return types.intp
        return types.uintp
@infer_global(operator.getitem)
class GetItemCPointer(AbstractTemplate):
    # ptr[i] on a C pointer: the result is the pointee type.
    def generic(self, args, kws):
        assert not kws
        ptr, idx = args
        if isinstance(ptr, types.CPointer) and isinstance(idx, types.Integer):
            return signature(ptr.dtype, ptr, normalize_1d_index(idx))
@infer_global(operator.setitem)
class SetItemCPointer(AbstractTemplate):
    # ptr[i] = val on a C pointer: the stored value is typed as the pointee.
    def generic(self, args, kws):
        assert not kws
        ptr, idx, val = args
        if isinstance(ptr, types.CPointer) and isinstance(idx, types.Integer):
            return signature(types.none, ptr, normalize_1d_index(idx), ptr.dtype)
@infer_global(len)
class Len(AbstractTemplate):
    # len() of buffers/tuples is intp; len() of a range uses the range's
    # own dtype.
    def generic(self, args, kws):
        assert not kws
        (val,) = args
        if isinstance(val, (types.Buffer, types.BaseTuple)):
            return signature(types.intp, val)
        elif isinstance(val, (types.RangeType)):
            return signature(val.dtype, val)
@infer_global(tuple)
class TupleConstructor(AbstractTemplate):
    # tuple() with no argument -> empty tuple; tuple(t) on a tuple is the
    # identity.  Other argument types are not supported (returns None).
    def generic(self, args, kws):
        assert not kws
        # empty tuple case
        if len(args) == 0:
            return signature(types.Tuple(()))
        (val,) = args
        # tuple as input
        if isinstance(val, types.BaseTuple):
            return signature(val, val)
@infer_global(operator.contains)
class Contains(AbstractTemplate):
    # "val in seq" for sequence types, always boolean.
    def generic(self, args, kws):
        assert not kws
        (seq, val) = args
        if isinstance(seq, (types.Sequence)):
            return signature(types.boolean, seq, val)
@infer_global(operator.truth)
class TupleBool(AbstractTemplate):
    # Truth value of a tuple (non-emptiness), always boolean.
    def generic(self, args, kws):
        assert not kws
        (val,) = args
        if isinstance(val, (types.BaseTuple)):
            return signature(types.boolean, val)
@infer
class StaticGetItemTuple(AbstractTemplate):
    # Compile-time constant indexing of tuples: an int selects the element
    # type, a slice produces a new tuple type from the selected elements.
    key = "static_getitem"
    def generic(self, args, kws):
        tup, idx = args
        ret = None
        if not isinstance(tup, types.BaseTuple):
            return
        if isinstance(idx, int):
            try:
                ret = tup.types[idx]
            except IndexError:
                raise errors.NumbaIndexError("tuple index out of range")
        elif isinstance(idx, slice):
            ret = types.BaseTuple.from_types(tup.types[idx])
        if ret is not None:
            sig = signature(ret, *args)
            return sig
@infer
class StaticGetItemLiteralList(AbstractTemplate):
    # Constant integer indexing of a LiteralList selects the element type.
    key = "static_getitem"
    def generic(self, args, kws):
        tup, idx = args
        ret = None
        if not isinstance(tup, types.LiteralList):
            return
        if isinstance(idx, int):
            ret = tup.types[idx]
        if ret is not None:
            sig = signature(ret, *args)
            return sig
@infer
class StaticGetItemLiteralStrKeyDict(AbstractTemplate):
    # Constant string key lookup in a LiteralStrKeyDict: the key's
    # position in .fields indexes the corresponding value type.
    key = "static_getitem"
    def generic(self, args, kws):
        tup, idx = args
        ret = None
        if not isinstance(tup, types.LiteralStrKeyDict):
            return
        if isinstance(idx, str):
            if idx in tup.fields:
                lookup = tup.fields.index(idx)
            else:
                raise errors.NumbaKeyError(f"Key '{idx}' is not in dict.")
            ret = tup.types[lookup]
        if ret is not None:
            sig = signature(ret, *args)
            return sig
# Generic implementation for "not in"
@infer
class GenericNotIn(AbstractTemplate):
    """Type "x not in y" by resolving "y contains x" and reusing its
    signature with the arguments swapped back."""
    key = "not in"
    def generic(self, args, kws):
        # operator.contains takes (container, item), i.e. reversed order.
        args = args[::-1]
        sig = self.context.resolve_function_type(operator.contains, args, kws)
        if sig is None:
            # No "contains" typing for these operands: report "no match"
            # instead of crashing on sig.return_type below (templates
            # signal failure by returning None, cf. Print above).
            return
        return signature(sig.return_type, *sig.args[::-1])
#-------------------------------------------------------------------------------
@infer_getattr
class MemoryViewAttribute(AttributeTemplate):
    """Attribute typing for memoryview objects, mirroring the attributes
    of Python's builtin memoryview."""
    key = types.MemoryView
    def resolve_contiguous(self, buf):
        return types.boolean
    def resolve_c_contiguous(self, buf):
        return types.boolean
    def resolve_f_contiguous(self, buf):
        return types.boolean
    def resolve_itemsize(self, buf):
        return types.intp
    def resolve_nbytes(self, buf):
        return types.intp
    def resolve_readonly(self, buf):
        return types.boolean
    def resolve_shape(self, buf):
        # One intp per dimension.
        return types.UniTuple(types.intp, buf.ndim)
    def resolve_strides(self, buf):
        return types.UniTuple(types.intp, buf.ndim)
    def resolve_ndim(self, buf):
        return types.intp
#-------------------------------------------------------------------------------
@infer_getattr
class BooleanAttribute(AttributeTemplate):
    """Attribute typing for boolean scalars."""
    key = types.Boolean
    def resolve___class__(self, ty):
        return types.NumberClass(ty)
    @bound_function("number.item")
    def resolve_item(self, ty, args, kws):
        # .item() takes no arguments and returns the scalar itself.
        assert not kws
        if not args:
            return signature(ty)
@infer_getattr
class NumberAttribute(AttributeTemplate):
    """Attribute typing for numeric scalars (.real, .imag, .conjugate(),
    .item())."""
    key = types.Number
    def resolve___class__(self, ty):
        return types.NumberClass(ty)
    def resolve_real(self, ty):
        # For complex types this is the underlying float; otherwise the
        # type itself.
        return getattr(ty, "underlying_float", ty)
    def resolve_imag(self, ty):
        return getattr(ty, "underlying_float", ty)
    @bound_function("complex.conjugate")
    def resolve_conjugate(self, ty, args, kws):
        assert not args
        assert not kws
        return signature(ty)
    @bound_function("number.item")
    def resolve_item(self, ty, args, kws):
        assert not kws
        if not args:
            return signature(ty)
@infer_getattr
class NPTimedeltaAttribute(AttributeTemplate):
    key = types.NPTimedelta
    def resolve___class__(self, ty):
        return types.NumberClass(ty)
@infer_getattr
class NPDatetimeAttribute(AttributeTemplate):
    key = types.NPDatetime
    def resolve___class__(self, ty):
        return types.NumberClass(ty)
@infer_getattr
class SliceAttribute(AttributeTemplate):
    """Attribute typing for slice objects (.start/.stop/.step and
    .indices())."""
    key = types.SliceType
    def resolve_start(self, ty):
        return types.intp
    def resolve_stop(self, ty):
        return types.intp
    def resolve_step(self, ty):
        return types.intp
    @bound_function("slice.indices")
    def resolve_indices(self, ty, args, kws):
        assert not kws
        if len(args) != 1:
            raise errors.NumbaTypeError(
                "indices() takes exactly one argument (%d given)" % len(args)
            )
        typ, = args
        if not isinstance(typ, types.Integer):
            raise errors.NumbaTypeError(
                "'%s' object cannot be interpreted as an integer" % typ
            )
        # The length argument is accepted as intp (the integer argument is
        # cast); the result is the usual (start, stop, step) triple.
        return signature(types.UniTuple(types.intp, 3), types.intp)
#-------------------------------------------------------------------------------
@infer_getattr
class NumberClassAttribute(AttributeTemplate):
    key = types.NumberClass
    def resolve___call__(self, classty):
        """
        Resolve a number class's constructor (e.g. calling int(...))
        """
        ty = classty.instance_type
        def typer(val):
            if isinstance(val, (types.BaseTuple, types.Sequence)):
                # Array constructor, e.g. np.int32([1, 2])
                fnty = self.context.resolve_value_type(np.array)
                sig = fnty.get_call_type(self.context, (val, types.DType(ty)),
                                         {})
                return sig.return_type
            elif isinstance(val, (types.Number, types.Boolean, types.IntEnumMember)):
                # Scalar constructor, e.g. np.int32(42)
                return ty
            elif isinstance(val, (types.NPDatetime, types.NPTimedelta)):
                # Constructor cast from datetime-like, e.g.
                # > np.int64(np.datetime64("2000-01-01"))
                # Only 64-bit targets can hold the datetime payload.
                if ty.bitwidth == 64:
                    return ty
                else:
                    msg = (f"Cannot cast {val} to {ty} as {ty} is not 64 bits "
                           "wide.")
                    raise errors.TypingError(msg)
            else:
                if (isinstance(val, types.Array) and val.ndim == 0 and
                        val.dtype == ty):
                    # This is 0d array -> scalar degrading
                    return ty
                else:
                    # unsupported
                    msg = f"Casting {val} to {ty} directly is unsupported."
                    if isinstance(val, types.Array):
                        # array casts are supported a different way.
                        msg += f" Try doing '<array>.astype(np.{ty})' instead"
                    raise errors.TypingError(msg)
        return types.Function(make_callable_template(key=ty, typer=typer))
@infer_getattr
class TypeRefAttribute(AttributeTemplate):
    key = types.TypeRef
    def resolve___call__(self, classty):
        """
        Resolve a number class's constructor (e.g. calling int(...))

        Note:

        This is needed because of the limitation of the current type-system
        implementation. Specifically, the lack of a higher-order type
        (i.e. passing the ``DictType`` vs ``DictType(key_type, value_type)``)
        """
        ty = classty.instance_type
        if isinstance(ty, type) and issubclass(ty, types.Type):
            # Redirect the typing to a:
            #    @type_callable(ty)
            #    def typeddict_call(context):
            #         ...
            # For example, see numba/typed/typeddict.py
            #    @type_callable(DictType)
            #    def typeddict_call(context):
            class Redirect(object):
                # Callable proxy that forwards the call typing back to the
                # context-level resolution for the referenced type.
                def __init__(self, context):
                    self.context = context
                def __call__(self, *args, **kwargs):
                    result = self.context.resolve_function_type(ty, args, kwargs)
                    if hasattr(result, "pysig"):
                        # Preserve the Python signature for callers.
                        self.pysig = result.pysig
                    return result
            return types.Function(make_callable_template(key=ty,
                                                         typer=Redirect(self.context)))
#------------------------------------------------------------------------------
class MinMaxBase(AbstractTemplate):
    """Shared typing logic for the min() and max() builtins."""
    def _unify_minmax(self, tys):
        # All candidates must be numbers; the result is their unified type.
        for ty in tys:
            if not isinstance(ty, types.Number):
                return
        return self.context.unify_types(*tys)
    def generic(self, args, kws):
        """
        Resolve a min() or max() call.
        """
        assert not kws
        if not args:
            return
        if len(args) == 1:
            # max(arg) only supported if arg is an iterable
            if isinstance(args[0], types.BaseTuple):
                tys = list(args[0])
                if not tys:
                    raise TypeError("%s() argument is an empty tuple"
                                    % (self.key.__name__,))
            else:
                return
        else:
            # max(*args)
            tys = args
        retty = self._unify_minmax(tys)
        if retty is not None:
            return signature(retty, *args)
@infer_global(max)
class Max(MinMaxBase):
    pass
@infer_global(min)
class Min(MinMaxBase):
    pass
@infer_global(round)
class Round(ConcreteTemplate):
    # One-argument round() returns an integer; the two-argument form
    # (with ndigits) keeps the float type.
    cases = [
        signature(types.intp, types.float32),
        signature(types.int64, types.float64),
        signature(types.float32, types.float32, types.intp),
        signature(types.float64, types.float64, types.intp),
    ]
#------------------------------------------------------------------------------
@infer_global(bool)
class Bool(AbstractTemplate):
    def generic(self, args, kws):
        assert not kws
        [arg] = args
        if isinstance(arg, (types.Boolean, types.Number)):
            return signature(types.boolean, arg)
        # XXX typing for bool cannot be polymorphic because of the
        # types.Function thing, so we redirect to the operator.truth
        # intrinsic.
        return self.context.resolve_function_type(operator.truth, args, kws)
@infer_global(int)
class Int(AbstractTemplate):
    def generic(self, args, kws):
        if kws:
            raise errors.NumbaAssertionError('kws not supported')
        [arg] = args
        # int(int) is the identity; floats and booleans convert to intp.
        if isinstance(arg, types.Integer):
            return signature(arg, arg)
        if isinstance(arg, (types.Float, types.Boolean)):
            return signature(types.intp, arg)
@infer_global(float)
class Float(AbstractTemplate):
    def generic(self, args, kws):
        assert not kws
        [arg] = args
        if arg not in types.number_domain:
            raise errors.NumbaTypeError("float() only support for numbers")
        if arg in types.complex_domain:
            raise errors.NumbaTypeError("float() does not support complex")
        # Integers convert to float64; floats keep their precision.
        if arg in types.integer_domain:
            return signature(types.float64, arg)
        elif arg in types.real_domain:
            return signature(arg, arg)
@infer_global(complex)
class Complex(AbstractTemplate):
    # NOTE(review): raises plain TypeError while Float/Int above use
    # errors.Numba* exceptions — possibly intentional; confirm upstream.
    def generic(self, args, kws):
        assert not kws
        if len(args) == 1:
            [arg] = args
            if arg not in types.number_domain:
                raise TypeError("complex() only support for numbers")
            # float32 input keeps single precision (complex64).
            if arg == types.float32:
                return signature(types.complex64, arg)
            else:
                return signature(types.complex128, arg)
        elif len(args) == 2:
            [real, imag] = args
            if (real not in types.number_domain or
                imag not in types.number_domain):
                raise TypeError("complex() only support for numbers")
            if real == imag == types.float32:
                return signature(types.complex64, real, imag)
            else:
                return signature(types.complex128, real, imag)
#------------------------------------------------------------------------------
@infer_global(enumerate)
class Enumerate(AbstractTemplate):
    def generic(self, args, kws):
        assert not kws
        it = args[0]
        # Optional second argument is the integer start value.
        if len(args) > 1 and not isinstance(args[1], types.Integer):
            raise errors.NumbaTypeError("Only integers supported as start "
                                        "value in enumerate")
        elif len(args) > 2:
            #let python raise its own error
            enumerate(*args)
        if isinstance(it, types.IterableType):
            enumerate_type = types.EnumerateType(it)
            return signature(enumerate_type, *args)
@infer_global(zip)
class Zip(AbstractTemplate):
    # zip() is typed whenever every argument is iterable.
    def generic(self, args, kws):
        assert not kws
        if all(isinstance(it, types.IterableType) for it in args):
            zip_type = types.ZipType(args)
            return signature(zip_type, *args)
@infer_global(iter)
class Iter(AbstractTemplate):
    # One-argument iter() over an iterable yields its iterator type.
    def generic(self, args, kws):
        assert not kws
        if len(args) == 1:
            it = args[0]
            if isinstance(it, types.IterableType):
                return signature(it.iterator_type, *args)
@infer_global(next)
class Next(AbstractTemplate):
    # One-argument next() over an iterator yields its yield type.
    def generic(self, args, kws):
        assert not kws
        if len(args) == 1:
            it = args[0]
            if isinstance(it, types.IteratorType):
                return signature(it.yield_type, *args)
#------------------------------------------------------------------------------
@infer_global(type)
class TypeBuiltin(AbstractTemplate):
    def generic(self, args, kws):
        assert not kws
        if len(args) == 1:
            # One-argument type() -> return the __class__
            # Avoid literal types
            arg = types.unliteral(args[0])
            classty = self.context.resolve_getattr(arg, "__class__")
            if classty is not None:
                return signature(classty, *args)
#------------------------------------------------------------------------------
@infer_getattr
class OptionalAttribute(AttributeTemplate):
    # Attribute access on Optional[T] is delegated to T itself.
    key = types.Optional
    def generic_resolve(self, optional, attr):
        return self.context.resolve_getattr(optional.type, attr)
#------------------------------------------------------------------------------
@infer_getattr
class DeferredAttribute(AttributeTemplate):
    # Attribute access on a deferred type is delegated to the type it
    # eventually resolves to.
    key = types.DeferredType
    def generic_resolve(self, deferred, attr):
        return self.context.resolve_getattr(deferred.get(), attr)
#------------------------------------------------------------------------------
@infer_global(get_type_min_value)
@infer_global(get_type_max_value)
class MinValInfer(AbstractTemplate):
    # Typing for the type-extremum helpers: a DType/NumberClass argument
    # yields a scalar of the wrapped dtype.
    def generic(self, args, kws):
        assert not kws
        assert len(args) == 1
        assert isinstance(args[0], (types.DType, types.NumberClass))
        return signature(args[0].dtype, *args)
#------------------------------------------------------------------------------
class IndexValue(object):
    """
    A plain pair of an index and the value found at that index.
    """
    def __init__(self, ind, val):
        # Position within the sequence.
        self.index = ind
        # Element stored at that position.
        self.value = val
    def __repr__(self):
        fields = (self.index, self.value)
        return 'IndexValue(%f, %f)' % fields
class IndexValueType(types.Type):
    # Numba type for IndexValue instances, parametric on the type of the
    # stored value.
    def __init__(self, val_typ):
        self.val_typ = val_typ
        super(IndexValueType, self).__init__(
            name='IndexValueType({})'.format(val_typ))
@typeof_impl.register(IndexValue)
def typeof_index(val, c):
    """Map a Python-level IndexValue instance to its Numba type."""
    val_typ = typeof_impl(val.value, c)
    return IndexValueType(val_typ)
@type_callable(IndexValue)
def type_index_value(context):
    """Type the IndexValue(ind, val) constructor; the index must be a
    pointer-sized integer."""
    def typer(ind, mval):
        if ind == types.intp or ind == types.uintp:
            return IndexValueType(mval)
    return typer
@register_model(IndexValueType)
class IndexValueModel(models.StructModel):
    # Lower IndexValue as a simple struct of (index, value).
    def __init__(self, dmm, fe_type):
        members = [
            ('index', types.intp),
            ('value', fe_type.val_typ),
        ]
        models.StructModel.__init__(self, dmm, fe_type, members)
# Expose the struct fields as attributes on the Numba type.
make_attribute_wrapper(IndexValueType, 'index', 'index')
make_attribute_wrapper(IndexValueType, 'value', 'value')
| |
import sys
import paddle.v2 as paddle
def seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False):
    """Build the encoder-decoder (attention) translation network.

    In training mode (is_generating=False) returns the classification
    cost layer; in generation mode returns a beam-search layer.
    """
    ### Network Architecture
    word_vector_dim = 512  # dimension of word vector
    decoder_size = 512  # dimension of hidden unit in GRU Decoder network
    encoder_size = 512  # dimension of hidden unit in GRU Encoder network
    beam_size = 3  # beam width used only in generation mode
    max_length = 250  # maximum generated sequence length
    #### Encoder
    src_word_id = paddle.layer.data(
        name='source_language_word',
        type=paddle.data_type.integer_value_sequence(source_dict_dim))
    src_embedding = paddle.layer.embedding(
        input=src_word_id,
        size=word_vector_dim,
        param_attr=paddle.attr.ParamAttr(name='_source_language_embedding'))
    # Bidirectional GRU encoder: forward and reversed passes concatenated.
    src_forward = paddle.networks.simple_gru(
        input=src_embedding, size=encoder_size)
    src_backward = paddle.networks.simple_gru(
        input=src_embedding, size=encoder_size, reverse=True)
    encoded_vector = paddle.layer.concat(input=[src_forward, src_backward])
    #### Decoder
    # Project the encoder output once, for use inside the attention step.
    with paddle.layer.mixed(size=decoder_size) as encoded_proj:
        encoded_proj += paddle.layer.full_matrix_projection(
            input=encoded_vector)
    # Bootstrap the decoder state from the backward encoder's first step.
    backward_first = paddle.layer.first_seq(input=src_backward)
    with paddle.layer.mixed(
            size=decoder_size, act=paddle.activation.Tanh()) as decoder_boot:
        decoder_boot += paddle.layer.full_matrix_projection(
            input=backward_first)
    def gru_decoder_with_attention(enc_vec, enc_proj, current_word):
        # One decoder step: attend over the encoded source, combine with
        # the current target word, advance the GRU, emit a softmax over
        # the target vocabulary.
        decoder_mem = paddle.layer.memory(
            name='gru_decoder', size=decoder_size, boot_layer=decoder_boot)
        context = paddle.networks.simple_attention(
            encoded_sequence=enc_vec,
            encoded_proj=enc_proj,
            decoder_state=decoder_mem)
        with paddle.layer.mixed(size=decoder_size * 3) as decoder_inputs:
            decoder_inputs += paddle.layer.full_matrix_projection(input=context)
            decoder_inputs += paddle.layer.full_matrix_projection(
                input=current_word)
        gru_step = paddle.layer.gru_step(
            name='gru_decoder',
            input=decoder_inputs,
            output_mem=decoder_mem,
            size=decoder_size)
        with paddle.layer.mixed(
                size=target_dict_dim,
                bias_attr=True,
                act=paddle.activation.Softmax()) as out:
            out += paddle.layer.full_matrix_projection(input=gru_step)
        return out
    decoder_group_name = "decoder_group"
    # Read-only inputs shared by every decoder step.
    group_input1 = paddle.layer.StaticInputV2(input=encoded_vector, is_seq=True)
    group_input2 = paddle.layer.StaticInputV2(input=encoded_proj, is_seq=True)
    group_inputs = [group_input1, group_input2]
    if not is_generating:
        trg_embedding = paddle.layer.embedding(
            input=paddle.layer.data(
                name='target_language_word',
                type=paddle.data_type.integer_value_sequence(target_dict_dim)),
            size=word_vector_dim,
            param_attr=paddle.attr.ParamAttr(name='_target_language_embedding'))
        group_inputs.append(trg_embedding)
        # For decoder equipped with attention mechanism, in training,
        # target embedding (the ground truth) is the data input,
        # while encoded source sequence is accessed to as an unbounded memory.
        # Here, the StaticInput defines a read-only memory
        # for the recurrent_group.
        decoder = paddle.layer.recurrent_group(
            name=decoder_group_name,
            step=gru_decoder_with_attention,
            input=group_inputs)
        lbl = paddle.layer.data(
            name='target_language_next_word',
            type=paddle.data_type.integer_value_sequence(target_dict_dim))
        cost = paddle.layer.classification_cost(input=decoder, label=lbl)
        return cost
    else:
        # In generation, the decoder predicts a next target word based on
        # the encoded source sequence and the last generated target word.
        # The encoded source sequence (encoder's output) must be specified by
        # StaticInput, which is a read-only memory.
        # Embedding of the last generated word is automatically gotten by
        # GeneratedInputs, which is initialized by a start mark, such as <s>,
        # and must be included in generation.
        trg_embedding = paddle.layer.GeneratedInputV2(
            size=target_dict_dim,
            embedding_name='_target_language_embedding',
            embedding_size=word_vector_dim)
        group_inputs.append(trg_embedding)
        beam_gen = paddle.layer.beam_search(
            name=decoder_group_name,
            step=gru_decoder_with_attention,
            input=group_inputs,
            bos_id=0,
            eos_id=1,
            beam_size=beam_size,
            max_length=max_length)
        return beam_gen
def main():
    """Train, or run beam-search generation with, the seq2seq NMT network.

    Controlled by the local ``is_generating`` flag: False trains on the
    WMT14 dataset; True decodes a few WMT14 "gen" samples with a
    pretrained model and prints beam-search translations.
    """
    paddle.init(use_gpu=False, trainer_count=1)
    is_generating = False

    # source and target dict dim.
    dict_size = 30000
    source_dict_dim = target_dict_dim = dict_size

    # train the network
    if not is_generating:
        cost = seqToseq_net(source_dict_dim, target_dict_dim)
        parameters = paddle.parameters.create(cost)

        # define optimize method and trainer
        optimizer = paddle.optimizer.Adam(
            learning_rate=5e-5,
            regularization=paddle.optimizer.L2Regularization(rate=8e-4))
        trainer = paddle.trainer.SGD(cost=cost,
                                     parameters=parameters,
                                     update_equation=optimizer)

        # define data reader
        wmt14_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.wmt14.train(dict_size), buf_size=8192),
            batch_size=5)

        # define event_handler callback: print cost/metrics every 10
        # batches, a progress dot otherwise.
        def event_handler(event):
            if isinstance(event, paddle.event.EndIteration):
                if event.batch_id % 10 == 0:
                    print "\nPass %d, Batch %d, Cost %f, %s" % (
                        event.pass_id, event.batch_id, event.cost,
                        event.metrics)
                else:
                    sys.stdout.write('.')
                    sys.stdout.flush()

        # start to train
        trainer.train(
            reader=wmt14_reader, event_handler=event_handler, num_passes=2)

    # generation: translate source sequences with a pretrained model
    else:
        # use the first 3 samples for generation
        gen_creator = paddle.dataset.wmt14.gen(dict_size)
        gen_data = []
        gen_num = 3
        for item in gen_creator():
            gen_data.append((item[0], ))
            if len(gen_data) == gen_num:
                break

        beam_gen = seqToseq_net(source_dict_dim, target_dict_dim, is_generating)
        # get the pretrained model, whose bleu = 26.92
        parameters = paddle.dataset.wmt14.model()
        # prob is the prediction probabilities, and id is the prediction word.
        beam_result = paddle.infer(
            output_layer=beam_gen,
            parameters=parameters,
            input=gen_data,
            field=['prob', 'id'])

        # get the dictionary
        src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size)

        # the delimited element of generated sequences is -1,
        # the first element of each generated sequence is the sequence length
        seq_list = []
        seq = []
        for w in beam_result[1]:
            if w != -1:
                seq.append(w)
            else:
                # drop the leading length element, join words into a sentence
                seq_list.append(' '.join([trg_dict.get(w) for w in seq[1:]]))
                seq = []

        prob = beam_result[0]
        beam_size = 3
        # print the source sentence followed by its beam_size candidates
        for i in xrange(gen_num):
            print "\n*******************************************************\n"
            print "src:", ' '.join(
                [src_dict.get(w) for w in gen_data[i][0]]), "\n"
            for j in xrange(beam_size):
                print "prob = %f:" % (prob[i][j]), seq_list[i * beam_size + j]
# Script entry point: only train/generate when executed directly.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import pytest
import d1_common.date_time
import d1_test.d1_test_case
# Datetime component tuples (year, month, day, hour, minute, second,
# microsecond) used to build the fixed test datetimes below.
T1 = 1999, 1, 2, 3, 4, 5, 789000
T2 = 1999, 3, 4, 5, 6, 7, 901000

# Fixed-offset timezones: MST (UTC-7) and YEKT (UTC+6).
TZ_MST = d1_common.date_time.FixedOffset("MST", -7)
TZ_YEKT = d1_common.date_time.FixedOffset("YEKT", 6)

# The same wall-clock components as naive, UTC-aware and fixed-offset datetimes.
T1_NAIVE = datetime.datetime(*T1, tzinfo=None)
T2_NAIVE = datetime.datetime(*T2, tzinfo=None)
T1_UTC = datetime.datetime(*T1, tzinfo=d1_common.date_time.UTC())
T2_UTC = datetime.datetime(*T2, tzinfo=d1_common.date_time.UTC())
T1_MST = datetime.datetime(*T1, tzinfo=TZ_MST)
T1_YEKT = datetime.datetime(*T1, tzinfo=TZ_YEKT)

# Unix timestamp for T1 interpreted as UTC.
# Converted to timestamp with http://www.epochconverter.com/
T1_UTC_EPOCH = 915246245.789

# The same point in time in different tz
T_ABS_1 = datetime.datetime(
    2050, 7, 18, 10, 11, 12, 230000, d1_common.date_time.FixedOffset("TZ1", -5, 30)
)
T_ABS_2 = datetime.datetime(
    2050, 7, 18, 21, 41, 12, 230000, d1_common.date_time.FixedOffset("TZ1", 7)
)
@pytest.fixture(
    scope="function", params=[None, d1_common.date_time.UTC(), TZ_MST, TZ_YEKT]
)
def tz_fixture(request):
    """Yield each timezone variant in turn: naive (None), UTC, MST, YEKT."""
    tz = request.param
    yield tz
@pytest.fixture(
    scope="function", params=[T1_NAIVE, T2_NAIVE, T1_UTC, T2_UTC, T1_MST, T1_YEKT]
)
def dt_fixture(request):
    """Yield each of the fixed test datetimes (naive and tz-aware)."""
    dt = request.param
    yield dt
# Each param is a (round_sec, input_dt, expected_dt) triple: rounding
# input_dt to the nearest round_sec seconds must give expected_dt.
@pytest.fixture(
    scope="function",
    params=[
        # Nearest 500 ms
        (
            0.5,
            datetime.datetime(2020, 10, 11, 14, 10, 10, 100000),
            datetime.datetime(2020, 10, 11, 14, 10, 10, 0),
        ),
        (
            0.5,
            datetime.datetime(2020, 10, 11, 14, 25, 10, 300000),
            datetime.datetime(2020, 10, 11, 14, 25, 10, 500000),
        ),
        (
            0.5,
            datetime.datetime(2020, 10, 11, 14, 44, 10, 500000),
            datetime.datetime(2020, 10, 11, 14, 44, 10, 500000),
        ),
        (
            0.5,
            datetime.datetime(2020, 10, 11, 14, 45, 10, 800000),
            datetime.datetime(2020, 10, 11, 14, 45, 11, 0),
        ),
        # Nearest 2 seconds
        (
            2,
            datetime.datetime(2020, 10, 11, 14, 10, 10, 100000),
            datetime.datetime(2020, 10, 11, 14, 10, 10, 0),
        ),
        (
            2,
            datetime.datetime(2020, 10, 11, 14, 10, 11, 300000),
            datetime.datetime(2020, 10, 11, 14, 10, 12, 0),
        ),
        (
            2,
            datetime.datetime(2020, 10, 11, 14, 44, 15, 500000),
            datetime.datetime(2020, 10, 11, 14, 44, 16, 0),
        ),
        (
            2,
            datetime.datetime(2020, 10, 11, 14, 45, 16, 700000),
            datetime.datetime(2020, 10, 11, 14, 45, 16, 0),
        ),
        # Nearest 10 seconds
        (
            10,
            datetime.datetime(2020, 10, 11, 14, 10, 10, 100000),
            datetime.datetime(2020, 10, 11, 14, 10, 10, 0),
        ),
        (
            10,
            datetime.datetime(2020, 10, 11, 14, 25, 25, 300000),
            datetime.datetime(2020, 10, 11, 14, 25, 30, 0),
        ),
        (
            10,
            datetime.datetime(2020, 10, 11, 14, 44, 44, 500000),
            datetime.datetime(2020, 10, 11, 14, 44, 40, 0),
        ),
        (
            10,
            datetime.datetime(2020, 10, 11, 14, 45, 45, 700000),
            datetime.datetime(2020, 10, 11, 14, 45, 50, 0),
        ),
        # Nearest half hour
        (
            30 * 60,
            datetime.datetime(2020, 10, 11, 14, 10, 10, 100000),
            datetime.datetime(2020, 10, 11, 14, 0, 0),
        ),
        (
            30 * 60,
            datetime.datetime(2020, 10, 11, 14, 25, 10, 300000),
            datetime.datetime(2020, 10, 11, 14, 30, 0),
        ),
        (
            30 * 60,
            datetime.datetime(2020, 10, 11, 14, 44, 10, 500000),
            datetime.datetime(2020, 10, 11, 14, 30, 0),
        ),
        (
            30 * 60,
            datetime.datetime(2020, 10, 11, 14, 45, 10, 700000),
            datetime.datetime(2020, 10, 11, 15, 0, 0),
        ),
        # Nearest 1 day
        (
            24 * 60 * 60,
            datetime.datetime(2020, 4, 11, 8, 10, 10, 100000),
            datetime.datetime(2020, 4, 11),
        ),
        (
            24 * 60 * 60,
            datetime.datetime(2020, 4, 11, 14, 25, 10, 300000),
            datetime.datetime(2020, 4, 12),
        ),
        (
            24 * 60 * 60,
            datetime.datetime(2020, 4, 11, 16, 44, 10, 500000),
            datetime.datetime(2020, 4, 12),
        ),
        (
            24 * 60 * 60,
            datetime.datetime(2020, 4, 11, 21, 45, 10, 700000),
            datetime.datetime(2020, 4, 12),
        ),
    ],
)
def rounding_fixture(request):
    """Yield (round_sec, input_dt, expected_dt) cases for round_to_nearest()."""
    yield request.param
# noinspection PyShadowingNames
class TestDateTime(d1_test.d1_test_case.D1TestCase):
    """Tests for the d1_common.date_time helpers.

    Covers tz introspection (has_tz / is_utc), timestamp round trips
    (ts_from_dt / dt_from_ts), rounding and fuzzy comparison
    (round_to_nearest / are_equal), normalization to UTC, and HTTP
    datetime string formatting and parsing.
    """

    #
    # Check
    #
    def test_1000(self):
        """has_tz(): Returns false for naive dt."""
        assert not d1_common.date_time.has_tz(T1_NAIVE)

    def test_1010(self):
        """has_tz(): Returns True for dt that has tz."""
        assert d1_common.date_time.has_tz(T1_MST)
        assert d1_common.date_time.has_tz(T2_UTC)
        assert d1_common.date_time.has_tz(T1_YEKT)

    def test_1020(self):
        """is_utc(): Returns False for naive dt."""
        assert not d1_common.date_time.is_utc(T1_NAIVE)

    def test_1030(self):
        """is_utc(): Returns False for dt with tz other than UTC."""
        assert not d1_common.date_time.is_utc(T1_MST)

    def test_1040(self):
        """is_utc(): Returns True for dt in UTC."""
        assert d1_common.date_time.is_utc(T2_UTC)

    def test_1050(self):
        """ts_from_dt(): Assumes naive dt to be in UTC."""
        # Naive and explicitly-UTC variants of the same components must
        # map to the same timestamp.
        assert d1_common.date_time.ts_from_dt(
            T1_NAIVE
        ) == d1_common.date_time.ts_from_dt(T1_UTC)

    def test_1060(self):
        """ts_from_dt(): Includes tz."""
        # Same components in MST vs UTC are different points in time.
        assert d1_common.date_time.ts_from_dt(T1_MST) != d1_common.date_time.ts_from_dt(
            T1_UTC
        )

    def test_1070(self, dt_fixture):
        """dt_from_ts():

        - Naive dt is assumed to be at UTC
        - Round trips preserve original value
        """
        dt_utc = d1_common.date_time.normalize_datetime_to_utc(dt_fixture)
        assert (
            d1_common.date_time.dt_from_ts(
                d1_common.date_time.ts_from_dt(dt_fixture), dt_fixture.tzinfo
            )
            == dt_utc
        )

    def test_1080(self, rounding_fixture, tz_fixture):
        """round_to_nearest()"""
        round_sec, t, t_rounded = rounding_fixture
        # Rounding must behave the same in every tz, so attach the tz
        # under test to both input and expectation.
        t = t.replace(tzinfo=tz_fixture)
        t_rounded = t_rounded.replace(tzinfo=tz_fixture)
        logging.debug("round_sec={} t={} t_rounded={}".format(round_sec, t, t_rounded))
        assert d1_common.date_time.round_to_nearest(t, round_sec) == t_rounded

    def test_1090(self, rounding_fixture, tz_fixture):
        """are_equal(): Returns True if two naive dts are equal to within the fuzz
        factor."""
        round_sec, t, t_rounded = rounding_fixture
        t = t.replace(tzinfo=tz_fixture)
        t_rounded = t_rounded.replace(tzinfo=tz_fixture)
        logging.debug("round_sec={} t={} t_rounded={}".format(round_sec, t, t_rounded))
        assert d1_common.date_time.are_equal(t, t_rounded, round_sec)

    def test_1100(self):
        """are_equal(): Returns True when comparing the same point in time specified in
        two different tz."""
        assert d1_common.date_time.are_equal(T_ABS_1, T_ABS_2)

    #
    # Conversion
    #
    def test_1110(self):
        """ts_from_dt(): Assumes naive datetime is in UTC."""
        assert d1_common.date_time.ts_from_dt(T1_NAIVE) == T1_UTC_EPOCH

    def test_1120(self):
        """ts_from_dt(): Includes timezone (MST, UTC-7)"""
        # 03:04 MST is 7 hours later than 03:04 UTC in absolute time.
        assert d1_common.date_time.ts_from_dt(T1_MST) == T1_UTC_EPOCH + 7 * 3600

    def test_1130(self):
        """ts_from_dt(): Includes timezone (YEKT, UTC+6)"""
        assert d1_common.date_time.ts_from_dt(T1_YEKT) == T1_UTC_EPOCH - 6 * 3600

    def test_1140(self):
        """http_datetime_str_from_dt(): Assumes naive datetime is in UTC."""
        assert (
            d1_common.date_time.http_datetime_str_from_dt(T1_NAIVE)
            == "Sat, 02 Jan 1999 03:04:05 GMT"
        )

    def test_1150(self):
        """http_datetime_str_from_dt(): Includes timezone (MST, UTC-7)"""
        assert (
            d1_common.date_time.http_datetime_str_from_dt(T1_MST)
            == "Sat, 02 Jan 1999 10:04:05 GMT"
        )

    def test_1160(self):
        """http_datetime_str_from_dt() includes timezone (YEKT, UTC+6)"""
        assert (
            d1_common.date_time.http_datetime_str_from_dt(T1_YEKT)
            == "Fri, 01 Jan 1999 21:04:05 GMT"
        )

    def _from_http_datetime(self, that_fateful_day_in_november_94):
        # Helper: parse an HTTP datetime string and check it equals the
        # canonical RFC 7231 example date (1994-11-06 08:49:37 UTC).
        d = d1_common.date_time.dt_from_http_datetime_str(
            that_fateful_day_in_november_94
        )
        assert d == d1_common.date_time.create_utc_datetime(1994, 11, 6, 8, 49, 37)

    def test_1170(self):
        """from_http_datetime(): RFC 822, updated by RFC 1123."""
        self._from_http_datetime("Sun, 06 Nov 1994 08:49:37 GMT")

    def test_1180(self):
        """from_http_datetime(): RFC 850, obsoleted by RFC 1036."""
        self._from_http_datetime("Sunday, 06-Nov-94 08:49:37 GMT")

    def test_1190(self):
        """from_http_datetime(): ANSI C's asctime() format."""
        self._from_http_datetime("Sun Nov 6 08:49:37 1994")

    def test_1200(self):
        """is_utc(): Returns False for naive datetime."""
        assert not d1_common.date_time.is_utc(T1_NAIVE)

    def test_1210(self):
        """is_utc(): Returns False for timezone aware datetime not in UTC (MST,
        UTC-7)"""
        assert not d1_common.date_time.is_utc(T1_MST)

    def test_1220(self):
        """is_utc(): Returns False for timezone aware datetime not in UTC (YEKT,
        UTC+6)"""
        assert not d1_common.date_time.is_utc(T1_YEKT)

    def test_1230(self):
        """is_utc(): Returns True for datetime with tz in UTC."""
        assert d1_common.date_time.is_utc(T2_UTC)

    def test_1240(self):
        """normalize_datetime_to_utc(): Adjusts for tz."""
        t1_utc = d1_common.date_time.normalize_datetime_to_utc(T_ABS_1)
        t2_utc = d1_common.date_time.normalize_datetime_to_utc(T_ABS_2)
        assert d1_common.date_time.is_utc(t1_utc)
        assert d1_common.date_time.is_utc(t2_utc)
        assert d1_common.date_time.are_equal(t1_utc, t2_utc)

    def test_1250(self):
        """normalize_datetime_to_utc(): Assumes that naive dt is in UTC."""
        utc_dt = d1_common.date_time.normalize_datetime_to_utc(T2_NAIVE)
        assert d1_common.date_time.is_utc(utc_dt)
        assert d1_common.date_time.are_equal(utc_dt, T2_NAIVE)

    def test_1260(self):
        """normalize_datetime_to_utc(): Includes tz."""
        utc_dt = d1_common.date_time.normalize_datetime_to_utc(T1_YEKT)
        assert d1_common.date_time.is_utc(utc_dt)
        assert d1_common.date_time.are_equal(utc_dt, T1_YEKT)
| |
import ui.SearchTabUI
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QVariant
import Options
import Queries
import datetime
import EDDB
import EliteLogAnalyzer
import EdceWrapper
import SpaceTime
import time
import ui.TabAbstract
import ThreadWorker
import Powers
class SearchTab(QtWidgets.QWidget, ui.SearchTabUI.Ui_Dialog, ui.TabAbstract.TabAbstract):
    """One trade-search tab: search parameters, worker launch, result table."""

    # Emitted with the finished result list when a search worker completes;
    # connected to _updateResults in __init__.
    _resultsUpdated = QtCore.pyqtSignal([list])
    def __init__(self, db, analyzer, tabName, mainwindow):
        """Build the search tab UI, wire signals, populate combos, restore state.

        db:         trade database accessor (system/station/name-list queries)
        analyzer:   log analyzer instance (stored on the tab)
        tabName:    unique tab name; also namespaces saved option keys
        mainwindow: owning window; supplies current status, sounds and
                    shared inputs (jump range, min pad size)
        """
        # NOTE(review): super(QtWidgets.QWidget, self) starts the MRO walk
        # *after* QWidget — presumably intentional with the Ui_Dialog mixin;
        # confirm before changing.
        super(QtWidgets.QWidget, self).__init__()
        self.setupUi(self)
        self.tabName = tabName
        self.db = db
        self.mainwindow = mainwindow
        self.result = []          # rows from the last finished search
        self.currentSystem = None # resolved source system object, or None
        self.currentBase = None   # resolved source station object, or None
        self.targetSystem = None
        self.targetBase = None
        self.searchType='direct'
        self.searchBtn.clicked.connect(self.searchBtnPressed)
        self.model = SearchTab.TableModel(None, self)
        self.SearchResultTable.setModel(self.model)
        self.getCurrentBtn.clicked.connect(self._setCurrentSystemToTab)
        self.targetGetCurrentBtn.clicked.connect(self._setCurrentSystemToTabTarget)
        self.searchTypeCombo.currentIndexChanged.connect(self._searchtypeChanged)
        self.analyzer = analyzer
        # (key, label) pairs for the search-type combo; the key is stored
        # as item data and drives the dispatch in startSearch().
        self.searchTypes=[
            ('direct','Direct trades'),
            ('target','On way to Target'),
            ('station_exports','Exports from current Station'),
            ('system_exports','Exports from current System'),
            ('loop','Loop route'),
            ('long','Continuous route'),
            ('singles','Single trades'),
        ]
        self.searchTypeCombo.clear()
        for s in self.searchTypes:
            self.searchTypeCombo.addItem(s[1],s[0])
        self.currentSystemCombo.currentIndexChanged.connect(self._refreshCurrentStationlist)
        self.targetSystemCombo.currentIndexChanged.connect(self._refreshTargetStationlist)
        self.minProfitSpinBox.valueChanged.connect(self._minProfitChanged)
        self.graphDepthSpin.valueChanged.connect(self._graphDepthChanged)
        self.graphMinDepthSpin.valueChanged.connect(self._graphDepthChanged)
        # Fill both system combos with the full known-system list.
        systemlist=self.db.getSystemNameList()
        self.currentSystemCombo.clear()
        self.currentSystemCombo.addItems( systemlist )
        self.targetSystemCombo.clear()
        self.targetSystemCombo.addItems( systemlist )
        self._restoreSearchStatus()
        self._resultsUpdated.connect(self._updateResults)
        self.currentWorker = None # active ThreadWorker while a search runs
def _setSearchProgress(self, status):
if status:
self.searchBtn.setText("Stop search")
else:
self.searchBtn.setText("Search")
def _minProfitChanged(self):
maxval=750
minval=400
value=int(self.minProfitSpinBox.value())
value=(value-minval)/(maxval-minval)
redness=max(0.0,min(1.0,value))
self.minProfitSpinBox.setStyleSheet("background:rgb(255,"+str(255*redness)+","+str(255*redness)+")")
def _graphDepthChanged(self):
if self.graphDepthSpin.value()<self.graphMinDepthSpin.value():
self.graphDepthSpin.setStyleSheet("background:rgb(255,0,0)")
self.graphMinDepthSpin.setStyleSheet("background:rgb(255,0,0)")
else:
self.graphDepthSpin.setStyleSheet("background:rgb(255,255,255)")
self.graphMinDepthSpin.setStyleSheet("background:rgb(255,255,255)")
def _updateResults(self, data):
self.result = data
self.model.refeshData()
self._setSearchProgress(False)
self.currentWorker = None
print("Search done!")
if len(data)==0:
print("Search resulted in 0 matches. Keep min hops low and try lower minimum profits to widen the search (note: this also takes longer)")
def _searchtypeChanged(self,idx):
#searchtype=self.searchTypeCombo.currentIndex()
searchtype=self.searchTypeCombo.itemData(idx)
if searchtype in ['target','direct']:
self.targetSystemCombo.setEnabled(True)
self.targetStationCombo.setEnabled(True)
else:
self.targetSystemCombo.setEnabled(False)
self.targetStationCombo.setEnabled(False)
if searchtype in ['singles','direct']:
self.graphDepthSpin.setEnabled(False)
self.graphMinDepthSpin.setEnabled(False)
else:
self.graphDepthSpin.setEnabled(True)
self.graphMinDepthSpin.setEnabled(True)
if searchtype in ['system_exports','loop','long','singles']:
self.currentStationCombo.setEnabled(False)
else:
self.currentStationCombo.setEnabled(True)
if searchtype in ['direct']:
self.maxDistanceSpinBox.setEnabled(False)
self.minProfitSpinBox.setEnabled(False)
else:
self.maxDistanceSpinBox.setEnabled(True)
self.minProfitSpinBox.setEnabled(True)
    def setTabName(self, name):
        # Rename the tab; affects getTabName() and future _optName() keys.
        self.tabName = name
def getTabName(self):
return "Search {0}".format(self.tabName)
    def getType(self):
        # Tab-type identifier used by the TabAbstract framework.
        return "search"
    def dispose(self):
        # Called when the tab is torn down: persist the search settings.
        self._saveSearchStatus()
def _optName(self, name):
return "search_tab__{0}_{1}".format(name, self.tabName)
    def refreshData(self):
        # Ask the table model to re-pull rows from self.result.
        self.model.refeshData()
def _refreshCurrentStationlist(self):
if self.currentSystem is None or self.currentSystem.getName()!=self.currentSystemCombo.currentText():
self.setCurrentSystem(self.currentSystemCombo.currentText(),refreshstations=False)
if self.currentSystem is None:
print('Current system not set')
return
currentSystemStations=self.currentSystem.getStations()
currentSystemStations.sort(key=lambda o: o.getDistance() if o.getDistance() is not None else 99999999) # sort by distance
currentSystemStations=[o.getName() for o in currentSystemStations]
self.currentStationCombo.clear()
self.currentStationCombo.addItems( ['ANY'] )
self.currentStationCombo.addItems( currentSystemStations )
def setCurrentSystem(self, system, refreshstations=True):
systems = self.db.getSystemByName(system)
if len(systems)==0:
print('System not in db')
return
system=systems[0]
self.currentSystem = system
self.currentSystemCombo.setEditText(self.currentSystem.getName())
if refreshstations:
self._refreshCurrentStationlist()
self.model.refeshData()
def setCurrentBase(self, base):
if self.currentSystem is None:
print('Current system not set')
return
bases=self.currentSystem.getStations()
baseo=None
for bo in bases:
if bo.getName()==base:
baseo=bo
break
if baseo is None:
print('Station not in db')
self.currentBase = None
self.currentStationCombo.setEditText('ANY')
return
self.currentBase = baseo
self.currentStationCombo.setEditText(self.currentBase.getName())
def _refreshTargetStationlist(self):
if self.targetSystem is None or self.targetSystem.getName()!=self.targetSystemCombo.currentText():
self.setTargetSystem(self.targetSystemCombo.currentText(),refreshstations=False)
if self.targetSystem is None:
print('Target system not set')
return
targetSystemStations=self.targetSystem.getStations()
targetSystemStations.sort(key=lambda o: o.getDistance() if o.getDistance() is not None else 99999999) # sort by distance
targetSystemStations=[o.getName() for o in targetSystemStations]
self.targetStationCombo.clear()
self.targetStationCombo.addItems( ['ANY'] )
self.targetStationCombo.addItems( targetSystemStations )
def setTargetSystem(self, system, refreshstations=True):
systems = self.db.getSystemByName(system)
if len(systems)==0:
print('System not in db')
return
system=systems[0]
self.targetSystem = system
self.targetSystemCombo.setEditText(self.targetSystem.getName())
if refreshstations:
self._refreshTargetStationlist()
self.model.refeshData()
def setTargetBase(self, base):
if self.targetSystem is None:
print('Target system not set')
return
bases=self.targetSystem.getStations()
baseo=None
for bo in bases:
if bo.getName()==base:
baseo=bo
break
if baseo is None:
print('Station not in db')
self.targetBase = None
self.targetStationCombo.setEditText('ANY')
return
self.targetBase = baseo
self.targetStationCombo.setEditText(self.targetBase.getName())
    def _setCurrentSystemToTab(self):
        # Copy the player's tracked location (mainwindow.currentStatus)
        # into the source system/station selectors.
        self.setCurrentSystem(self.mainwindow.currentStatus['System'])
        self.setCurrentBase(self.mainwindow.currentStatus['Base'])
    def _setCurrentSystemToTabTarget(self):
        # Copy the player's tracked location (mainwindow.currentStatus)
        # into the target system/station selectors.
        self.setTargetSystem(self.mainwindow.currentStatus['System'])
        self.setTargetBase(self.mainwindow.currentStatus['Base'])
    def _restoreSearchStatus(self):
        """Restore this tab's search settings from Options (with defaults).

        Order matters: systems are set before the station lists are
        rebuilt, and stations are selected only after the lists exist.
        Counterpart of _saveSearchStatus().
        """
        self.currentSystemCombo.setCurrentText(Options.get(self._optName("current_system"), "Sol"))
        self.targetSystemCombo.setCurrentText(Options.get(self._optName("target_system"), "Lave"))
        self.maxDistanceSpinBox.setValue(int(Options.get(self._optName("maximum_distance"), "50")))
        self.minProfitSpinBox.setValue(int(Options.get(self._optName("minimum_profit"), "1000")))
        self.graphDepthSpin.setValue(int(Options.get(self._optName("search_max_depth"), "5")))
        self.graphMinDepthSpin.setValue(int(Options.get(self._optName("search_min_depth"), "1")))
        self.smugglingCheckBox.setChecked(Options.get(self._optName("blackmarket"), "0")=='1')
        self._refreshCurrentStationlist() # populate station lists
        self._refreshTargetStationlist()
        self.currentStationCombo.setCurrentText(Options.get(self._optName("current_station"), "Abraham Lincoln"))
        self.targetStationCombo.setCurrentText(Options.get(self._optName("target_station"), "Lave Station"))
        self.searchType=Options.get(self._optName("search_type"), "direct")
        # Older versions stored the combo index instead of the type key;
        # translate a purely numeric value through the searchTypes table.
        if self.searchType.isdigit():
            self.searchType=self.searchTypes[int(self.searchType)][0] # old version shim
        # Select the matching combo entry and sync widget enable states.
        for si in range(len(self.searchTypes)):
            if self.searchTypes[si][0]==self.searchType:
                self.searchTypeCombo.setCurrentIndex(si)
                self._searchtypeChanged(si)
def _saveSearchStatus(self):
Options.set(self._optName("current_system"), self.currentSystemCombo.currentText())
Options.set(self._optName("target_system"), self.targetSystemCombo.currentText())
Options.set(self._optName("maximum_distance"), self.maxDistanceSpinBox.value())
Options.set(self._optName("minimum_profit"), self.minProfitSpinBox.value())
Options.set(self._optName("search_type"), self.searchTypeCombo.currentIndex())
Options.set(self._optName("search_max_depth"), self.graphDepthSpin.value())
Options.set(self._optName("search_min_depth"), self.graphMinDepthSpin.value())
Options.set(self._optName("current_station"), self.currentStationCombo.currentText())
Options.set(self._optName("target_station"), self.targetStationCombo.currentText())
Options.set(self._optName("blackmarket"), self.smugglingCheckBox.isChecked() and '1' or '0')
#Options.set(self._optName("search_profitPh"), self.profitPhChk.isChecked() and "1" or "0")
def cancelSearch(self):
if self.currentWorker is not None:
print('Cancelled search!')
self.currentWorker.terminate()
self.currentWorker = None
self.model.refeshData()
self._setSearchProgress(False)
def searchBtnPressed(self):
if self.currentWorker is not None:
self.cancelSearch()
return
else:
self.startSearch()
    def startSearch(self):
        """Collect UI parameters and launch the selected query on a worker thread.

        Results arrive asynchronously via the _resultsUpdated signal,
        which _updateResults handles on the GUI side.
        """
        if self.currentWorker is not None: # handled elsewhere, or ignored
            return
        # Resolve the active search-type key from the combo's item data.
        searchTypeIdx = int(self.searchTypeCombo.currentIndex())
        self.searchType=self.searchTypeCombo.itemData(searchTypeIdx)
        currentSystem = self.currentSystemCombo.currentText()
        currentBase = self.currentStationCombo.currentText()
        targetSystem = self.targetSystemCombo.currentText()
        targetBase = self.targetStationCombo.currentText()
        maxDistance = float(self.maxDistanceSpinBox.value())
        jumprange = float(self.mainwindow.jumpRangeSpinBox.value())
        minProfit = int(self.minProfitSpinBox.value())
        minPadSize = int(self.mainwindow.minPadSizeCombo.currentIndex())
        graphDepth = int(self.graphMinDepthSpin.value())
        graphDepthmax = int(self.graphDepthSpin.value())
        blackmarket = self.smugglingCheckBox.isChecked()
        if graphDepth>graphDepthmax:
            print("min hops have to be less than max hops!")
            self.mainwindow.sounds.play('error')
            return
        # 'ANY' is the wildcard entry the station list refreshers prepend;
        # queries expect None for "no station constraint".
        if currentBase == 'ANY':
            currentBase=None
        if targetBase == 'ANY':
            targetBase=None
        # NOTE(review): assumes setCurrentSystem()/setTargetSystem() have
        # already resolved both systems (normally via _restoreSearchStatus
        # or the combo handlers); getPosition() on None would raise — confirm.
        pos = self.currentSystem.getPosition()
        tpos = self.targetSystem.getPosition()
        directionality=0.0 # todo: currently unused - remove?
        # Common parameter dict shared by all query flavors; the source/
        # target entries are filled in per search type below.
        queryparams=dict({
            "x":pos[0],
            "y":pos[1],
            "z":pos[2],
            "x2":tpos[0],
            "y2":tpos[1],
            "z2":tpos[2],
            "directionality":directionality,
            "maxdistance":maxDistance,
            "minprofit":minProfit,
            "landingPadSize":minPadSize,
            "jumprange":jumprange,
            "graphDepthMin":graphDepth,
            "graphDepthMax":graphDepthmax,
            "sourcesystem":None,
            "sourcebase":None,
            "targetsystem":None,
            "targetbase":None,
            "blackmarket":blackmarket
        })
        print("Querying database...")
        # Pick the query function matching the selected search type.
        searchFn = None
        if self.searchType=='singles':
            print("queryProfit")
            searchFn = lambda : Queries.queryProfit(self.db, queryparams )
        elif self.searchType=='loop':
            print("queryProfitGraphLoops")
            searchFn = lambda : Queries.queryProfitGraphLoops(self.db, queryparams )
        elif self.searchType=='long':
            print("queryProfitGraphDeadends")
            searchFn = lambda : Queries.queryProfitGraphDeadends(self.db, queryparams )
        elif self.searchType=='target':
            queryparams['sourcesystem']=currentSystem
            queryparams['sourcebase']=currentBase
            queryparams['targetsystem']=targetSystem
            queryparams['targetbase']=targetBase
            print("queryProfitGraphTarget")
            searchFn = lambda : Queries.queryProfitGraphTarget(self.db, queryparams )
        elif self.searchType=='direct':
            queryparams['sourcesystem']=currentSystem
            queryparams['sourcebase']=currentBase
            queryparams['targetsystem']=targetSystem
            queryparams['targetbase']=targetBase
            print("queryDirectTrades")
            searchFn = lambda : Queries.queryDirectTrades(self.db, queryparams )
        elif self.searchType in ['station_exports','system_exports']:
            queryparams['sourcesystem']=currentSystem
            queryparams['sourcebase']=currentBase
            print("queryProfitGraphDeadends from current")
            searchFn = lambda : Queries.queryProfitGraphDeadends(self.db, queryparams )
        else:
            print("unknown search type - we should not be here")
        # Run the query off the GUI thread; completion emits _resultsUpdated.
        if searchFn is not None:
            self.currentWorker = ThreadWorker.ThreadWorker(searchFn, lambda result: self._resultsUpdated.emit(result))
            self.currentWorker.start()
            self._setSearchProgress(True)
class TableModel(QtCore.QAbstractTableModel):
def __init__(self, parent, mw):
super().__init__(parent)
self.mw = mw
basictradetable=[
"_curdist",
"Asystemname",
"Abasename",
"Asupply",
"AexportPrice",
"commodityname",
"BimportPrice",
#"Bdemand",
"Bsystemname",
"Bbasename",
#"DistanceSq",
"SystemDistance",
"hours",
"profit",
"profitPh"
]
basictradetable_target=[
"Asystemname",
"Abasename",
"Asupply",
"AexportPrice",
"commodityname",
"BimportPrice",
#"Bdemand",
"Bsystemname",
"Bbasename",
#"DistanceSq",
"SystemDistance",
"hours",
"profit",
"profitPh",
"_targetdist"
]
self.columnorder=dict({
'station_exports':basictradetable,
'system_exports':basictradetable,
'loop':basictradetable,
'long':basictradetable,
'singles':basictradetable,
'target':basictradetable_target,
'direct':basictradetable
})
def rowCount(self, parent):
rows = len(self.mw.result)
return rows
        def columnCount(self, parent):
            # Column count follows the active search type's column layout.
            return len(self.columnorder[self.mw.searchType])
def data(self, index, role):
if not index.isValid():
return None
if index.row() >= len(self.mw.result):
return None
data = self.mw.result[index.row()]
section=index.column()
columnorder=self.columnorder[self.mw.searchType]
if section >= len(columnorder):
return None
# roles: http://doc.qt.io/qt-5/qt.html#ItemDataRole-enum
########### ICONS ##################
if role == QtCore.Qt.DecorationRole:
if "celltype" not in data:
if columnorder[section] in ["Asystemname"]:
powerid=None
if data['Acontrolled'] is not None:
powerid=data['Acontrolled']
if data['Aexploited'] is not None:
powerid=data['Aexploited']
if powerid is not None and powerid != -1:
return QtGui.QPixmap("img/power_"+str(powerid)+".png")
if columnorder[section] in ["Bsystemname"]:
powerid=None
if data['Bcontrolled'] is not None:
powerid=data['Bcontrolled']
if data['Bexploited'] is not None:
powerid=data['Bexploited']
if powerid is not None and powerid != -1:
return QtGui.QPixmap("img/power_"+str(powerid)+".png")
if columnorder[section] in ["commodityname"]:
if data['blackmarket']==1:
return QtGui.QPixmap("img/illegal.png")
########### TEXT COLOR ###########
if role == QtCore.Qt.TextColorRole:
if "celltype" not in data:
if columnorder[section] in ["AexportPrice"]:
if int(data['AlastUpdated'])<time.time()-60*60*24*int(Options.get('Market-valid-days',7)):
return QtGui.QBrush(QtGui.QColor(255,255,0))
if columnorder[section] in ["Asupply"]:
if int(data['Asupply']<100):
return QtGui.QBrush(QtGui.QColor(255,255,0))
if columnorder[section] in ["BimportPrice"]:
if int(data['BlastUpdated'])<time.time()-60*60*24*int(Options.get('Market-valid-days',7)):
return QtGui.QBrush(QtGui.QColor(255,255,0))
if columnorder[section] in ["commodityname"]:
if data['blackmarket']==1:
return QtGui.QBrush(QtGui.QColor(250,250,250))
########### BACKGROUND COLOR ##########
if role == QtCore.Qt.BackgroundRole:
if "celltype" in data:
if data["celltype"]=='emptyrow':
return QtGui.QBrush(QtGui.QColor(255,255,255))
if data["celltype"]=='separatorrow':
return QtGui.QBrush(QtGui.QColor(200,200,200))
if columnorder[section] in ["Asystemname","Abasename"]:
if data['Aallegiance']==1:
return QtGui.QBrush(QtGui.QColor(200,255,200))
if data['Aallegiance']==2:
return QtGui.QBrush(QtGui.QColor(255,200,200))
if data['Aallegiance']==3:
return QtGui.QBrush(QtGui.QColor(200,200,255))
return QtGui.QBrush(QtGui.QColor(255,255,230))
if columnorder[section] in ["Bsystemname","Bbasename"]:
if data['Ballegiance']==1:
return QtGui.QBrush(QtGui.QColor(200,255,200))
if data['Ballegiance']==2:
return QtGui.QBrush(QtGui.QColor(255,200,200))
if data['Ballegiance']==3:
return QtGui.QBrush(QtGui.QColor(200,200,255))
return QtGui.QBrush(QtGui.QColor(255,255,230))
if columnorder[section] in ["AexportPrice"]:
r,g,b=self.mw.AgeToColor(data['AlastUpdated'])
return QtGui.QBrush(QtGui.QColor(r,g,b))
if columnorder[section] in ["BimportPrice"]:
r,g,b=self.mw.AgeToColor(data['BlastUpdated'])
return QtGui.QBrush(QtGui.QColor(r,g,b))
if columnorder[section] in ["Asupply"]:
b=(data['Asupply'])/5000
g=b*2
g=max(min(1,g),0)*255
b=max(min(1,b),0)*255
r=255
return QtGui.QBrush(QtGui.QColor(r,g,b))
if columnorder[section] in ["profit","Cprofit","totalprofit"]:
return QtGui.QBrush(QtGui.QColor(255,230,255))
if columnorder[section] in ["commodityname"]:
if data['blackmarket']==1:
return QtGui.QBrush(QtGui.QColor(0,0,0))
return QtGui.QBrush(QtGui.QColor(255,255,255)) # everything else is white
############ TOOLTIPS ###############
if role == QtCore.Qt.ToolTipRole:
if "averageprofit" in data: # this is a graph search
if columnorder[section] == "profit":
ret="Loop average profit: "+str(data["averageprofit"])\
+"\nLoop max profit: "+str(data["loopmaxprofit"])\
+"\nLoop min profit: "+str(data["loopminprofit"])
if "celltype" not in data:
ret+= "\nBuy for "+str(data["AexportPrice"])\
+"\nSell for "+str(data["BimportPrice"])\
+"\nProfit: "+str(data["profit"])
return ret
if columnorder[section] == "profitPh":
ret="Loop average profit: "+str(data["averageprofit"])\
+"\nLoop max profit: "+str(data["loopmaxprofit"])\
+"\nLoop min profit: "+str(data["loopminprofit"])
if "celltype" not in data:
ret+= "\nBuy for "+str(data["AexportPrice"])\
+"\nSell for "+str(data["BimportPrice"])\
+"\nProfit: "+str(data["profit"])\
+"\nProfit/h:"+str(int(data["profit"]/data["hours"]))
return ret
else:
if "celltype" in data:
return None
if columnorder[section] == "_curdist":
if self.mw.currentSystem is None:
return
else:
curname=self.mw.currentSystem.getName() # todo: ship range
pos=self.mw.currentSystem.getPosition()
dist=( (pos[0]-data["Ax"])**2 + (pos[1]-data["Ay"])**2 + (pos[2]-data["Az"])**2 ) ** 0.5
return "Distance from "+curname+" (current system)\n" \
"to "+data["Asystemname"]+" (commodity seller) is "+("%.2f" % dist)+"ly " \
"("+str("%.2f" % (SpaceTime.BaseToBase(dist)/60))+"min)"
elif columnorder[section] == "_Bcurdist":
if self.mw.currentSystem is None:
return
else:
curname=self.mw.currentSystem.getName() # todo: ship range
pos=self.mw.currentSystem.getPosition()
dist=( (pos[0]-data["Bx"])**2 + (pos[1]-data["By"])**2 + (pos[2]-data["Bz"])**2 ) ** 0.5
return "Distance from "+curname+" (current system)\n" \
"to "+data["Bsystemname"]+" (commodity seller) is "+("%.2f" % dist)+"ly " \
"("+str("%.2f" % (SpaceTime.BaseToBase(dist)/60))+"min)"
elif columnorder[section] == "AexportPrice":
return "Data "+str("%.2f" %((time.time()-data['AlastUpdated'])/(60*60*24)))+" days old"\
+"\nExport sales price: "+str(data["AexportPrice"])+"\nSupply: "+str(data["Asupply"])
elif columnorder[section] == "BexportPrice":
return "Export sales price: "+str(data["BexportPrice"])+"\nSupply: "+str(data["Bsupply"])
elif columnorder[section] == "commodityname":
return "Commodity "+data["commodityname"]\
+"\nData "+str("%.2f" %((time.time()-min(data['AlastUpdated'],data['BlastUpdated']))/(60*60*24)))+" days old"\
+"\nBuy for "+str(data["AexportPrice"])\
+"\nSell for "+str(data["BimportPrice"])\
+"\nProfit: "+str(data["profit"])\
+"\nGalactic average price: "+str(data["average"])
elif columnorder[section] == "Ccommodityname":
return "Commodity "+data["Ccommodityname"]\
+"\nBuy for "+str(data["BexportPrice"])\
+"\nSell for "+str(data["CimportPrice"])\
+"\nProfit: "+str(data["Cprofit"])\
+"\nGalactic average price: "+str(data["Caverage"])
elif columnorder[section] == "BimportPrice":
return "Data "+str("%.2f" %((time.time()-data['BlastUpdated'])/(60*60*24)))+" days old"\
+"\nImport buy price: "+str(data["BimportPrice"])+"\nDemand: "+str(data["Bdemand"])
elif columnorder[section] == "CimportPrice":
return "Import buy price: "+str(data["CimportPrice"])+"\nDemand: "+str(data["Cdemand"])
elif columnorder[section] in ["Asystemname","Abasename"]:
padsize={
None:"unknown",
0:'S',
1:'M',
2:'L'
}
returnstring=""
if data['Acontrolled'] is not None:
returnstring+='System controlled by '+Powers.valToName( data['Acontrolled'] ) +'\n'
elif data['Aexploited'] is not None and data['Aexploited'] != -1:
returnstring+='System exploited by '+Powers.valToName( data['Aexploited'] ) +'\n'
elif data['Aexploited'] == -1:
returnstring+='System contested by opposing powers\n'
if data['Aallegiance'] is not None and int(data['Aallegiance']) != 0:
allegiance={
0:'None',
1:'Allegiance',
2:'Federation',
3:'Empire'
}
returnstring+="Allegiance: "+allegiance[int(data['Aallegiance'])]+"\n"
returnstring+="System: "+data["Asystemname"]+"\n"
returnstring+="Station: "+data["Abasename"]+"\n"
returnstring+="Distance to star: "+str(data["Adistance"] is not None and (str(data["Adistance"])
+" ("+str("%.2f" % (SpaceTime.StarToBase(data["Adistance"])/60))+"min)") or "unknown")+"\n"
returnstring+="Landing pad size: "+padsize[data["AlandingPadSize"]]
return returnstring
elif columnorder[section] in ["Bsystemname","Bbasename"]:
padsize={
None:"unknown",
0:'S',
1:'M',
2:'L'
}
returnstring=""
if data['Bcontrolled'] is not None:
returnstring+='System controlled by '+Powers.valToName( data['Bcontrolled'] ) +'\n'
elif data['Bexploited'] is not None and data['Bexploited'] != -1:
returnstring+='System exploited by '+Powers.valToName( data['Bexploited'] ) +'\n'
elif data['Bexploited'] == -1:
returnstring+='System contested by opposing powers\n'
if data['Ballegiance'] is not None and int(data['Ballegiance']) != 0:
allegiance={
0:'None',
1:'Allegiance',
2:'Federation',
3:'Empire'
}
returnstring+="Allegiance: "+allegiance[int(data['Ballegiance'])]+"\n"
returnstring+="System: "+data["Bsystemname"]+"\n"
returnstring+="Station: "+data["Bbasename"]+"\n"
returnstring+="Distance to star: "+str(data["Bdistance"] is not None and (str(data["Bdistance"])
+ " ("+str("%.2f" %(SpaceTime.StarToBase(data["Bdistance"])/60))+"min)") or "unknown")+"\n"
returnstring+="Landing pad size: "+padsize[data["BlandingPadSize"]]
return returnstring
elif columnorder[section] == "DistanceSq":
return "Travel distance "+str(data["DistanceSq"]**0.5)+"ly + "+\
str(data["Bdistance"] is not None and data["Bdistance"] or "unknown")+"ls from star to station"
elif columnorder[section] == "SystemDistance":
return "Travel distance "+str(data["SystemDistance"])+"ly + "+\
str(data["Bdistance"] is not None and data["Bdistance"] or "unknown")+"ls from star to station\n"+\
str(data["Bdistance"] is not None and str("%.2f" % (SpaceTime.StarToBase(data["Bdistance"])/60))+"min" or "")
elif columnorder[section] == "profit":
return "Buy for "+str(data["AexportPrice"])\
+"\nSell for "+str(data["BimportPrice"])\
+"\nProfit: "+str(data["profit"])
elif columnorder[section] == "Cprofit":
return "Buy for "+str(data["BexportPrice"])\
+"\nSell for "+str(data["CimportPrice"])\
+"\nProfit: "+str(data["Cprofit"])
elif columnorder[section] == "profitPh":
returnstring="Profit:"+str(data["profit"])+"\n"
returnstring+="System: "+data["Bsystemname"]+"\n"
returnstring+=str(data["SystemDistance"])+"ly\n"
returnstring+="Station: "+data["Bbasename"]+"\n"
returnstring+=str(data["Bdistance"] is not None and str(data["Bdistance"])+"ls\n" or "")
returnstring+=str(data["Bdistance"] is not None and str("%.2f" % (SpaceTime.StarToBase(data["Bdistance"])/60))+"min" or "")
return returnstring
elif columnorder[section] == "CprofitPh":
returnstring="Profit:"+str(data["Cprofit"])+"\n"
returnstring+="System: "+data["Csystemname"]+"\n"
returnstring+=str(data["CSystemDistance"])+"ly\n"
returnstring+="Station: "+data["Cbasename"]+"\n"
returnstring+=str(data["Cdistance"] is not None and str(data["Cdistance"])+"ls\n" or "")
returnstring+=str(data["Cdistance"] is not None and str("%.2f" % (SpaceTime.StarToBase(data["Cdistance"])/60))+"min" or "")
return returnstring
else:
return None
################# VISIBLE DATA ##################
if role == QtCore.Qt.DisplayRole:
if section >=len(columnorder):
return None
if "celltype" in data:
if data["celltype"] in ['separatorrow']:
if columnorder[section]=='profit':
return str(int(data["loopmaxprofit"]))+"cr"
elif columnorder[section]=='profitPh':
return str(data["totalprofitPh"])+"cr/h"
elif columnorder[section] == "hours":
return str(int(data["totalhours"]*60*10)/10)
else:
return None
else:
return None
if columnorder[section] == "_curdist":
if self.mw.currentSystem is None:
return '?'
else:
pos=self.mw.currentSystem.getPosition()
dist=( (pos[0]-data["Ax"])**2 + (pos[1]-data["Ay"])**2 + (pos[2]-data["Az"])**2 ) ** 0.5
return "%.2f" % dist # two decimals
elif columnorder[section] == "_Bcurdist":
if self.mw.currentSystem is None:
return '?'
else:
pos=self.mw.currentSystem.getPosition()
dist=( (pos[0]-data["Bx"])**2 + (pos[1]-data["By"])**2 + (pos[2]-data["Bz"])**2 ) ** 0.5
return "%.2f" % dist # two decimals
elif columnorder[section] == "_targetdist":
if self.mw.targetSystemCombo.currentText() is None:
return '?'
else:
pos=self.mw.targetSystem.getPosition()
dist=( (pos[0]-data["Bx"])**2 + (pos[1]-data["By"])**2 + (pos[2]-data["Bz"])**2 ) ** 0.5
return "%.2f" % dist # two decimals
elif columnorder[section] == "DistanceSq":
return data["DistanceSq"] ** 0.5
elif columnorder[section] == "SystemDistance":
return data["SystemDistance"]
elif columnorder[section] == "profitPh":
return str(int(data["profit"]/data["hours"]))
elif columnorder[section] == "hours":
return str(int(data["hours"]*60*10)/10)
else:
return data[columnorder[section]]
return None # default when nothing matches
def headerData(self, section, orientation, role):
if role == QtCore.Qt.DisplayRole: # visible text data
if orientation != QtCore.Qt.Horizontal:
return None
columnorder=self.columnorder[self.mw.searchType]
if section>=len(columnorder):
return
if columnorder[section] in ["_curdist","_Bcurdist"]:
#field="Curr.Dist."
if self.mw.currentSystem is None:
sysname = 'here'
else:
sysname = self.mw.currentSystem.getName()
field="Ly from "+sysname
elif columnorder[section] in ["_targetdist"]:
#field="Curr.Dist."
if self.mw.targetSystem is None:
sysname = 'target'
else:
sysname = self.mw.targetSystem.getName()
field="Ly to "+sysname
elif columnorder[section] == "Asystemname":
field="From System"
elif columnorder[section] == "Abasename":
field="From Station"
elif columnorder[section] == "AexportPrice":
field="Export Cr"
elif columnorder[section] in ["commodityname","Ccommodityname"]:
field="Commodity"
elif columnorder[section] == "BimportPrice":
field="Import Cr"
elif columnorder[section] == "Bsystemname":
field="To System"
elif columnorder[section] == "Bbasename":
field="To Station"
elif columnorder[section] == "DistanceSq":
field="Distance"
elif columnorder[section] == "SystemDistance":
field="Distance"
elif columnorder[section] == "profit":
field="Profit Cr"
elif columnorder[section] == "Cprofit":
field="Return Profit Cr"
elif columnorder[section] == "totalprofit":
field="Total Profit Cr"
elif columnorder[section] == "hours":
field="Minutes travel"
elif columnorder[section] == "profitPh":
field="Profit Cr/h"
elif columnorder[section] == "Asupply":
field="Supply"
elif columnorder[section] == "Bdemand":
field="Demand"
else:
return None
return field
return None # default when nothing matches
def refeshData(self):
self.beginResetModel()
self.endResetModel()
#self.dataChanged.emit(self.createIndex(0,0), self.createIndex(self.columnCount(1), len(self.mw.result)), [])
self.dataChanged.emit(self.createIndex(0,0), self.createIndex(8, len(self.mw.result)), [])
# reset scroll
self.mw.SearchResultTable.verticalScrollBar().setSliderPosition(0)
| |
import datetime
from bitmovin import Bitmovin, Encoding, HTTPSInput, S3Output, H264CodecConfiguration, \
AACCodecConfiguration, H264Profile, StreamInput, SelectionMode, Stream, EncodingOutput, ACLEntry, ACLPermission, \
FMP4Muxing, MuxingStream, CloudRegion, DashManifest, FMP4Representation, FMP4RepresentationType, Period, \
VideoAdaptationSet, AudioAdaptationSet, Sprite
from bitmovin.errors import BitmovinError
API_KEY = '<INSERT_YOUR_API_KEY>'

# https://<INSERT_YOUR_HTTP_HOST>/<INSERT_YOUR_HTTP_PATH>
HTTPS_INPUT_HOST = '<INSERT_YOUR_HTTPS_HOST>'
HTTPS_INPUT_PATH = '<INSERT_YOUR_HTTPS_PATH>'

S3_OUTPUT_ACCESSKEY = '<INSERT_YOUR_ACCESS_KEY>'
S3_OUTPUT_SECRETKEY = '<INSERT_YOUR_SECRET_KEY>'
S3_OUTPUT_BUCKETNAME = '<INSERT_YOUR_BUCKET_NAME>'

# Run timestamp in the form YYYY-MM-DD__HH-MM-SS, used to give every run its
# own output folder (same result as the former str()/replace()/split() chain).
date_component = datetime.datetime.now().strftime('%Y-%m-%d__%H-%M-%S')
OUTPUT_BASE_PATH = '/your/output/base/path/{}/'.format(date_component)
def _create_h264_configuration(bitmovin, name, bitrate, width, height):
    """Create and persist one H264 codec configuration (25 fps, HIGH profile)."""
    configuration = H264CodecConfiguration(name=name,
                                           bitrate=bitrate,
                                           rate=25.0,
                                           width=width,
                                           height=height,
                                           profile=H264Profile.HIGH)
    return bitmovin.codecConfigurations.H264.create(configuration).resource


def _create_stream(bitmovin, encoding, codec_configuration, input_stream, name):
    """Create one encoding stream wiring *input_stream* to *codec_configuration*."""
    stream = Stream(codec_configuration_id=codec_configuration.id,
                    input_streams=[input_stream], name=name)
    return bitmovin.encodings.Stream.create(object_=stream,
                                            encoding_id=encoding.id).resource


def _create_fmp4_muxing(bitmovin, encoding, stream, s3_output, acl_entry,
                        sub_path, name):
    """Create a 4s-segment FMP4 muxing for *stream* below OUTPUT_BASE_PATH/sub_path."""
    encoding_output = EncodingOutput(output_id=s3_output.id,
                                     output_path=OUTPUT_BASE_PATH + sub_path,
                                     acl=[acl_entry])
    muxing = FMP4Muxing(segment_length=4,
                        segment_naming='seg_%number%.m4s',
                        init_segment_name='init.mp4',
                        streams=[MuxingStream(stream.id)],
                        outputs=[encoding_output],
                        name=name)
    return bitmovin.encodings.Muxing.FMP4.create(object_=muxing,
                                                 encoding_id=encoding.id).resource


def _add_fmp4_representation(bitmovin, dash_manifest, period, adaptation_set,
                             encoding, muxing, segment_path):
    """Add a template-based FMP4 representation for *muxing* to *adaptation_set*."""
    representation = FMP4Representation(FMP4RepresentationType.TEMPLATE,
                                        encoding_id=encoding.id,
                                        muxing_id=muxing.id,
                                        segment_path=segment_path)
    return bitmovin.manifests.DASH.add_fmp4_representation(
        object_=representation,
        manifest_id=dash_manifest.id,
        period_id=period.id,
        adaptationset_id=adaptation_set.id).resource


def main():
    """Encode an HTTPS source to a 1080p/720p + AAC DASH ladder on S3.

    Creates input/output resources, codec configurations, streams, a sprite,
    FMP4 muxings, runs the encoding, then generates a DASH manifest.
    The previous implementation repeated the muxing and representation
    boilerplate three times each; that is now factored into helpers.
    """
    bitmovin = Bitmovin(api_key=API_KEY)

    # --- Input / output resources --------------------------------------
    https_input = HTTPSInput(name='create_simple_encoding HTTPS input',
                             host=HTTPS_INPUT_HOST)
    https_input = bitmovin.inputs.HTTPS.create(https_input).resource

    s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY,
                         secret_key=S3_OUTPUT_SECRETKEY,
                         bucket_name=S3_OUTPUT_BUCKETNAME,
                         name='Sample S3 Output')
    s3_output = bitmovin.outputs.S3.create(s3_output).resource

    # All outputs are written world-readable.
    acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)

    encoding = Encoding(name='example encoding',
                        cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1)
    encoding = bitmovin.encodings.Encoding.create(encoding).resource

    # --- Codec configurations ------------------------------------------
    video_codec_configuration_1080p = _create_h264_configuration(
        bitmovin, 'example_video_codec_configuration_1080p', 4800000, 1920, 1080)
    video_codec_configuration_720p = _create_h264_configuration(
        bitmovin, 'example_video_codec_configuration_720p', 2400000, 1280, 720)

    audio_codec_configuration = AACCodecConfiguration(
        name='example_audio_codec_configuration_english',
        bitrate=128000,
        rate=48000)
    audio_codec_configuration = bitmovin.codecConfigurations.AAC.create(
        audio_codec_configuration).resource

    # --- Streams (video and audio tracks come from the same source file) ---
    video_input_stream = StreamInput(input_id=https_input.id,
                                     input_path=HTTPS_INPUT_PATH,
                                     selection_mode=SelectionMode.AUTO)
    audio_input_stream = StreamInput(input_id=https_input.id,
                                     input_path=HTTPS_INPUT_PATH,
                                     selection_mode=SelectionMode.AUTO)

    video_stream_1080p = _create_stream(bitmovin, encoding,
                                        video_codec_configuration_1080p,
                                        video_input_stream,
                                        'Sample Stream 1080p')
    video_stream_720p = _create_stream(bitmovin, encoding,
                                       video_codec_configuration_720p,
                                       video_input_stream,
                                       'Sample Stream 720p')
    audio_stream = _create_stream(bitmovin, encoding,
                                  audio_codec_configuration,
                                  audio_input_stream,
                                  'Sample Stream AUDIO')

    # --- Thumbnail sprite (derived from the 1080p stream) ---------------
    sprite_output = EncodingOutput(output_id=s3_output.id,
                                   output_path=OUTPUT_BASE_PATH + 'sprite/',
                                   acl=[acl_entry])
    sprite = Sprite(name='Sample Sprite Bitmovin Python',
                    description='Sample Sprite created with bitmovin-python',
                    height=360,
                    width=640,
                    sprite_name='fullhd_640x360.jpg',
                    vtt_name='fullhd_640x360.vtt',
                    distance=10,
                    outputs=[sprite_output])
    # Creating the resource is the side effect we need; the result is unused.
    sprite = bitmovin.encodings.Stream.Sprite.create(
        object_=sprite,
        encoding_id=encoding.id,
        stream_id=video_stream_1080p.id).resource

    # --- Segmented FMP4 muxings ----------------------------------------
    video_muxing_1080p = _create_fmp4_muxing(bitmovin, encoding,
                                             video_stream_1080p, s3_output,
                                             acl_entry, 'video/1080p/',
                                             'Sample Muxing 1080p')
    video_muxing_720p = _create_fmp4_muxing(bitmovin, encoding,
                                            video_stream_720p, s3_output,
                                            acl_entry, 'video/720p/',
                                            'Sample Muxing 720p')
    audio_muxing = _create_fmp4_muxing(bitmovin, encoding,
                                       audio_stream, s3_output,
                                       acl_entry, 'audio/',
                                       'Sample Muxing AUDIO')

    # --- Run the encoding ----------------------------------------------
    bitmovin.encodings.Encoding.start(encoding_id=encoding.id)
    try:
        bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
    except BitmovinError as bitmovin_error:
        print("Exception occurred while waiting for encoding to finish: {}".format(bitmovin_error))

    # --- DASH manifest ---------------------------------------------------
    manifest_output = EncodingOutput(output_id=s3_output.id,
                                     output_path=OUTPUT_BASE_PATH,
                                     acl=[acl_entry])
    dash_manifest = DashManifest(manifest_name='example_manifest_sintel_dash.mpd',
                                 outputs=[manifest_output],
                                 name='Sample DASH Manifest')
    dash_manifest = bitmovin.manifests.DASH.create(dash_manifest).resource

    period = bitmovin.manifests.DASH.add_period(
        object_=Period(), manifest_id=dash_manifest.id).resource
    video_adaptation_set = bitmovin.manifests.DASH.add_video_adaptation_set(
        object_=VideoAdaptationSet(),
        manifest_id=dash_manifest.id,
        period_id=period.id).resource
    audio_adaptation_set = bitmovin.manifests.DASH.add_audio_adaptation_set(
        object_=AudioAdaptationSet(lang='en'),
        manifest_id=dash_manifest.id,
        period_id=period.id).resource

    _add_fmp4_representation(bitmovin, dash_manifest, period,
                             video_adaptation_set, encoding,
                             video_muxing_1080p, 'video/1080p/')
    _add_fmp4_representation(bitmovin, dash_manifest, period,
                             video_adaptation_set, encoding,
                             video_muxing_720p, 'video/720p/')
    _add_fmp4_representation(bitmovin, dash_manifest, period,
                             audio_adaptation_set, encoding,
                             audio_muxing, 'audio/')

    bitmovin.manifests.DASH.start(manifest_id=dash_manifest.id)
    try:
        bitmovin.manifests.DASH.wait_until_finished(manifest_id=dash_manifest.id)
    except BitmovinError as bitmovin_error:
        print("Exception occurred while waiting for manifest creation to finish: {}".format(bitmovin_error))


if __name__ == '__main__':
    main()
| |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Classes for working with microVMs.
This module defines `Microvm`, which can be used to create, test drive, and
destroy microvms.
# TODO
- Use the Firecracker Open API spec to populate Microvm API resource URLs.
"""
import json
import logging
import os
import re
import select
import shutil
import time
import weakref
from threading import Lock
from retry import retry
from retry.api import retry_call
import host_tools.logging as log_tools
import host_tools.cpu_load as cpu_tools
import host_tools.memory as mem_tools
import host_tools.network as net_tools
from framework import utils
from framework.defs import MICROVM_KERNEL_RELPATH, MICROVM_FSFILES_RELPATH, \
FC_PID_FILE_NAME
from framework.http import Session
from framework.jailer import JailerContext
from framework.resources import Actions, Balloon, BootSource, Drive, \
DescribeInstance, FullConfig, InstanceVersion, Logger, MMDS, \
MachineConfigure, Metrics, Network, Vm, Vsock, SnapshotHelper
# Module-level logger for microVM lifecycle events.
LOG = logging.getLogger("microvm")

# Guards the per-instance in-memory log buffer (`Microvm.__log_data`), which
# is appended to by the logging thread and read by the `log_data` property.
data_lock = Lock()
# pylint: disable=R0904
class Microvm:
"""Class to represent a Firecracker microvm.
A microvm is described by a unique identifier, a path to all the resources
it needs in order to be able to start and the binaries used to spawn it.
Besides keeping track of microvm resources and exposing microvm API
methods, `spawn()` and `kill()` can be used to start/end the microvm
process.
"""
SCREEN_LOGFILE = "/tmp/screen-{}.log"
__log_data = ""
    def __init__(
        self,
        resource_path,
        fc_binary_path,
        jailer_binary_path,
        microvm_id,
        monitor_memory=True,
        bin_cloner_path=None,
    ):
        """Set up microVM attributes, paths, and data structures.

        :param resource_path: host directory under which this microVM's
            per-id folder (kernel, filesystem files) lives.
        :param fc_binary_path: path to the Firecracker binary; must exist.
        :param jailer_binary_path: path to the jailer binary; must exist.
        :param monitor_memory: when True, create a MemoryMonitor for this VM.
        :param bin_cloner_path: optional external clone/exec helper binary.
        """
        # Unique identifier for this machine.
        self._microvm_id = microvm_id

        # Compose the paths to the resources specific to this microvm.
        self._path = os.path.join(resource_path, microvm_id)
        self._kernel_path = os.path.join(self._path, MICROVM_KERNEL_RELPATH)
        self._fsfiles_path = os.path.join(self._path, MICROVM_FSFILES_RELPATH)
        self._kernel_file = ''
        self._rootfs_file = ''
        self._initrd_file = ''

        # The binaries this microvm will use to start.
        self._fc_binary_path = fc_binary_path
        assert os.path.exists(self._fc_binary_path)
        self._jailer_binary_path = jailer_binary_path
        assert os.path.exists(self._jailer_binary_path)

        # Create the jailer context associated with this microvm.
        self._jailer = JailerContext(
            jailer_id=self._microvm_id,
            exec_file=self._fc_binary_path,
        )
        self.jailer_clone_pid = None
        self._screen_log = None

        # Copy the /etc/localtime file in the jailer root
        self.jailer.copy_into_root(
            "/etc/localtime", create_jail=True)

        # Now deal with the things specific to the api session used to
        # communicate with this machine.
        self._api_session = None
        self._api_socket = None

        # Session name is composed of the last part of the temporary path
        # allocated by the current test session and the unique id of this
        # microVM. It should be unique.
        self._session_name = os.path.basename(os.path.normpath(
            resource_path
        )) + self._microvm_id

        # API resource wrappers, populated when the API session is set up.
        # nice-to-have: Put these in a dictionary.
        self.actions = None
        self.balloon = None
        self.boot = None
        self.desc_inst = None
        self.drive = None
        self.full_cfg = None
        self.logger = None
        self.metrics = None
        self.mmds = None
        self.network = None
        self.machine_cfg = None
        self.version = None
        self.vm = None
        self.vsock = None
        self.snapshot = None

        # Initialize the logging subsystem.
        self.logging_thread = None
        self._screen_pid = None

        # The ssh config dictionary is populated with information about how
        # to connect to a microVM that has ssh capability. The path of the
        # private key is populated by microvms with ssh capabilities and the
        # hostname is set from the MAC address used to configure the microVM.
        self._ssh_config = {
            'username': 'root',
            'netns_file_path': self._jailer.netns_file_path()
        }

        # Deal with memory monitoring.
        if monitor_memory:
            self._memory_monitor = mem_tools.MemoryMonitor()
        else:
            self._memory_monitor = None

        # Cpu load monitoring has to be explicitly enabled using
        # the `enable_cpu_load_monitor` method.
        self._cpu_load_monitor = None
        self._vcpus_count = None

        # External clone/exec tool, because Python can't into clone
        self.bin_cloner_path = bin_cloner_path

        # Flag checked in destructor to see abnormal signal-induced crashes.
        self.expect_kill_by_signal = False

        # MMDS content from file
        self._metadata_file = None
    def kill(self):
        """All clean up associated with this microVM should go here.

        Stops the log-consumer thread, kills the Firecracker/jailer/screen
        processes and stops the memory and CPU-load monitors.
        """
        # pylint: disable=subprocess-run-check
        if self.logging_thread is not None:
            self.logging_thread.stop()

        # Surface an unexpected signal-induced shutdown in the test output.
        if self.expect_kill_by_signal is False and \
                "Shutting down VM after intercepting signal" in self.log_data:
            # Too late to assert at this point, pytest will still report the
            # test as passed. BUT we can dump full logs for debugging,
            # as well as an intentional eye-sore in the test report.
            LOG.error(self.log_data)

        if self._jailer.daemonize:
            if self.jailer_clone_pid:
                utils.run_cmd(
                    'kill -9 {}'.format(self.jailer_clone_pid),
                    ignore_return_code=True)
        else:
            # Killing screen will send SIGHUP to underlying Firecracker.
            # Needed to avoid false positives in case kill() is called again.
            self.expect_kill_by_signal = True
            utils.run_cmd(
                'kill -9 {} || true'.format(self.screen_pid))

        # Check if Firecracker was launched by the jailer in a new pid ns.
        fc_pid_in_new_ns = self.pid_in_new_ns
        if fc_pid_in_new_ns:
            # We need to explicitly kill the Firecracker pid, since it's
            # different from the jailer pid that was previously killed.
            utils.run_cmd(f'kill -9 {fc_pid_in_new_ns}',
                          ignore_return_code=True)

        if self._memory_monitor and self._memory_monitor.is_alive():
            self._memory_monitor.signal_stop()
            self._memory_monitor.join(timeout=1)
            self._memory_monitor.check_samples()

        if self._cpu_load_monitor:
            self._cpu_load_monitor.signal_stop()
            self._cpu_load_monitor.join()
            self._cpu_load_monitor.check_samples()
    @property
    def firecracker_version(self):
        """Return the version of the Firecracker executable."""
        # Queried through the InstanceVersion API resource wrapper.
        return self.version.get()

    @property
    def api_session(self):
        """Return the api session associated with this microVM."""
        return self._api_session

    @property
    def api_socket(self):
        """Return the socket used by this api session."""
        # TODO: this methods is only used as a workaround for getting
        # firecracker PID. We should not be forced to make this public.
        return self._api_socket

    @property
    def path(self):
        """Return the path on disk used that represents this microVM."""
        return self._path

    @property
    def id(self):
        """Return the unique identifier of this microVM."""
        return self._microvm_id

    @property
    def jailer(self):
        """Return the jailer context associated with this microVM."""
        return self._jailer

    @jailer.setter
    def jailer(self, jailer):
        """Setter for associating a different jailer to the default one."""
        self._jailer = jailer

    @property
    def kernel_file(self):
        """Return the name of the kernel file used by this microVM to boot."""
        return self._kernel_file

    @kernel_file.setter
    def kernel_file(self, path):
        """Set the path to the kernel file."""
        self._kernel_file = path

    @property
    def initrd_file(self):
        """Return the name of the initrd file used by this microVM to boot."""
        return self._initrd_file

    @initrd_file.setter
    def initrd_file(self, path):
        """Set the path to the initrd file."""
        self._initrd_file = path

    @property
    def log_data(self):
        """Return a snapshot of the in-memory log buffer.

        !!!!OBS!!!!: Do not use this to check for message existence and
        rather use self.check_log_message or self.find_log_message.
        """
        # Read under the module-wide lock: the logging thread appends
        # concurrently through append_to_log_data().
        with data_lock:
            log_data = self.__log_data
        return log_data

    @property
    def rootfs_file(self):
        """Return the path to the image this microVM can boot into."""
        return self._rootfs_file

    @rootfs_file.setter
    def rootfs_file(self, path):
        """Set the path to the image associated."""
        self._rootfs_file = path

    @property
    def fsfiles(self):
        """Path to filesystem used by this microvm to attach new drives."""
        return self._fsfiles_path

    @property
    def ssh_config(self):
        """Get the ssh configuration used to ssh into some microVMs."""
        return self._ssh_config
@ssh_config.setter
def ssh_config(self, key, value):
"""Set the dict values inside this configuration."""
self._ssh_config.__setattr__(key, value)
    @property
    def metadata_file(self):
        """Return the path to a file used for populating MMDS."""
        return self._metadata_file

    @metadata_file.setter
    def metadata_file(self, path):
        """Set the path to a file to use for populating MMDS."""
        self._metadata_file = path

    @property
    def memory_monitor(self):
        """Get the memory monitor."""
        return self._memory_monitor

    @property
    def state(self):
        """Get the InstanceInfo property and return the state field."""
        return json.loads(self.desc_inst.get().content)["state"]

    @property
    def started(self):
        """Get the InstanceInfo property and return the started field.

        This is kept for legacy snapshot support.
        """
        return json.loads(self.desc_inst.get().content)["started"]

    @memory_monitor.setter
    def memory_monitor(self, monitor):
        """Set the memory monitor."""
        self._memory_monitor = monitor

    @property
    def pid_in_new_ns(self):
        """Get the pid of the Firecracker process in the new namespace.

        Returns None if Firecracker was not launched in a new pid ns.
        """
        fc_pid = None
        # The jailer writes the in-namespace PID into a well-known file in
        # the chroot; its absence means no new pid namespace was used.
        pid_file_path = f"{self.jailer.chroot_path()}/{FC_PID_FILE_NAME}"
        if os.path.exists(pid_file_path):
            # Read the PID stored inside the file.
            with open(pid_file_path, encoding='utf-8') as file:
                fc_pid = int(file.readline())
        return fc_pid
def flush_metrics(self, metrics_fifo):
"""Flush the microvm metrics.
Requires specifying the configured metrics file.
"""
# Empty the metrics pipe.
_ = metrics_fifo.sequential_reader(100)
response = self.actions.put(action_type='FlushMetrics')
assert self.api_session.is_status_no_content(response.status_code)
lines = metrics_fifo.sequential_reader(100)
assert len(lines) == 1
return json.loads(lines[0])
    def get_all_metrics(self, metrics_fifo):
        """Return all metric data points written by FC.

        Requires specifying the configured metrics file.
        """
        # Trigger a final flush so the freshest metrics are in the pipe.
        # Unlike flush_metrics(), nothing is discarded beforehand, so all
        # previously emitted datapoints are returned too.
        response = self.actions.put(action_type='FlushMetrics')
        assert self.api_session.is_status_no_content(response.status_code)
        return metrics_fifo.sequential_reader(1000)
    def append_to_log_data(self, data):
        """Append a message to the log data."""
        # `data_lock` is a module-level lock; both test code and the
        # console-monitor thread (see start_console_logger) append here.
        with data_lock:
            self.__log_data += data
def enable_cpu_load_monitor(self, threshold):
"""Enable the cpu load monitor."""
process_pid = self.jailer_clone_pid
# We want to monitor the emulation thread, which is currently
# the first one created.
# A possible improvement is to find it by name.
thread_pid = self.jailer_clone_pid
self._cpu_load_monitor = cpu_tools.CpuLoadMonitor(
process_pid,
thread_pid,
threshold
)
self._cpu_load_monitor.start()
def copy_to_jail_ramfs(self, src):
"""Copy a file to a jail ramfs."""
filename = os.path.basename(src)
dest_path = os.path.join(self.jailer.chroot_ramfs_path(), filename)
jailed_path = os.path.join(
'/', self.jailer.ramfs_subdir_name, filename
)
shutil.copy(src, dest_path)
cmd = 'chown {}:{} {}'.format(
self.jailer.uid,
self.jailer.gid,
dest_path
)
utils.run_cmd(cmd)
return jailed_path
    def create_jailed_resource(self, path, create_jail=False):
        """Create a hard link to some resource inside this microvm."""
        # NOTE(review): `create` is always True here; `create_jail` only
        # controls whether the jail directory itself gets created.
        return self.jailer.jailed_path(path, create=True,
                                       create_jail=create_jail)
    def get_jailed_resource(self, path):
        """Get the relative jailed path to a resource (no link creation)."""
        return self.jailer.jailed_path(path, create=False)
    def chroot(self):
        """Get the chroot of this microVM."""
        return self.jailer.chroot_path()
def setup(self):
"""Create a microvm associated folder on the host.
The root path of some microvm is `self._path`.
Also creates the where essential resources (i.e. kernel and root
filesystem) will reside.
# Microvm Folder Layout
There is a fixed tree layout for a microvm related folder:
``` file_tree
<microvm_uuid>/
kernel/
<kernel_file_n>
....
fsfiles/
<fsfile_n>
<initrd_file_n>
<ssh_key_n>
<other fsfiles>
...
...
```
"""
os.makedirs(self._path, exist_ok=True)
os.makedirs(self._kernel_path, exist_ok=True)
os.makedirs(self._fsfiles_path, exist_ok=True)
    @property
    def screen_log(self):
        """Get the screen log file (set by spawn() in screen mode)."""
        return self._screen_log
    @property
    def screen_pid(self):
        """Get the screen PID (set by spawn() in screen mode)."""
        return self._screen_pid
    @property
    def vcpus_count(self):
        """Get the vcpus count."""
        return self._vcpus_count
    @vcpus_count.setter
    def vcpus_count(self, vcpus_count: int):
        """Set the vcpus count."""
        self._vcpus_count = vcpus_count
def pin_vmm(self, cpu_id: int) -> bool:
"""Pin the firecracker process VMM thread to a cpu list."""
if self.jailer_clone_pid:
for thread in utils.ProcessManager.get_threads(
self.jailer_clone_pid)["firecracker"]:
utils.ProcessManager.set_cpu_affinity(thread, [cpu_id])
return True
return False
    def pin_vcpu(self, vcpu_id: int, cpu_id: int) -> bool:
        """Pin the firecracker vcpu thread to a cpu list.

        Returns True only when the microVM process is running.
        """
        if self.jailer_clone_pid:
            for thread in utils.ProcessManager.get_threads(
                    self.jailer_clone_pid)[f"fc_vcpu {vcpu_id}"]:
                utils.ProcessManager.set_cpu_affinity(thread, [cpu_id])
            return True
        return False
    def pin_api(self, cpu_id: int) -> bool:
        """Pin the firecracker process API server thread to a cpu list.

        Returns True only when the microVM process is running.
        """
        if self.jailer_clone_pid:
            for thread in utils.ProcessManager.get_threads(
                    self.jailer_clone_pid)["fc_api"]:
                utils.ProcessManager.set_cpu_affinity(thread, [cpu_id])
            return True
        return False
def spawn(self, create_logger=True,
log_file='log_fifo', log_level='Info', use_ramdisk=False):
"""Start a microVM as a daemon or in a screen session."""
# pylint: disable=subprocess-run-check
self._jailer.setup(use_ramdisk=use_ramdisk)
self._api_socket = self._jailer.api_socket_path()
self._api_session = Session()
self.actions = Actions(self._api_socket, self._api_session)
self.balloon = Balloon(self._api_socket, self._api_session)
self.boot = BootSource(self._api_socket, self._api_session)
self.desc_inst = DescribeInstance(self._api_socket, self._api_session)
self.full_cfg = FullConfig(self._api_socket, self._api_session)
self.logger = Logger(self._api_socket, self._api_session)
self.version = InstanceVersion(
self._api_socket, self._fc_binary_path, self._api_session)
self.machine_cfg = MachineConfigure(
self._api_socket,
self._api_session,
self.firecracker_version
)
self.metrics = Metrics(self._api_socket, self._api_session)
self.mmds = MMDS(self._api_socket, self._api_session)
self.network = Network(self._api_socket, self._api_session)
self.snapshot = SnapshotHelper(self._api_socket, self._api_session)
self.drive = Drive(self._api_socket, self._api_session,
self.firecracker_version)
self.vm = Vm(self._api_socket, self._api_session)
self.vsock = Vsock(self._api_socket, self._api_session)
if create_logger:
log_fifo_path = os.path.join(self.path, log_file)
log_fifo = log_tools.Fifo(log_fifo_path)
self.create_jailed_resource(log_fifo.path, create_jail=True)
# The default value for `level`, when configuring the
# logger via cmd line, is `Warning`. We set the level
# to `Info` to also have the boot time printed in fifo.
self.jailer.extra_args.update({'log-path': log_file,
'level': log_level})
self.start_console_logger(log_fifo)
if self.metadata_file:
if os.path.exists(self.metadata_file):
LOG.debug("metadata file exists, adding as a jailed resource")
self.create_jailed_resource(self.metadata_file,
create_jail=True)
self.jailer.extra_args.update(
{'metadata': os.path.basename(self.metadata_file)}
)
jailer_param_list = self._jailer.construct_param_list()
# When the daemonize flag is on, we want to clone-exec into the
# jailer rather than executing it via spawning a shell. Going
# forward, we'll probably switch to this method for running
# Firecracker in general, because it represents the way it's meant
# to be run by customers (together with CLONE_NEWPID flag).
#
# We have to use an external tool for CLONE_NEWPID, because
# 1) Python doesn't provide a os.clone() interface, and
# 2) Python's ctypes libc interface appears to be broken, causing
# our clone / exec to deadlock at some point.
if self._jailer.daemonize:
self.daemonize_jailer(jailer_param_list)
else:
# This file will collect any output from 'screen'ed Firecracker.
self._screen_log = self.SCREEN_LOGFILE.format(self._session_name)
start_cmd = 'screen -L -Logfile {logfile} '\
'-dmS {session} {binary} {params}'
start_cmd = start_cmd.format(
logfile=self.screen_log,
session=self._session_name,
binary=self._jailer_binary_path,
params=' '.join(jailer_param_list)
)
utils.run_cmd(start_cmd)
# Build a regex object to match (number).session_name
regex_object = re.compile(
r'([0-9]+)\.{}'.format(self._session_name))
# Run 'screen -ls' in a retry_call loop, 30 times with a one
# second delay between calls.
# If the output of 'screen -ls' matches the regex object, it will
# return the PID. Otherwise a RuntimeError will be raised.
screen_pid = retry_call(
utils.search_output_from_cmd,
fkwargs={
"cmd": 'screen -ls',
"find_regex": regex_object
},
exceptions=RuntimeError,
tries=30,
delay=1).group(1)
self._screen_pid = screen_pid
self.jailer_clone_pid = int(open('/proc/{0}/task/{0}/children'
.format(screen_pid),
encoding='utf-8').read().strip())
# Configure screen to flush stdout to file.
flush_cmd = 'screen -S {session} -X colon "logfile flush 0^M"'
utils.run_cmd(flush_cmd.format(session=self._session_name))
# Wait for the jailer to create resources needed, and Firecracker to
# create its API socket.
# We expect the jailer to start within 80 ms. However, we wait for
# 1 sec since we are rechecking the existence of the socket 5 times
# and leave 0.2 delay between them.
if 'no-api' not in self._jailer.extra_args:
self._wait_create()
if create_logger:
self.check_log_message("Running Firecracker")
    @retry(delay=0.2, tries=5)
    def _wait_create(self):
        """Wait until the API socket and chroot folder are available."""
        # `retry` re-invokes this (up to 5 tries, 0.2s apart) while
        # os.stat raises because the socket does not exist yet.
        os.stat(self._jailer.api_socket_path())
    @retry(delay=0.1, tries=5)
    def check_log_message(self, message):
        """Wait until `message` appears in logging output.

        Raises AssertionError after 5 failed tries (0.1s apart).
        """
        assert message in self.log_data
@retry(delay=0.1, tries=5)
def check_any_log_message(self, messages):
"""Wait until any message in `messages` appears in logging output."""
for message in messages:
if message in self.log_data:
return
raise AssertionError(
f"`{messages}` were not found in this log: {self.log_data}"
)
    @retry(delay=0.1, tries=5)
    def find_log_message(self, regex):
        """Wait until `regex` appears in logging output and return it.

        Returns the list of all matches found by re.findall.
        """
        reg_res = re.findall(regex, self.log_data)
        assert reg_res
        return reg_res
    def serial_input(self, input_string):
        """Send a string to the Firecracker serial console via screen."""
        # screen's 'stuff' command types the text into the session's tty.
        input_cmd = 'screen -S {session} -p 0 -X stuff "{input_string}"'
        utils.run_cmd(input_cmd.format(session=self._session_name,
                                       input_string=input_string))
    def basic_config(
            self,
            vcpu_count: int = 2,
            smt: bool = None,
            mem_size_mib: int = 256,
            add_root_device: bool = True,
            boot_args: str = None,
            use_initrd: bool = False,
            track_dirty_pages: bool = False,
            rootfs_io_engine=None
    ):
        """Shortcut for quickly configuring a microVM.

        It handles:
        - CPU and memory.
        - Kernel image (will load the one in the microVM allocated path).
        - Root File System (will use the one in the microVM allocated path).
        - Does not start the microvm.

        The function checks the response status code and asserts that
        the response is within the interval [200, 300).

        :raises AssertionError: if any API call is rejected.
        """
        response = self.machine_cfg.put(
            vcpu_count=vcpu_count,
            smt=smt,
            mem_size_mib=mem_size_mib,
            track_dirty_pages=track_dirty_pages
        )
        assert self._api_session.is_status_no_content(response.status_code), \
            response.text

        if self.memory_monitor:
            # The monitor needs the guest size and the Firecracker pid to
            # track memory usage.
            self.memory_monitor.guest_mem_mib = mem_size_mib
            self.memory_monitor.pid = self.jailer_clone_pid
            self.memory_monitor.start()

        boot_source_args = {
            'kernel_image_path': self.create_jailed_resource(self.kernel_file),
            'boot_args': boot_args
        }

        # Only attach an initrd when requested AND one was allocated.
        if use_initrd and self.initrd_file != '':
            boot_source_args.update(
                initrd_path=self.create_jailed_resource(self.initrd_file))

        response = self.boot.put(**boot_source_args)
        assert self._api_session.is_status_no_content(response.status_code), \
            response.text

        if add_root_device and self.rootfs_file != '':
            # Add the root file system with rw permissions.
            response = self.drive.put(
                drive_id='rootfs',
                path_on_host=self.create_jailed_resource(self.rootfs_file),
                is_root_device=True,
                is_read_only=False,
                io_engine=rootfs_io_engine
            )
            assert self._api_session \
                .is_status_no_content(response.status_code), \
                response.text
    def daemonize_jailer(
            self,
            jailer_param_list
    ):
        """Daemonize the jailer.

        Prefers the external cloner binary (so Firecracker gets a new pid
        namespace) unless the jailer itself was asked to create one; in
        that case falls back to a plain fork+exec of the jailer.
        """
        if self.bin_cloner_path and self.jailer.new_pid_ns is not True:
            cmd = [self.bin_cloner_path] + \
                  [self._jailer_binary_path] + \
                  jailer_param_list
            _p = utils.run_cmd(cmd)
            # Terrible hack to make the tests fail when starting the
            # jailer fails with a panic. This is needed because we can't
            # get the exit code of the jailer. In newpid_clone.c we are
            # not waiting for the process and we always return 0 if the
            # clone was successful (which in most cases will be) and we
            # don't do anything if the jailer was not started
            # successfully.
            if _p.stderr.strip():
                raise Exception(_p.stderr)
            self.jailer_clone_pid = int(_p.stdout.rstrip())
        else:
            # Fallback mechanism for when we offload PID namespacing
            # to the jailer.
            _pid = os.fork()
            if _pid == 0:
                # Child: replace this process image with the jailer.
                os.execv(
                    self._jailer_binary_path,
                    [self._jailer_binary_path] + jailer_param_list
                )
            self.jailer_clone_pid = _pid
def add_drive(
self,
drive_id,
file_path,
root_device=False,
is_read_only=False,
partuuid=None,
cache_type=None,
io_engine=None,
use_ramdisk=False,
):
"""Add a block device."""
response = self.drive.put(
drive_id=drive_id,
path_on_host=(
self.copy_to_jail_ramfs(file_path) if
use_ramdisk else self.create_jailed_resource(file_path)
),
is_root_device=root_device,
is_read_only=is_read_only,
partuuid=partuuid,
cache_type=cache_type,
io_engine=io_engine
)
assert self.api_session.is_status_no_content(response.status_code)
    def patch_drive(self, drive_id, file):
        """Modify/patch an existing block device.

        `file` is expected to expose a `path` attribute on the host.
        """
        response = self.drive.patch(
            drive_id=drive_id,
            path_on_host=self.create_jailed_resource(file.path),
        )
        assert self.api_session.is_status_no_content(response.status_code)
    def ssh_network_config(
            self,
            network_config,
            iface_id,
            tx_rate_limiter=None,
            rx_rate_limiter=None,
            tapname=None
    ):
        """Create a host tap device and a guest network interface.

        'network_config' is used to generate 2 IPs: one for the tap device
        and one for the microvm. Adds the hostname of the microvm to the
        ssh_config dictionary.

        :param network_config: UniqueIPv4Generator instance
        :param iface_id: the interface id for the API request
        :param tx_rate_limiter: limit the tx rate
        :param rx_rate_limiter: limit the rx rate
        :param tapname: optional tap device name; defaults to one derived
            from the microVM id and `iface_id`.
        :return: an instance of the tap which needs to be kept around until
            cleanup is desired, the configured guest and host ips, respectively.
        """
        # Create tap before configuring interface.
        tapname = tapname or (self.id[:8] + 'tap' + iface_id)
        (host_ip, guest_ip) = network_config.get_next_available_ips(2)
        tap = self.create_tap_and_ssh_config(host_ip,
                                             guest_ip,
                                             network_config.get_netmask_len(),
                                             tapname)
        # Guest MAC is derived deterministically from the guest IP.
        guest_mac = net_tools.mac_from_ip(guest_ip)

        response = self.network.put(
            iface_id=iface_id,
            host_dev_name=tapname,
            guest_mac=guest_mac,
            tx_rate_limiter=tx_rate_limiter,
            rx_rate_limiter=rx_rate_limiter
        )
        assert self._api_session.is_status_no_content(response.status_code)

        return tap, host_ip, guest_ip
def create_tap_and_ssh_config(
self,
host_ip,
guest_ip,
netmask_len,
tapname=None
):
"""Create tap device and configure ssh."""
assert tapname is not None
tap = net_tools.Tap(
tapname,
self._jailer.netns,
ip="{}/{}".format(
host_ip,
netmask_len
)
)
self.config_ssh(guest_ip)
return tap
    def config_ssh(self, guest_ip):
        """Configure ssh to target `guest_ip` (stored under 'hostname')."""
        self.ssh_config['hostname'] = guest_ip
    def start(self, check=True):
        """Start the microvm.

        This function has asserts to validate that the microvm boot success.
        """
        # Check that the VM has not started yet
        try:
            assert self.state == "Not started"
        except KeyError:
            # Legacy snapshots expose `started` instead of `state`.
            assert self.started is False

        response = self.actions.put(action_type='InstanceStart')
        if check:
            assert \
                self._api_session.is_status_no_content(response.status_code), \
                response.text

            # Check that the VM has started
            try:
                assert self.state == "Running"
            except KeyError:
                assert self.started is True
    def pause_to_snapshot(self,
                          mem_file_path=None,
                          snapshot_path=None,
                          diff=False,
                          version=None):
        """Pauses the microVM, and creates snapshot.

        This function validates that the microVM pauses successfully and
        creates a snapshot.
        """
        assert mem_file_path is not None, "Please specify mem_file_path."
        assert snapshot_path is not None, "Please specify snapshot_path."

        response = self.vm.patch(state='Paused')
        assert self.api_session.is_status_no_content(response.status_code)

        # NOTE(review): presumably disables request timing, since snapshot
        # creation can be slow — confirm against Session.untime().
        self.api_session.untime()
        response = self.snapshot.create(mem_file_path=mem_file_path,
                                        snapshot_path=snapshot_path,
                                        diff=diff,
                                        version=version)
        assert self.api_session.is_status_no_content(response.status_code), \
            response.text
    def start_console_logger(self, log_fifo):
        """
        Start a thread that monitors the microVM console.

        The console output will be redirected to the log file.
        """
        def monitor_fd(microvm, path):
            # `microvm` is a weakref so this daemon thread does not keep
            # the Microvm object alive; a dead ref triggers the
            # AttributeError handlers below.
            try:
                fd = open(path, "r", encoding='utf-8')
                while True:
                    try:
                        if microvm().logging_thread.stopped():
                            return
                        data = fd.readline()
                        if data:
                            microvm().append_to_log_data(data)
                    except AttributeError as _:
                        # This means that the microvm object was destroyed and
                        # we are using a None reference.
                        return
            except IOError as error:
                # pylint: disable=W0150
                try:
                    LOG.error("[%s] IOError while monitoring fd:"
                              " %s", microvm().id, error)
                    microvm().append_to_log_data(str(error))
                except AttributeError as _:
                    # This means that the microvm object was destroyed and
                    # we are using a None reference.
                    pass
                finally:
                    # Always exit the thread on I/O failure.
                    return

        self.logging_thread = utils.StoppableThread(
            target=monitor_fd,
            args=(weakref.ref(self), log_fifo.path),
            daemon=True)
        self.logging_thread.start()
    def __del__(self):
        """Teardown the object.

        Best-effort cleanup via kill(); note __del__ may run during
        interpreter shutdown with partially torn-down globals.
        """
        self.kill()
class Serial:
    """Serial console communication with a Microvm via its screen log."""

    # Seconds to wait for a delimited read before failing.
    RX_TIMEOUT_S = 5

    def __init__(self, vm):
        """Initialize a new Serial object bound to `vm`."""
        self._poller = None
        self._vm = vm

    def open(self):
        """Open a serial connection (idempotent)."""
        if self._poller is not None:
            # Serial already opened.
            return
        # Poll the screen log file for console output.
        log_fd = os.open(self._vm.screen_log, os.O_RDONLY)
        self._poller = select.poll()
        self._poller.register(log_fd, select.POLLIN | select.POLLHUP)

    def tx(self, input_string, end='\n'):
        # pylint: disable=invalid-name
        # No need to have a snake_case naming style for a single word.
        r"""Send a string terminated by an end token (defaulting to "\n")."""
        self._vm.serial_input(input_string + end)

    def rx_char(self):
        """Read a single character, or '' when nothing is ready."""
        for fd, flag in self._poller.poll(0.1):
            if flag & select.POLLHUP:
                assert False, "Oh! The console vanished before test completed."
            if flag & select.POLLIN:
                return str(os.read(fd, 1),
                           encoding='utf-8',
                           errors='ignore')
        return ''

    def rx(self, token="\n"):
        # pylint: disable=invalid-name
        # No need to have a snake_case naming style for a single word.
        r"""Read a string delimited by an end token (defaults to "\n")."""
        collected = ''
        deadline = time.time() + self.RX_TIMEOUT_S
        while True:
            collected += self.rx_char()
            if collected.endswith(token):
                return collected
            if time.time() >= deadline:
                assert False
| |
"""Support for MQTT discovery."""
import asyncio
from collections import deque
import functools
import json
import logging
import re
import time
from homeassistant.const import CONF_DEVICE, CONF_PLATFORM
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_ABORT
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.loader import async_get_mqtt
from .. import mqtt
from .abbreviations import ABBREVIATIONS, DEVICE_ABBREVIATIONS
from .const import (
ATTR_DISCOVERY_HASH,
ATTR_DISCOVERY_PAYLOAD,
ATTR_DISCOVERY_TOPIC,
CONF_AVAILABILITY,
CONF_TOPIC,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)

# Matches "<component>/[<node_id>/]<object_id>/config" relative to the
# discovery prefix (node_id is optional).
TOPIC_MATCHER = re.compile(
    r"(?P<component>\w+)/(?:(?P<node_id>[a-zA-Z0-9_-]+)/)"
    r"?(?P<object_id>[a-zA-Z0-9_-]+)/config"
)

# Platforms that may be set up through MQTT discovery.
SUPPORTED_COMPONENTS = [
    "alarm_control_panel",
    "binary_sensor",
    "button",
    "camera",
    "climate",
    "cover",
    "device_automation",
    "device_tracker",
    "fan",
    "humidifier",
    "light",
    "lock",
    "number",
    "scene",
    "select",
    "sensor",
    "switch",
    "tag",
    "vacuum",
]

# Keys into hass.data used by this module.
ALREADY_DISCOVERED = "mqtt_discovered_components"
PENDING_DISCOVERED = "mqtt_pending_components"
CONFIG_ENTRY_IS_SETUP = "mqtt_config_entry_is_setup"
DATA_CONFIG_ENTRY_LOCK = "mqtt_config_entry_lock"
DATA_CONFIG_FLOW_LOCK = "mqtt_discovery_config_flow_lock"
DISCOVERY_UNSUBSCRIBE = "mqtt_discovery_unsubscribe"
INTEGRATION_UNSUBSCRIBE = "mqtt_integration_discovery_unsubscribe"

# Dispatcher signal name templates (formatted with hash/component).
MQTT_DISCOVERY_UPDATED = "mqtt_discovery_updated_{}"
MQTT_DISCOVERY_NEW = "mqtt_discovery_new_{}_{}"
MQTT_DISCOVERY_DONE = "mqtt_discovery_done_{}"

LAST_DISCOVERY = "mqtt_last_discovery"

# Placeholder character for the topic-base ("~") abbreviation.
TOPIC_BASE = "~"
def clear_discovery_hash(hass, discovery_hash):
    """Clear entry in ALREADY_DISCOVERED list."""
    # Raises KeyError if the hash was never discovered, same as `del`.
    hass.data[ALREADY_DISCOVERED].pop(discovery_hash)
def set_discovery_hash(hass, discovery_hash):
    """Set entry in ALREADY_DISCOVERED list."""
    hass.data[ALREADY_DISCOVERED][discovery_hash] = {}
class MQTTConfig(dict):
    """Dummy class to allow adding attributes.

    Discovery attaches `discovery_data` and `__configuration_source__` to
    payloads via setattr, which plain dict instances do not support.
    """
async def async_start(  # noqa: C901
    hass: HomeAssistant, discovery_topic, config_entry=None
) -> None:
    """Start MQTT Discovery.

    Subscribes to the discovery topics, processes retained/incoming
    discovery payloads, and wires up integration-specific discovery.
    """
    mqtt_integrations = {}

    async def async_discovery_message_received(msg):
        """Process the received message."""
        hass.data[LAST_DISCOVERY] = time.time()
        payload = msg.payload
        topic = msg.topic
        topic_trimmed = topic.replace(f"{discovery_topic}/", "", 1)

        if not (match := TOPIC_MATCHER.match(topic_trimmed)):
            if topic_trimmed.endswith("config"):
                _LOGGER.warning(
                    "Received message on illegal discovery topic '%s'", topic
                )
            return

        component, node_id, object_id = match.groups()

        if component not in SUPPORTED_COMPONENTS:
            _LOGGER.warning("Integration %s is not supported", component)
            return

        if payload:
            try:
                payload = json.loads(payload)
            except ValueError:
                _LOGGER.warning("Unable to parse JSON %s: '%s'", object_id, payload)
                return

        payload = MQTTConfig(payload)

        # Expand abbreviated top-level keys (e.g. "stat_t" -> "state_topic").
        for key in list(payload):
            abbreviated_key = key
            key = ABBREVIATIONS.get(key, key)
            payload[key] = payload.pop(abbreviated_key)

        if CONF_DEVICE in payload:
            # Expand abbreviated device-registry keys.
            device = payload[CONF_DEVICE]
            for key in list(device):
                abbreviated_key = key
                key = DEVICE_ABBREVIATIONS.get(key, key)
                device[key] = device.pop(abbreviated_key)

        if TOPIC_BASE in payload:
            # Substitute the "~" base into every *_topic value that
            # starts or ends with it.
            base = payload.pop(TOPIC_BASE)
            for key, value in payload.items():
                if isinstance(value, str) and value:
                    if value[0] == TOPIC_BASE and key.endswith("topic"):
                        payload[key] = f"{base}{value[1:]}"
                    if value[-1] == TOPIC_BASE and key.endswith("topic"):
                        payload[key] = f"{value[:-1]}{base}"
            if payload.get(CONF_AVAILABILITY):
                for availability_conf in cv.ensure_list(payload[CONF_AVAILABILITY]):
                    if not isinstance(availability_conf, dict):
                        continue
                    if topic := availability_conf.get(CONF_TOPIC):
                        if topic[0] == TOPIC_BASE:
                            availability_conf[CONF_TOPIC] = f"{base}{topic[1:]}"
                        if topic[-1] == TOPIC_BASE:
                            availability_conf[CONF_TOPIC] = f"{topic[:-1]}{base}"

        # If present, the node_id will be included in the discovered object id
        discovery_id = " ".join((node_id, object_id)) if node_id else object_id
        discovery_hash = (component, discovery_id)

        if payload:
            # Attach MQTT topic to the payload, used for debug prints
            setattr(payload, "__configuration_source__", f"MQTT (topic: '{topic}')")
            discovery_data = {
                ATTR_DISCOVERY_HASH: discovery_hash,
                ATTR_DISCOVERY_PAYLOAD: payload,
                ATTR_DISCOVERY_TOPIC: topic,
            }
            setattr(payload, "discovery_data", discovery_data)

            payload[CONF_PLATFORM] = "mqtt"

        if discovery_hash in hass.data[PENDING_DISCOVERED]:
            # Queue the update while a previous discovery is in flight.
            pending = hass.data[PENDING_DISCOVERED][discovery_hash]["pending"]
            pending.appendleft(payload)
            _LOGGER.info(
                "Component has already been discovered: %s %s, queuing update",
                component,
                discovery_id,
            )
            return

        await async_process_discovery_payload(component, discovery_id, payload)

    async def async_process_discovery_payload(component, discovery_id, payload):
        """Dispatch a discovery payload as an update, add, or removal."""
        _LOGGER.debug("Process discovery payload %s", payload)
        discovery_hash = (component, discovery_id)
        if discovery_hash in hass.data[ALREADY_DISCOVERED] or payload:

            async def discovery_done(_):
                # Called via MQTT_DISCOVERY_DONE; drain queued payloads
                # one at a time, then unsubscribe when none remain.
                pending = hass.data[PENDING_DISCOVERED][discovery_hash]["pending"]
                _LOGGER.debug("Pending discovery for %s: %s", discovery_hash, pending)
                if not pending:
                    hass.data[PENDING_DISCOVERED][discovery_hash]["unsub"]()
                    hass.data[PENDING_DISCOVERED].pop(discovery_hash)
                else:
                    payload = pending.pop()
                    await async_process_discovery_payload(
                        component, discovery_id, payload
                    )

            if discovery_hash not in hass.data[PENDING_DISCOVERED]:
                hass.data[PENDING_DISCOVERED][discovery_hash] = {
                    "unsub": async_dispatcher_connect(
                        hass,
                        MQTT_DISCOVERY_DONE.format(discovery_hash),
                        discovery_done,
                    ),
                    "pending": deque([]),
                }

        if discovery_hash in hass.data[ALREADY_DISCOVERED]:
            # Dispatch update
            _LOGGER.info(
                "Component has already been discovered: %s %s, sending update",
                component,
                discovery_id,
            )
            async_dispatcher_send(
                hass, MQTT_DISCOVERY_UPDATED.format(discovery_hash), payload
            )
        elif payload:
            # Add component
            _LOGGER.info("Found new component: %s %s", component, discovery_id)
            hass.data[ALREADY_DISCOVERED][discovery_hash] = None

            config_entries_key = f"{component}.mqtt"
            async with hass.data[DATA_CONFIG_ENTRY_LOCK]:
                if config_entries_key not in hass.data[CONFIG_ENTRY_IS_SETUP]:
                    if component == "device_automation":
                        # Local import to avoid circular dependencies
                        # pylint: disable=import-outside-toplevel
                        from . import device_automation

                        await device_automation.async_setup_entry(hass, config_entry)
                    elif component == "tag":
                        # Local import to avoid circular dependencies
                        # pylint: disable=import-outside-toplevel
                        from . import tag

                        await tag.async_setup_entry(hass, config_entry)
                    else:
                        await hass.config_entries.async_forward_entry_setup(
                            config_entry, component
                        )

                    hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key)

            async_dispatcher_send(
                hass, MQTT_DISCOVERY_NEW.format(component, "mqtt"), payload
            )
        else:
            # Unhandled discovery message
            async_dispatcher_send(
                hass, MQTT_DISCOVERY_DONE.format(discovery_hash), None
            )

    hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
    hass.data[DATA_CONFIG_FLOW_LOCK] = asyncio.Lock()
    hass.data[CONFIG_ENTRY_IS_SETUP] = set()
    hass.data[ALREADY_DISCOVERED] = {}
    hass.data[PENDING_DISCOVERED] = {}

    # Listen on both "<prefix>/<component>/<object_id>/config" and the
    # node_id variant with one extra topic level.
    discovery_topics = [
        f"{discovery_topic}/+/+/config",
        f"{discovery_topic}/+/+/+/config",
    ]
    hass.data[DISCOVERY_UNSUBSCRIBE] = await asyncio.gather(
        *(
            mqtt.async_subscribe(hass, topic, async_discovery_message_received, 0)
            for topic in discovery_topics
        )
    )

    hass.data[LAST_DISCOVERY] = time.time()
    mqtt_integrations = await async_get_mqtt(hass)

    hass.data[INTEGRATION_UNSUBSCRIBE] = {}

    for (integration, topics) in mqtt_integrations.items():

        async def async_integration_message_received(integration, msg):
            """Process the received message."""
            key = f"{integration}_{msg.subscribed_topic}"

            # Lock to prevent initiating many parallel config flows.
            # Note: The lock is not intended to prevent a race, only for performance
            async with hass.data[DATA_CONFIG_FLOW_LOCK]:
                # Already unsubscribed
                if key not in hass.data[INTEGRATION_UNSUBSCRIBE]:
                    return

                data = mqtt.MqttServiceInfo(
                    topic=msg.topic,
                    payload=msg.payload,
                    qos=msg.qos,
                    retain=msg.retain,
                    subscribed_topic=msg.subscribed_topic,
                    timestamp=msg.timestamp,
                )
                result = await hass.config_entries.flow.async_init(
                    integration, context={"source": DOMAIN}, data=data
                )
                if (
                    result
                    and result["type"] == RESULT_TYPE_ABORT
                    and result["reason"]
                    in ("already_configured", "single_instance_allowed")
                ):
                    # Integration is fully set up; stop feeding it messages.
                    unsub = hass.data[INTEGRATION_UNSUBSCRIBE].pop(key, None)
                    if unsub is None:
                        return
                    unsub()

        for topic in topics:
            key = f"{integration}_{topic}"
            hass.data[INTEGRATION_UNSUBSCRIBE][key] = await mqtt.async_subscribe(
                hass,
                topic,
                functools.partial(async_integration_message_received, integration),
                0,
            )
async def async_stop(hass: HomeAssistant) -> None:
    """Stop MQTT Discovery by releasing every discovery subscription."""
    discovery_unsubs = hass.data.get(DISCOVERY_UNSUBSCRIBE)
    if discovery_unsubs is not None:
        for unsub in discovery_unsubs:
            unsub()
        hass.data[DISCOVERY_UNSUBSCRIBE] = []
    integration_unsubs = hass.data.get(INTEGRATION_UNSUBSCRIBE)
    if integration_unsubs is not None:
        # Unsubscribe and remove each entry, leaving an empty dict behind.
        for key in list(integration_unsubs):
            integration_unsubs.pop(key)()
| |
import os
import pytest
import yaml
import shutil
from util import update_dict
from user_sync.config import ConfigFileLoader, ConfigLoader, DictConfig
from user_sync import flags
from user_sync.error import AssertionException
def load_ldap_config_options(args):
    """Build and return LDAP connector options from the config in `args`."""
    # NOTE(review): imports are function-scoped — presumably to defer
    # loading the connector machinery until a test actually needs it.
    from user_sync.connector.directory import DirectoryConnector
    from user_sync.connector.directory_ldap import LDAPDirectoryConnector

    config_loader = ConfigLoader(args)
    dc_mod_name = config_loader.get_directory_connector_module_name()
    dc_mod = __import__(dc_mod_name, fromlist=[''])
    dc = DirectoryConnector(dc_mod)
    dc_config_options = config_loader.get_directory_connector_options(dc.name)
    caller_config = DictConfig('%s configuration' % dc.name, dc_config_options)
    return LDAPDirectoryConnector.get_options(caller_config)
@pytest.fixture
def root_config_file(fixture_dir):
    """Path to the canned root user-sync config fixture."""
    return os.path.join(fixture_dir, 'user-sync-config.yml')
@pytest.fixture
def ldap_config_file(fixture_dir):
    """Path to the canned LDAP connector config fixture."""
    return os.path.join(fixture_dir, 'connector-ldap.yml')
@pytest.fixture
def umapi_config_file(fixture_dir):
    """Path to the canned UMAPI connector config fixture."""
    return os.path.join(fixture_dir, 'connector-umapi.yml')
@pytest.fixture
def extension_config_file(fixture_dir):
    """Path to the canned extension config fixture."""
    return os.path.join(fixture_dir, 'extension-config.yml')
@pytest.fixture
def tmp_config_files(root_config_file, ldap_config_file, umapi_config_file, tmpdir):
    """Copy the canned config files into a scratch dir; return their paths."""
    copies = []
    for src in (root_config_file, ldap_config_file, umapi_config_file):
        dest = os.path.join(str(tmpdir), os.path.split(src)[-1])
        shutil.copy(src, dest)
        copies.append(dest)
    return tuple(copies)
@pytest.fixture
def tmp_extension_config(extension_config_file, tmpdir):
    """Copy the extension config into a scratch dir; return its path."""
    dest = os.path.join(str(tmpdir), os.path.split(extension_config_file)[-1])
    shutil.copy(extension_config_file, dest)
    return dest
@pytest.fixture
def modify_root_config(tmp_config_files):
    """Return a callable that updates a key path in the root config file."""
    (root_config_file, _, _) = tmp_config_files

    def _modify_root_config(keys, val):
        # Fix: use context managers so the YAML file handles are closed
        # deterministically (the original relied on GC to close them).
        with open(root_config_file) as config_in:
            conf = yaml.safe_load(config_in)
        conf = update_dict(conf, keys, val)
        with open(root_config_file, 'w') as config_out:
            yaml.dump(conf, config_out)
        return root_config_file

    return _modify_root_config
@pytest.fixture
def modify_ldap_config(tmp_config_files):
    """Return a callable that updates a key path in the LDAP config file."""
    (_, ldap_config_file, _) = tmp_config_files

    def _modify_ldap_config(keys, val):
        # Fix: use context managers so the YAML file handles are closed
        # deterministically (the original relied on GC to close them).
        with open(ldap_config_file) as config_in:
            conf = yaml.safe_load(config_in)
        conf = update_dict(conf, keys, val)
        with open(ldap_config_file, 'w') as config_out:
            yaml.dump(conf, config_out)
        return ldap_config_file

    return _modify_ldap_config
def test_load_root(root_config_file):
    """Load root config file and test for presence of root-level keys"""
    config = ConfigFileLoader.load_root_config(root_config_file)
    assert isinstance(config, dict)
    for section in ('adobe_users', 'directory_users', 'logging', 'limits',
                    'invocation_defaults'):
        assert section in config
def test_max_adobe_percentage(modify_root_config, cli_args):
    """Percentage-style max_adobe_only_users is accepted; garbage raises."""
    root_config_file = modify_root_config(['limits', 'max_adobe_only_users'], "50%")
    config = ConfigFileLoader.load_root_config(root_config_file)
    assert ('limits' in config and 'max_adobe_only_users' in config['limits'] and
            config['limits']['max_adobe_only_users'] == "50%")

    args = cli_args({'config_filename': root_config_file})
    options = ConfigLoader(args).get_rule_options()
    assert 'max_adobe_only_users' in options and options['max_adobe_only_users'] == '50%'

    # A malformed percentage must be rejected during validation.
    modify_root_config(['limits', 'max_adobe_only_users'], "error%")
    with pytest.raises(AssertionException):
        ConfigLoader(args).get_rule_options()
def test_additional_groups_config(modify_root_config, cli_args):
    """additional_groups sources survive loading as compiled regexes."""
    addl_groups = [
        {"source": r"ACL-(.+)", "target": r"ACL-Grp-(\1)"},
        {"source": r"(.+)-ACL", "target": r"ACL-Grp-(\1)"},
    ]
    root_config_file = modify_root_config(['directory_users', 'additional_groups'], addl_groups)
    config = ConfigFileLoader.load_root_config(root_config_file)
    assert ('additional_groups' in config['directory_users'] and
            len(config['directory_users']['additional_groups']) == 2)

    args = cli_args({'config_filename': root_config_file})
    options = ConfigLoader(args).get_rule_options()
    # Loader compiles 'source' into a regex; check the original patterns.
    assert addl_groups[0]['source'] in options['additional_groups'][0]['source'].pattern
    assert addl_groups[1]['source'] in options['additional_groups'][1]['source'].pattern
def test_twostep_config(tmp_config_files, modify_ldap_config, cli_args):
    """Exercise validation of the LDAP `two_steps_lookup` options."""
    (root_config_file, ldap_config_file, _) = tmp_config_files
    modify_ldap_config(['two_steps_lookup'], {})
    args = cli_args({'config_filename': root_config_file})

    # test invalid "two_steps_lookup" config
    with pytest.raises(AssertionException):
        load_ldap_config_options(args)

    # test valid "two_steps_lookup" config with "group_member_filter_format" still set
    modify_ldap_config(['two_steps_lookup', 'group_member_attribute_name'], 'member')
    with pytest.raises(AssertionException):
        load_ldap_config_options(args)

    # test valid "two_steps_lookup" setup
    modify_ldap_config(['two_steps_lookup', 'group_member_attribute_name'], 'member')
    modify_ldap_config(['group_member_filter_format'], "")
    options = load_ldap_config_options(args)
    assert 'two_steps_enabled' in options
    assert 'two_steps_lookup' in options
    assert 'group_member_attribute_name' in options['two_steps_lookup']
    assert options['two_steps_lookup']['group_member_attribute_name'] == 'member'
def test_adobe_users_config(tmp_config_files, modify_root_config, cli_args):
    """Check 'adobe_users' resolution: built-in default, config default, CLI override."""
    root_config_file, _, _ = tmp_config_files
    args = cli_args({'config_filename': root_config_file})

    def loaded_adobe_users(invocation_args):
        # Load invocation options and return the resolved 'adobe_users' value.
        invocation_options = ConfigLoader(invocation_args).load_invocation_options()
        assert 'adobe_users' in invocation_options
        return invocation_options['adobe_users']

    # Built-in default with nothing configured.
    assert loaded_adobe_users(args) == ['all']

    # Default supplied via invocation_defaults in the root config.
    modify_root_config(['invocation_defaults', 'adobe_users'], "mapped")
    assert loaded_adobe_users(args) == ['mapped']

    # A command-line parameter overrides the configured default.
    modify_root_config(['invocation_defaults', 'adobe_users'], "all")
    args = cli_args({'config_filename': root_config_file, 'adobe_users': ['mapped']})
    assert loaded_adobe_users(args) == ['mapped']
def test_extension_load(tmp_config_files, modify_root_config, cli_args, tmp_extension_config, monkeypatch):
    """Test that extension config is loaded when config option is specified"""
    with monkeypatch.context() as patcher:
        # Force the extension feature flag on for this test.
        patcher.setattr(flags, 'get_flag', lambda *a: True)
        root_config_file, _, _ = tmp_config_files
        args = cli_args({'config_filename': root_config_file})

        # No extension configured: no after-map hook is produced.
        rule_options = ConfigLoader(args).get_rule_options()
        assert 'after_mapping_hook' in rule_options
        assert rule_options['after_mapping_hook'] is None

        # With the extension configured, the hook is populated.
        modify_root_config(['directory_users', 'extension'], tmp_extension_config)
        rule_options = ConfigLoader(args).get_rule_options()
        assert 'after_mapping_hook' in rule_options
        assert rule_options['after_mapping_hook'] is not None
def test_extension_flag(tmp_config_files, modify_root_config, cli_args, tmp_extension_config, monkeypatch):
    """Test that extension flag will prevent after-map hook from running"""
    with monkeypatch.context() as patcher:
        # Force the extension feature flag off for this test.
        patcher.setattr(flags, 'get_flag', lambda *a: False)
        root_config_file, _, _ = tmp_config_files
        args = cli_args({'config_filename': root_config_file})

        # Even with an extension configured, the disabled flag suppresses the hook.
        modify_root_config(['directory_users', 'extension'], tmp_extension_config)
        rule_options = ConfigLoader(args).get_rule_options()
        assert 'after_mapping_hook' in rule_options
        assert rule_options['after_mapping_hook'] is None
def test_shell_exec_flag(tmp_config_files, modify_root_config, cli_args, monkeypatch):
    """Test that shell exec flag will raise an error if command is specified to get connector config"""
    from user_sync.connector.directory import DirectoryConnector
    with monkeypatch.context() as patcher:
        # Shell execution disabled for this test.
        patcher.setattr(flags, 'get_flag', lambda *a: False)
        root_config_file, _, _ = tmp_config_files
        args = cli_args({'config_filename': root_config_file})

        # Configure the ldap connector config as a shell command.
        modify_root_config(['directory_users', 'connectors', 'ldap'], "$(some command)")
        loader = ConfigLoader(args)
        module_name = loader.get_directory_connector_module_name()
        if module_name is not None:
            connector_module = __import__(module_name, fromlist=[''])
            connector = DirectoryConnector(connector_module)
            # With shell exec disabled, reading command-based config must fail.
            with pytest.raises(AssertionException):
                loader.get_directory_connector_options(connector.name)
| |
#
# Copyright (c) 2015 The heketi Authors
#
# This file is licensed to you under your choice of the GNU Lesser
# General Public License, version 3 or any later version (LGPLv3 or
# later), as published by the Free Software Foundation,
# or under the Apache License, Version 2.0 <LICENSE-APACHE2 or
# http://www.apache.org/licenses/LICENSE-2.0>.
#
# You may not use this file except in compliance with those terms.
#
import unittest
import requests
import heketi
from heketi import HeketiClient
# Credentials and endpoint for the live Heketi server these integration
# tests run against.
TEST_ADMIN_KEY = "My Secret"
TEST_SERVER = "http://localhost:8080"
# Interval (seconds) between polls of asynchronous Heketi operations.
TEST_POLL_DELAY = 0.2
class test_heketi(unittest.TestCase):
    """Integration tests for the Python Heketi client.

    NOTE(review): these tests talk to a live Heketi server at TEST_SERVER
    using TEST_ADMIN_KEY; they are integration tests, not unit tests, and
    each test creates and tears down its own cluster/node/device state.
    """

    def test_cluster(self):
        """Round-trip cluster create/info/setflags/list/delete."""
        c = HeketiClient(TEST_SERVER, "admin", TEST_ADMIN_KEY)
        cluster_req = {}
        cluster_req['block'] = True
        cluster_req['file'] = True
        cluster = c.cluster_create(cluster_req)
        self.assertNotEqual(cluster['id'], "")
        self.assertEqual(len(cluster['nodes']), 0)
        self.assertEqual(len(cluster['volumes']), 0)
        self.assertTrue(cluster['block'])
        self.assertTrue(cluster['file'])
        # Request bad id
        with self.assertRaises(requests.exceptions.HTTPError):
            c.cluster_info("bad")
        # Get info about the cluster
        info = c.cluster_info(cluster['id'])
        self.assertEqual(info, cluster)
        # change cluster flags
        cluster_setflags_req = {}
        cluster_setflags_req['block'] = False
        cluster_setflags_req['file'] = True
        ok = c.cluster_setflags(cluster['id'], cluster_setflags_req)
        self.assertTrue(ok)
        # verify the cluster flags have changed
        info = c.cluster_info(cluster['id'])
        self.assertEqual(info['id'], cluster['id'])
        self.assertFalse(info['block'])
        self.assertTrue(info['file'])
        # Get a list of clusters
        # NOTE(review): "list" shadows the builtin; kept as-is here.
        list = c.cluster_list()
        self.assertEqual(1, len(list['clusters']))
        self.assertEqual(list['clusters'][0], cluster['id'])
        # Delete non-existent cluster
        with self.assertRaises(requests.exceptions.HTTPError):
            c.cluster_delete("badid")
        # Delete current cluster
        self.assertTrue(c.cluster_delete(info['id']))

    def test_node(self):
        """Round-trip node add/info/state-change/delete within a cluster."""
        node_req = {}
        c = HeketiClient(TEST_SERVER, "admin", TEST_ADMIN_KEY,
                         poll_delay=TEST_POLL_DELAY)
        self.assertNotEqual(c, '')
        # Create cluster
        cluster_req = {}
        cluster_req['block'] = True
        cluster_req['file'] = True
        cluster = c.cluster_create(cluster_req)
        self.assertNotEqual(cluster['id'], "")
        self.assertEqual(len(cluster['nodes']), 0)
        self.assertEqual(len(cluster['volumes']), 0)
        # Add node to unknown cluster (must fail)
        node_req['cluster'] = "bad_id"
        node_req['zone'] = 10
        node_req['hostnames'] = {
            "manage": ["node1-manage.gluster.lab.com"],
            "storage": ["node1-storage.gluster.lab.com"]
        }
        with self.assertRaises(requests.exceptions.HTTPError):
            c.node_add(node_req)
        # Create node request packet with the real cluster id
        node_req['cluster'] = cluster['id']
        node = c.node_add(node_req)
        self.assertEqual(node['zone'], node_req['zone'])
        self.assertNotEqual(node['id'], "")
        self.assertEqual(node_req['hostnames'], node['hostnames'])
        self.assertEqual(len(node['devices']), 0)
        # Info on invalid id
        with self.assertRaises(requests.exceptions.HTTPError):
            c.node_info("badid")
        # Get node info; new nodes start online
        info = c.node_info(node['id'])
        self.assertEqual(info, node)
        self.assertEqual(info['state'], 'online')
        # Set offline
        state = {}
        state['state'] = 'offline'
        self.assertTrue(c.node_state(node['id'], state))
        # Get node info
        info = c.node_info(node['id'])
        self.assertEqual(info['state'], 'offline')
        # ...and back online
        state['state'] = 'online'
        self.assertTrue(c.node_state(node['id'], state))
        info = c.node_info(node['id'])
        self.assertEqual(info['state'], 'online')
        # Delete invalid node
        with self.assertRaises(requests.exceptions.HTTPError):
            c.node_delete("badid")
        # Can't delete cluster with a node
        with self.assertRaises(requests.exceptions.HTTPError):
            c.cluster_delete(cluster['id'])
        # Delete node
        del_node = c.node_delete(node['id'])
        self.assertTrue(del_node)
        # Delete cluster
        del_cluster = c.cluster_delete(cluster['id'])
        self.assertTrue(del_cluster)

    def test_device(self):
        """Round-trip device add/info/state/resync/delete on a node."""
        # Create app
        c = HeketiClient(TEST_SERVER, "admin", TEST_ADMIN_KEY,
                         poll_delay=TEST_POLL_DELAY)
        # Create cluster
        cluster_req = {}
        cluster_req['block'] = True
        cluster_req['file'] = True
        cluster = c.cluster_create(cluster_req)
        self.assertNotEqual(cluster['id'], '')
        # Create node
        node_req = {}
        node_req['cluster'] = cluster['id']
        node_req['zone'] = 10
        node_req['hostnames'] = {
            "manage": ["node1-manage.gluster.lab.com"],
            "storage": ["node1-storage.gluster.lab.com"]
        }
        node = c.node_add(node_req)
        self.assertNotEqual(node['id'], '')
        # Create a device request
        device_req = {}
        device_req['name'] = "/dev/sda"
        device_req['node'] = node['id']
        device = c.device_add(device_req)
        self.assertTrue(device)
        # Get node information; the new device is attached and empty
        info = c.node_info(node['id'])
        self.assertEqual(len(info['devices']), 1)
        self.assertEqual(len(info['devices'][0]['bricks']), 0)
        self.assertEqual(info['devices'][0]['name'], device_req['name'])
        self.assertNotEqual(info['devices'][0]['id'], '')
        # Get info from an unknown id
        with self.assertRaises(requests.exceptions.HTTPError):
            c.device_info("badid")
        # Get device information
        device_id = info['devices'][0]['id']
        device_info = c.device_info(device_id)
        self.assertEqual(device_info, info['devices'][0])
        # Set offline
        state = {}
        state['state'] = 'offline'
        self.assertTrue(c.device_state(device_id, state))
        # Get device info
        info = c.device_info(device_id)
        self.assertEqual(info['state'], 'offline')
        state['state'] = 'online'
        self.assertTrue(c.device_state(device_id, state))
        info = c.device_info(device_id)
        self.assertEqual(info['state'], 'online')
        # Resync device
        device_resync = c.device_resync(device_id)
        self.assertTrue(device_resync)
        # Try to delete node, and will not until we delete the device
        with self.assertRaises(requests.exceptions.HTTPError):
            c.node_delete(node['id'])
        # Delete unknown device
        # NOTE(review): this calls node_delete, not device_delete — looks
        # like a copy/paste slip in the original test; confirm intent.
        with self.assertRaises(requests.exceptions.HTTPError):
            c.node_delete("badid")
        # Set device to offline (required before marking it failed)
        state = {}
        state['state'] = 'offline'
        self.assertTrue(c.device_state(device_id, state))
        # Set device to failed (required before deletion)
        state = {}
        state['state'] = 'failed'
        self.assertTrue(c.device_state(device_id, state))
        # Delete device
        device_delete = c.device_delete(device_info['id'])
        self.assertTrue(device_delete)
        # Delete node
        node_delete = c.node_delete(node['id'])
        self.assertTrue(node_delete)
        # Delete cluster
        cluster_delete = c.cluster_delete(cluster['id'])
        self.assertTrue(cluster_delete)

    def test_volume(self):
        """Round-trip volume create/list/info/expand/delete, then full teardown."""
        # Create cluster
        c = HeketiClient(TEST_SERVER, "admin", TEST_ADMIN_KEY,
                         poll_delay=TEST_POLL_DELAY)
        self.assertEqual(True, c != '')
        cluster_req = {}
        cluster_req['block'] = True
        cluster_req['file'] = True
        cluster = c.cluster_create(cluster_req)
        self.assertNotEqual(cluster['id'], '')
        # Create node request packet: 3 nodes, each with 3 devices
        print ("Creating Cluster")
        for i in range(3):
            node_req = {}
            node_req['cluster'] = cluster['id']
            node_req['hostnames'] = {
                "manage": ["node%s-manage.gluster.lab.com" % (i)],
                "storage": ["node%s-storage.gluster.lab.com" % (i)]}
            node_req['zone'] = i + 1
            # Create node
            node = c.node_add(node_req)
            self.assertNotEqual(node['id'], '')
            # Create and add devices
            # NOTE(review): this inner loop reuses "i", shadowing the outer
            # node index; harmless here since node creation is done above.
            for i in range(1, 4):
                device_req = {}
                device_req['name'] = "/dev/sda%s" % (i)
                device_req['node'] = node['id']
                device = c.device_add(device_req)
                self.assertTrue(device)
        # Get list of volumes (none yet)
        # NOTE(review): "list" shadows the builtin; kept as-is here.
        list = c.volume_list()
        self.assertEqual(len(list['volumes']), 0)
        # Create a volume
        print ("Creating a volume")
        volume_req = {}
        volume_req['size'] = 10
        volume = c.volume_create(volume_req)
        self.assertNotEqual(volume['id'], "")
        self.assertEqual(volume['size'], volume_req['size'])
        # Get list of volumes
        list = c.volume_list()
        self.assertEqual(len(list['volumes']), 1)
        self.assertEqual(list['volumes'][0], volume['id'])
        # Get info on incorrect id
        with self.assertRaises(requests.exceptions.HTTPError):
            c.volume_info("badid")
        # Get info
        info = c.volume_info(volume['id'])
        self.assertEqual(info, volume)
        # Expand volume with a bad id
        volume_ex_params = {}
        volume_ex_params['expand_size'] = 10
        with self.assertRaises(requests.exceptions.HTTPError):
            c.volume_expand("badid", volume_ex_params)
        # Expand volume: 10 original + 10 expand = 20
        print ("Expanding volume")
        volumeInfo = c.volume_expand(volume['id'], volume_ex_params)
        self.assertEqual(volumeInfo['size'], 20)
        # Delete bad id
        with self.assertRaises(requests.exceptions.HTTPError):
            c.volume_delete("badid")
        # Delete volume
        print ("Deleting volume")
        volume_delete = c.volume_delete(volume['id'])
        self.assertTrue(volume_delete)
        # Tear down every device and node before the cluster can go
        print ("Deleting Cluster")
        clusterInfo = c.cluster_info(cluster['id'])
        for node_id in clusterInfo['nodes']:
            # Get node information
            nodeInfo = c.node_info(node_id)
            # Delete all devices (offline -> failed -> delete)
            for device in nodeInfo['devices']:
                devid = device['id']
                self.assertTrue(c.device_state(devid, {'state': 'offline'}))
                self.assertTrue(c.device_state(devid, {'state': 'failed'}))
                device_delete = c.device_delete(devid)
                self.assertTrue(device_delete)
            # Delete node
            node_delete = c.node_delete(node_id)
            self.assertTrue(node_delete)
        # Delete cluster
        cluster_delete = c.cluster_delete(cluster['id'])
        self.assertTrue(cluster_delete)

    def test_node_tags(self):
        """Exercise node tag set/update/delete plus invalid-input errors."""
        # Create app
        c = HeketiClient(TEST_SERVER, "admin", TEST_ADMIN_KEY,
                         poll_delay=TEST_POLL_DELAY)
        # Create cluster
        cluster_req = {}
        cluster_req['block'] = True
        cluster_req['file'] = True
        cluster = c.cluster_create(cluster_req)
        self.assertNotEqual(cluster['id'], '')
        # Create node (with initial tags)
        node_req = {}
        node_req['cluster'] = cluster['id']
        node_req['zone'] = 10
        node_req['hostnames'] = {
            "manage": ["node1-manage.gluster.lab.com"],
            "storage": ["node1-storage.gluster.lab.com"]
        }
        node_req["tags"] = {
            "foo": "bar",
            "speed": "ultra",
        }
        node = c.node_add(node_req)
        self.assertNotEqual(node['id'], '')
        node_id = node['id']
        # Initial tags round-trip through node_info
        nodeInfo = c.node_info(node_id)
        self.assertEqual(nodeInfo['tags'], {
            "foo": "bar",
            "speed": "ultra",
        })
        # add some new tags (TAGS_UPDATE merges with existing)
        r = c.node_set_tags(node_id, dict(
            change_type=heketi.TAGS_UPDATE,
            tags={"robot": "bender"}))
        self.assertTrue(r)
        nodeInfo = c.node_info(node_id)
        self.assertEqual(nodeInfo['tags'], {
            "foo": "bar",
            "speed": "ultra",
            "robot": "bender",
        })
        # reset tags to empty (TAGS_SET replaces wholesale)
        r = c.node_set_tags(node_id, dict(
            change_type=heketi.TAGS_SET,
            tags={}))
        self.assertTrue(r)
        nodeInfo = c.node_info(node_id)
        self.assertFalse(nodeInfo.get('tags'))
        # add some new tags back
        r = c.node_set_tags(node_id, dict(
            change_type=heketi.TAGS_UPDATE,
            tags={"robot": "bender", "fish": "bulb"}))
        self.assertTrue(r)
        nodeInfo = c.node_info(node_id)
        self.assertEqual(nodeInfo['tags'], {
            "robot": "bender",
            "fish": "bulb",
        })
        # delete a particular tag (value is ignored on delete)
        r = c.node_set_tags(node_id, dict(
            change_type=heketi.TAGS_DELETE,
            tags={"robot": ""}))
        self.assertTrue(r)
        nodeInfo = c.node_info(node_id)
        self.assertEqual(nodeInfo['tags'], {
            "fish": "bulb",
        })
        # invalid change_type raises error
        with self.assertRaises(requests.exceptions.HTTPError):
            c.node_set_tags(node_id, dict(
                change_type="zoidberg",
                tags={"robot": "flexo"}))
        # invalid tag name raises error
        with self.assertRaises(requests.exceptions.HTTPError):
            c.node_set_tags(node_id, dict(
                change_type=heketi.TAGS_UPDATE,
                tags={"$! W ~~~": "ok"}))
        # check nothing changed by the failed calls
        nodeInfo = c.node_info(node_id)
        self.assertEqual(nodeInfo['tags'], {
            "fish": "bulb",
        })
        # Delete node
        node_delete = c.node_delete(node['id'])
        self.assertTrue(node_delete)
        # Delete cluster
        cluster_delete = c.cluster_delete(cluster['id'])
        self.assertTrue(cluster_delete)

    def test_device_tags(self):
        """Exercise device tag set/update/delete plus invalid-input errors."""
        # Create app
        c = HeketiClient(TEST_SERVER, "admin", TEST_ADMIN_KEY,
                         poll_delay=TEST_POLL_DELAY)
        # Create cluster
        cluster_req = {}
        cluster_req['block'] = True
        cluster_req['file'] = True
        cluster = c.cluster_create(cluster_req)
        self.assertNotEqual(cluster['id'], '')
        # Create node
        node_req = {}
        node_req['cluster'] = cluster['id']
        node_req['zone'] = 10
        node_req['hostnames'] = {
            "manage": ["node1-manage.gluster.lab.com"],
            "storage": ["node1-storage.gluster.lab.com"]
        }
        node = c.node_add(node_req)
        self.assertNotEqual(node['id'], '')
        # Create a device (with tags)
        device_req = {}
        device_req['name'] = "/dev/sda"
        device_req['node'] = node['id']
        device_req["tags"] = {
            "foo": "bar",
            "speed": "ultra",
        }
        device = c.device_add(device_req)
        self.assertTrue(device)
        # get information
        info = c.node_info(node['id'])
        self.assertEqual(len(info['devices']), 1)
        device_id = info['devices'][0]['id']
        # check tags on device
        device_info = c.device_info(device_id)
        self.assertEqual(device_info['tags'], {
            "foo": "bar",
            "speed": "ultra",
        })
        # add some new tags (TAGS_UPDATE merges with existing)
        r = c.device_set_tags(device_id, dict(
            change_type=heketi.TAGS_UPDATE,
            tags={"robot": "calculon"}))
        self.assertTrue(r)
        device_info = c.device_info(device_id)
        self.assertEqual(device_info['tags'], {
            "foo": "bar",
            "speed": "ultra",
            "robot": "calculon",
        })
        # reset tags to empty (TAGS_SET replaces wholesale)
        r = c.device_set_tags(device_id, dict(
            change_type=heketi.TAGS_SET,
            tags={}))
        self.assertTrue(r)
        device_info = c.device_info(device_id)
        self.assertFalse(device_info.get('tags'))
        # add some new tags back
        r = c.device_set_tags(device_id, dict(
            change_type=heketi.TAGS_UPDATE,
            tags={"robot": "calculon", "fish": "blinky"}))
        self.assertTrue(r)
        device_info = c.device_info(device_id)
        self.assertEqual(device_info['tags'], {
            "robot": "calculon",
            "fish": "blinky",
        })
        # delete a particular tag (value is ignored on delete)
        r = c.device_set_tags(device_id, dict(
            change_type=heketi.TAGS_DELETE,
            tags={"robot": ""}))
        self.assertTrue(r)
        device_info = c.device_info(device_id)
        self.assertEqual(device_info['tags'], {
            "fish": "blinky",
        })
        # invalid change_type raises error
        with self.assertRaises(requests.exceptions.HTTPError):
            c.device_set_tags(device_id, dict(
                change_type="hermes",
                tags={"robot": "flexo"}))
        # invalid tag name raises error
        with self.assertRaises(requests.exceptions.HTTPError):
            c.device_set_tags(device_id, dict(
                change_type=heketi.TAGS_UPDATE,
                tags={"": "ok"}))
        # check nothing changed by the failed calls
        device_info = c.device_info(device_id)
        self.assertEqual(device_info['tags'], {
            "fish": "blinky",
        })
        # delete device (offline -> failed -> delete)
        self.assertTrue(c.device_state(device_id, {'state': 'offline'}))
        self.assertTrue(c.device_state(device_id, {'state': 'failed'}))
        self.assertTrue(c.device_delete(device_id))
        # Delete node
        node_delete = c.node_delete(node['id'])
        self.assertTrue(node_delete)
        # Delete cluster
        cluster_delete = c.cluster_delete(cluster['id'])
        self.assertTrue(cluster_delete)
# Allow running this module directly against a live Heketi server.
if __name__ == '__main__':
    unittest.main()
| |
import mock
import pytest
from api.base.settings.defaults import API_BASE
from api_tests.requests.mixins import NodeRequestTestMixin
from osf.utils import permissions
@pytest.mark.django_db
class TestCreateNodeRequestAction(NodeRequestTestMixin):
    """Permission and behaviour tests for POSTing node-request actions.

    Covers who may accept, reject, or edit-comment a node access request
    (admin vs requester vs write contributor vs non-contributor), what
    happens when access requests are disabled on the target node, which
    actions send notification emails, and the contributor permissions /
    visibility applied on acceptance.
    """

    @pytest.fixture()
    def url(self, node_request):
        # Request-actions list endpoint. node_request is declared as a
        # dependency so the request exists before tests post to this URL;
        # the request id itself travels in the payload, not the URL.
        return '/{}actions/requests/'.format(API_BASE)

    def create_payload(self, _id=None, **attrs):
        """Build a JSON-API payload for a node-request action.

        ``attrs`` become the action attributes (e.g. ``trigger``,
        ``comment``, ``permissions``, ``visible``); when ``_id`` is given
        it is added as the ``target`` relationship identifying the node
        request the action applies to.
        """
        payload = {
            'data': {
                'attributes': attrs,
                'relationships': {},
                'type': 'node-request-actions'
            }
        }
        if _id:
            payload['data']['relationships']['target'] = {
                'data': {
                    'type': 'node-requests',
                    'id': _id
                }
            }
        return payload

    def test_requester_cannot_view(self, app, requester, url):
        """GET on the actions endpoint is not allowed (405), even for the requester."""
        res = app.get(url, auth=requester.auth, expect_errors=True)
        assert res.status_code == 405

    def test_requester_cannot_approve(self, app, requester, url, node_request):
        """A requester may not accept their own request; state is unchanged."""
        initial_state = node_request.machine_state
        payload = self.create_payload(node_request._id, trigger='accept')
        res = app.post_json_api(url, payload, auth=requester.auth, expect_errors=True)
        assert res.status_code == 403
        node_request.reload()
        assert initial_state == node_request.machine_state

    def test_requester_cannot_reject(self, app, requester, url, node_request):
        """A requester may not reject their own request; state is unchanged."""
        initial_state = node_request.machine_state
        payload = self.create_payload(node_request._id, trigger='reject')
        res = app.post_json_api(url, payload, auth=requester.auth, expect_errors=True)
        assert res.status_code == 403
        node_request.reload()
        assert initial_state == node_request.machine_state

    def test_requester_can_edit_comment(self, app, requester, url, node_request):
        """A requester may edit the comment on their own request; state is unchanged."""
        initial_state = node_request.machine_state
        initial_comment = node_request.comment
        payload = self.create_payload(node_request._id, trigger='edit_comment', comment='ASDFG')
        res = app.post_json_api(url, payload, auth=requester.auth)
        assert res.status_code == 201
        node_request.reload()
        assert initial_state == node_request.machine_state
        assert initial_comment != node_request.comment

    def test_admin_can_approve(self, app, admin, url, node_request):
        """An admin may accept a request, adding the creator as a contributor."""
        initial_state = node_request.machine_state
        assert node_request.creator not in node_request.target.contributors
        payload = self.create_payload(node_request._id, trigger='accept')
        res = app.post_json_api(url, payload, auth=admin.auth)
        assert res.status_code == 201
        node_request.reload()
        assert initial_state != node_request.machine_state
        assert node_request.creator in node_request.target.contributors

    def test_admin_can_reject(self, app, admin, url, node_request):
        """An admin may reject a request; the creator is not added."""
        initial_state = node_request.machine_state
        assert node_request.creator not in node_request.target.contributors
        payload = self.create_payload(node_request._id, trigger='reject')
        res = app.post_json_api(url, payload, auth=admin.auth)
        assert res.status_code == 201
        node_request.reload()
        assert initial_state != node_request.machine_state
        assert node_request.creator not in node_request.target.contributors

    def test_admin_cannot_view(self, app, admin, url):
        """GET on the actions endpoint is not allowed (405) for admins either."""
        res = app.get(url, auth=admin.auth, expect_errors=True)
        assert res.status_code == 405

    def test_admin_cannot_edit_comment(self, app, admin, url, node_request):
        """An admin may not edit the requester's comment; nothing changes."""
        initial_state = node_request.machine_state
        initial_comment = node_request.comment
        payload = self.create_payload(node_request._id, trigger='edit_comment', comment='ASDFG')
        res = app.post_json_api(url, payload, auth=admin.auth, expect_errors=True)
        assert res.status_code == 403
        node_request.reload()
        assert initial_state == node_request.machine_state
        assert initial_comment == node_request.comment

    def test_write_contrib_cannot_approve(self, app, write_contrib, url, node_request):
        """A write contributor may not accept a request."""
        initial_state = node_request.machine_state
        payload = self.create_payload(node_request._id, trigger='accept')
        res = app.post_json_api(url, payload, auth=write_contrib.auth, expect_errors=True)
        assert res.status_code == 403
        node_request.reload()
        assert initial_state == node_request.machine_state

    def test_write_contrib_cannot_reject(self, app, write_contrib, url, node_request):
        """A write contributor may not reject a request."""
        initial_state = node_request.machine_state
        payload = self.create_payload(node_request._id, trigger='reject')
        res = app.post_json_api(url, payload, auth=write_contrib.auth, expect_errors=True)
        assert res.status_code == 403
        node_request.reload()
        assert initial_state == node_request.machine_state

    def test_write_contrib_cannot_view(self, app, write_contrib, url):
        """GET on the actions endpoint is not allowed (405) for write contributors."""
        res = app.get(url, auth=write_contrib.auth, expect_errors=True)
        assert res.status_code == 405

    def test_write_contrib_cannot_edit_comment(self, app, write_contrib, url, node_request):
        """A write contributor may not edit the requester's comment."""
        initial_state = node_request.machine_state
        initial_comment = node_request.comment
        payload = self.create_payload(node_request._id, trigger='edit_comment', comment='ASDFG')
        res = app.post_json_api(url, payload, auth=write_contrib.auth, expect_errors=True)
        assert res.status_code == 403
        node_request.reload()
        assert initial_state == node_request.machine_state
        assert initial_comment == node_request.comment

    def test_noncontrib_cannot_approve(self, app, noncontrib, url, node_request):
        """A non-contributor may not accept a request."""
        initial_state = node_request.machine_state
        payload = self.create_payload(node_request._id, trigger='accept')
        res = app.post_json_api(url, payload, auth=noncontrib.auth, expect_errors=True)
        assert res.status_code == 403
        node_request.reload()
        assert initial_state == node_request.machine_state

    def test_noncontrib_cannot_reject(self, app, noncontrib, url, node_request):
        """A non-contributor may not reject a request."""
        initial_state = node_request.machine_state
        payload = self.create_payload(node_request._id, trigger='reject')
        res = app.post_json_api(url, payload, auth=noncontrib.auth, expect_errors=True)
        assert res.status_code == 403
        node_request.reload()
        assert initial_state == node_request.machine_state

    def test_noncontrib_cannot_view(self, app, noncontrib, url):
        """GET on the actions endpoint is not allowed (405) for non-contributors."""
        res = app.get(url, auth=noncontrib.auth, expect_errors=True)
        assert res.status_code == 405

    def test_noncontrib_cannot_edit_comment(self, app, noncontrib, url, node_request):
        """A non-contributor may not edit the requester's comment."""
        initial_state = node_request.machine_state
        initial_comment = node_request.comment
        payload = self.create_payload(node_request._id, trigger='edit_comment', comment='ASDFG')
        res = app.post_json_api(url, payload, auth=noncontrib.auth, expect_errors=True)
        assert res.status_code == 403
        node_request.reload()
        assert initial_state == node_request.machine_state
        assert initial_comment == node_request.comment

    def test_edits_fail_with_requests_disabled(self, app, requester, url, node_request):
        """Comment edits are refused once access requests are disabled on the node."""
        initial_state = node_request.machine_state
        initial_comment = node_request.comment
        payload = self.create_payload(node_request._id, trigger='edit_comment', comment='ASDFG')
        node_request.target.access_requests_enabled = False
        node_request.target.save()
        res = app.post_json_api(url, payload, auth=requester.auth, expect_errors=True)
        assert res.status_code == 403
        node_request.reload()
        assert initial_state == node_request.machine_state
        assert initial_comment == node_request.comment

    def test_approves_fail_with_requests_disabled(self, app, admin, url, node_request):
        """Acceptance is refused once access requests are disabled on the node."""
        initial_state = node_request.machine_state
        assert node_request.creator not in node_request.target.contributors
        payload = self.create_payload(node_request._id, trigger='accept')
        node_request.target.access_requests_enabled = False
        node_request.target.save()
        res = app.post_json_api(url, payload, auth=admin.auth, expect_errors=True)
        assert res.status_code == 403
        node_request.reload()
        assert initial_state == node_request.machine_state
        assert node_request.creator not in node_request.target.contributors

    def test_rejects_fail_with_requests_disabled(self, app, admin, url, node_request):
        """Rejection is refused once access requests are disabled on the node."""
        initial_state = node_request.machine_state
        assert node_request.creator not in node_request.target.contributors
        payload = self.create_payload(node_request._id, trigger='reject')
        node_request.target.access_requests_enabled = False
        node_request.target.save()
        res = app.post_json_api(url, payload, auth=admin.auth, expect_errors=True)
        assert res.status_code == 403
        node_request.reload()
        assert initial_state == node_request.machine_state
        assert node_request.creator not in node_request.target.contributors

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_email_sent_on_approve(self, mock_mail, app, admin, url, node_request):
        """Accepting a request sends exactly one notification email."""
        initial_state = node_request.machine_state
        assert node_request.creator not in node_request.target.contributors
        payload = self.create_payload(node_request._id, trigger='accept')
        res = app.post_json_api(url, payload, auth=admin.auth)
        assert res.status_code == 201
        node_request.reload()
        assert initial_state != node_request.machine_state
        assert node_request.creator in node_request.target.contributors
        assert mock_mail.call_count == 1

    @mock.patch('website.mails.mails.send_mail')
    def test_email_sent_on_reject(self, mock_mail, app, admin, url, node_request):
        """Rejecting a request sends exactly one notification email."""
        initial_state = node_request.machine_state
        assert node_request.creator not in node_request.target.contributors
        payload = self.create_payload(node_request._id, trigger='reject')
        res = app.post_json_api(url, payload, auth=admin.auth)
        assert res.status_code == 201
        node_request.reload()
        assert initial_state != node_request.machine_state
        assert node_request.creator not in node_request.target.contributors
        assert mock_mail.call_count == 1

    @mock.patch('website.mails.mails.send_mail')
    def test_email_not_sent_on_reject(self, mock_mail, app, requester, url, node_request):
        """Editing a comment sends no notification email.

        NOTE(review): the name says "reject" but the trigger exercised is
        ``edit_comment`` — consider renaming to
        ``test_email_not_sent_on_edit_comment``.
        """
        initial_state = node_request.machine_state
        initial_comment = node_request.comment
        payload = self.create_payload(node_request._id, trigger='edit_comment', comment='ASDFG')
        res = app.post_json_api(url, payload, auth=requester.auth)
        assert res.status_code == 201
        node_request.reload()
        assert initial_state == node_request.machine_state
        assert initial_comment != node_request.comment
        assert mock_mail.call_count == 0

    def test_set_permissions_on_approve(self, app, admin, url, node_request):
        """Acceptance honours an explicit 'permissions' attribute (here: admin)."""
        assert node_request.creator not in node_request.target.contributors
        payload = self.create_payload(node_request._id, trigger='accept', permissions='admin')
        res = app.post_json_api(url, payload, auth=admin.auth)
        assert res.status_code == 201
        node_request.reload()
        assert node_request.target.has_permission(node_request.creator, permissions.ADMIN)

    def test_set_visible_on_approve(self, app, admin, url, node_request):
        """Acceptance honours an explicit 'visible' attribute (here: False)."""
        assert node_request.creator not in node_request.target.contributors
        payload = self.create_payload(node_request._id, trigger='accept', visible=False)
        res = app.post_json_api(url, payload, auth=admin.auth)
        assert res.status_code == 201
        node_request.reload()
        assert node_request.creator in node_request.target.contributors
        assert not node_request.target.get_visible(node_request.creator)

    def test_accept_request_defaults_to_read_and_visible(self, app, admin, url, node_request):
        """Without explicit attributes, acceptance grants READ and visible."""
        assert node_request.creator not in node_request.target.contributors
        payload = self.create_payload(node_request._id, trigger='accept')
        res = app.post_json_api(url, payload, auth=admin.auth)
        assert res.status_code == 201
        node_request.reload()
        assert node_request.creator in node_request.target.contributors
        assert node_request.target.has_permission(node_request.creator, permissions.READ)
        assert node_request.target.get_visible(node_request.creator)
| |
# Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import re
import stevedore
from oslo.config import cfg
from oslo import messaging
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.image import glance
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import utils
CONF = cfg.CONF
CONF.import_opt('enable_instance_password',
'nova.api.openstack.compute.servers')
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
CONF.import_opt('extensions_blacklist', 'nova.api.openstack', group='osapi_v3')
CONF.import_opt('extensions_whitelist', 'nova.api.openstack', group='osapi_v3')
LOG = logging.getLogger(__name__)
authorizer = extensions.core_authorizer('compute:v3', 'servers')
class ServersController(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
EXTENSION_CREATE_NAMESPACE = 'nova.api.v3.extensions.server.create'
EXTENSION_DESERIALIZE_EXTRACT_SERVER_NAMESPACE = (
'nova.api.v3.extensions.server.create.deserialize')
EXTENSION_REBUILD_NAMESPACE = 'nova.api.v3.extensions.server.rebuild'
EXTENSION_DESERIALIZE_EXTRACT_REBUILD_NAMESPACE = (
'nova.api.v3.extensions.server.rebuild.deserialize')
EXTENSION_UPDATE_NAMESPACE = 'nova.api.v3.extensions.server.update'
_view_builder_class = views_servers.ViewBuilderV3
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = filter(lambda l: l['rel'] == 'self',
robj.obj['server']['links'])
if link:
robj['Location'] = utils.utf8(link[0]['href'])
# Convenience return
return robj
def __init__(self, **kwargs):
    """Initialise the controller and load the create/rebuild/update
    server extension points via stevedore.

    :param kwargs: must include 'extension_info'; the rest is passed to
        the base wsgi.Controller.
    """
    def _check_load_extension(required_function):
        # Build a stevedore check_func that accepts only extensions
        # implementing `required_function` and passing the configured
        # whitelist/blacklist.
        def check_whiteblack_lists(ext):
            # Check whitelist is either empty or if not then the extension
            # is in the whitelist
            if (not CONF.osapi_v3.extensions_whitelist or
                    ext.obj.alias in CONF.osapi_v3.extensions_whitelist):
                # Check the extension is not in the blacklist
                if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:
                    return True
                else:
                    LOG.warning(_("Not loading %s because it is "
                                  "in the blacklist"), ext.obj.alias)
                    return False
            else:
                LOG.warning(
                    _("Not loading %s because it is not in the whitelist"),
                    ext.obj.alias)
                return False

        def check_load_extension(ext):
            if isinstance(ext.obj, extensions.V3APIExtensionBase):
                # Filter out for the existence of the required
                # function here rather than on every request. We
                # don't have a new abstract base class to reduce
                # duplication in the extensions as they may want
                # to implement multiple server (and other) entry
                # points if hasattr(ext.obj, 'server_create'):
                if hasattr(ext.obj, required_function):
                    LOG.debug('extension %(ext_alias)s detected by '
                              'servers extension for function %(func)s',
                              {'ext_alias': ext.obj.alias,
                               'func': required_function})
                    return check_whiteblack_lists(ext)
                else:
                    LOG.debug(
                        'extension %(ext_alias)s is missing %(func)s',
                        {'ext_alias': ext.obj.alias,
                         'func': required_function})
                    return False
            else:
                return False
        return check_load_extension

    self.extension_info = kwargs.pop('extension_info')
    super(ServersController, self).__init__(**kwargs)
    self.compute_api = compute.API()

    # Look for implementation of extension point of server creation
    self.create_extension_manager = \
        stevedore.enabled.EnabledExtensionManager(
            namespace=self.EXTENSION_CREATE_NAMESPACE,
            check_func=_check_load_extension('server_create'),
            invoke_on_load=True,
            invoke_kwds={"extension_info": self.extension_info},
            propagate_map_exceptions=True)
    if not list(self.create_extension_manager):
        LOG.debug("Did not find any server create extensions")

    # Look for implementation of extension point of server rebuild
    self.rebuild_extension_manager = \
        stevedore.enabled.EnabledExtensionManager(
            namespace=self.EXTENSION_REBUILD_NAMESPACE,
            check_func=_check_load_extension('server_rebuild'),
            invoke_on_load=True,
            invoke_kwds={"extension_info": self.extension_info},
            propagate_map_exceptions=True)
    if not list(self.rebuild_extension_manager):
        LOG.debug("Did not find any server rebuild extensions")

    # Look for implementation of extension point of server update
    self.update_extension_manager = \
        stevedore.enabled.EnabledExtensionManager(
            namespace=self.EXTENSION_UPDATE_NAMESPACE,
            check_func=_check_load_extension('server_update'),
            invoke_on_load=True,
            invoke_kwds={"extension_info": self.extension_info},
            propagate_map_exceptions=True)
    if not list(self.update_extension_manager):
        LOG.debug("Did not find any server update extensions")
def index(self, req):
    """Returns a list of server names and ids for a given user."""
    try:
        return self._get_servers(req, is_detail=False)
    except exception.Invalid as err:
        raise exc.HTTPBadRequest(explanation=err.format_message())
def detail(self, req):
    """Returns a list of server details for a given user."""
    try:
        return self._get_servers(req, is_detail=True)
    except exception.Invalid as err:
        raise exc.HTTPBadRequest(explanation=err.format_message())
def _get_servers(self, req, is_detail):
    """Returns a list of servers, based on any search options specified.

    :param req: the wsgi request; its query string supplies the search
        options
    :param is_detail: when True build the detail view, otherwise the
        index (name/id) view
    """
    search_opts = {}
    search_opts.update(req.GET)

    context = req.environ['nova.context']
    remove_invalid_options(context, search_opts,
                           self._get_server_search_options())

    # Verify search by 'status' contains a valid status.
    # Convert it to filter by vm_state or task_state for compute_api.
    status = search_opts.pop('status', None)
    if status is not None:
        vm_state, task_state = common.task_and_vm_state_from_status(status)
        if not vm_state and not task_state:
            # Unknown status: nothing can match, so short-circuit with
            # an empty result instead of querying the database.
            return {'servers': []}
        search_opts['vm_state'] = vm_state
        # When we search by vm state, task state will return 'default'.
        # So we don't need task_state search_opt.
        if 'default' not in task_state:
            search_opts['task_state'] = task_state

    if 'changes_since' in search_opts:
        try:
            parsed = timeutils.parse_isotime(search_opts['changes_since'])
        except ValueError:
            msg = _('Invalid changes_since value')
            raise exc.HTTPBadRequest(explanation=msg)
        search_opts['changes_since'] = parsed

    # By default, compute's get_all() will return deleted instances.
    # If an admin hasn't specified a 'deleted' search option, we need
    # to filter out deleted instances by setting the filter ourselves.
    # ... Unless 'changes_since' is specified, because 'changes_since'
    # should return recently deleted images according to the API spec.
    if 'deleted' not in search_opts:
        if 'changes_since' not in search_opts:
            # No 'changes_since', so we only want non-deleted servers
            search_opts['deleted'] = False

    if 'changes_since' in search_opts:
        # The db filter layer expects the dashed spelling of this key.
        search_opts['changes-since'] = search_opts.pop('changes_since')

    if search_opts.get("vm_state") == ['deleted']:
        if context.is_admin:
            search_opts['deleted'] = True
        else:
            msg = _("Only administrators may list deleted instances")
            raise exc.HTTPForbidden(explanation=msg)

    # If tenant_id is passed as a search parameter this should
    # imply that all_tenants is also enabled unless explicitly
    # disabled. Note that the tenant_id parameter is filtered out
    # by remove_invalid_options above unless the requestor is an
    # admin.
    if 'tenant_id' in search_opts and not 'all_tenants' in search_opts:
        # We do not need to add the all_tenants flag if the tenant
        # id associated with the token is the tenant id
        # specified. This is done so a request that does not need
        # the all_tenants flag does not fail because of lack of
        # policy permission for compute:get_all_tenants when it
        # doesn't actually need it.
        if context.project_id != search_opts.get('tenant_id'):
            search_opts['all_tenants'] = 1

    # If all tenants is passed with 0 or false as the value
    # then remove it from the search options. Nothing passed as
    # the value for all_tenants is considered to enable the feature
    all_tenants = search_opts.get('all_tenants')
    if all_tenants:
        try:
            if not strutils.bool_from_string(all_tenants, True):
                del search_opts['all_tenants']
        except ValueError as err:
            raise exception.InvalidInput(str(err))

    if 'all_tenants' in search_opts:
        policy.enforce(context, 'compute:get_all_tenants',
                       {'project_id': context.project_id,
                        'user_id': context.user_id})
        del search_opts['all_tenants']
    else:
        # Non-all-tenants queries are scoped to the caller's project
        # (or user when there is no project in the context).
        if context.project_id:
            search_opts['project_id'] = context.project_id
        else:
            search_opts['user_id'] = context.user_id

    limit, marker = common.get_limit_and_marker(req)
    try:
        instance_list = self.compute_api.get_all(context,
                search_opts=search_opts, limit=limit, marker=marker,
                want_objects=True, expected_attrs=['pci_devices'])
    except exception.MarkerNotFound:
        msg = _('marker [%s] not found') % marker
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.FlavorNotFound:
        # A nonexistent flavor can match nothing; return an empty list
        # rather than erroring out.
        log_msg = _("Flavor '%s' could not be found ")
        LOG.debug(log_msg, search_opts['flavor'])
        # TODO(mriedem): Move to ObjectListBase.__init__ for empty lists.
        instance_list = instance_obj.InstanceList(objects=[])

    if is_detail:
        instance_list.fill_faults()
        response = self._view_builder.detail(req, instance_list)
    else:
        response = self._view_builder.index(req, instance_list)
    req.cache_db_instances(instance_list)
    return response
def _get_server(self, context, req, instance_uuid):
    """Utility function for looking up an instance by uuid."""
    instance = common.get_instance(
        self.compute_api, context, instance_uuid,
        want_objects=True, expected_attrs=['pci_devices'])
    # Cache on the request so later handlers avoid a second DB hit.
    req.cache_db_instance(instance)
    return instance
def _check_string_length(self, value, name, max_length=None):
    """Validate a string's (stripped) length, raising HTTP 400 on failure."""
    if isinstance(value, six.string_types):
        value = value.strip()
    try:
        utils.check_string_length(value, name, min_length=1,
                                  max_length=max_length)
    except exception.InvalidInput as e:
        raise exc.HTTPBadRequest(explanation=e.format_message())
def _validate_server_name(self, value):
    # Server display names are capped at 255 characters (DB column size).
    self._check_string_length(value, 'Server name', max_length=255)
def _get_requested_networks(self, requested_networks):
    """Create a list of requested networks from the networks attribute.

    Each entry may carry 'uuid', an optional 'fixed_ip', and (neutron
    only) a 'port'.  Returns (uuid, ip, port) triples for neutron, or
    de-duplicated (uuid, ip) pairs for nova-network.
    """
    networks = []
    for network in requested_networks:
        try:
            # fixed IP address is optional
            # if the fixed IP address is not provided then
            # it will use one of the available IP address from the network
            address = network.get('fixed_ip', None)
            if address is not None and not utils.is_valid_ip_address(
                    address):
                msg = _("Invalid fixed IP address (%s)") % address
                raise exc.HTTPBadRequest(explanation=msg)

            port_id = network.get('port', None)
            if port_id:
                network_uuid = None
                if not utils.is_neutron():
                    # port parameter is only for neutron v2.0
                    msg = _("Unknown argument: port")
                    raise exc.HTTPBadRequest(explanation=msg)
                if not uuidutils.is_uuid_like(port_id):
                    msg = _("Bad port format: port uuid is "
                            "not in proper format "
                            "(%s)") % port_id
                    raise exc.HTTPBadRequest(explanation=msg)
                if address is not None:
                    # A port carries its own fixed IP; a second one is
                    # contradictory.
                    msg = _("Specified Fixed IP '%(addr)s' cannot be used "
                            "with port '%(port)s': port already has "
                            "a Fixed IP allocated.") % {"addr": address,
                                                        "port": port_id}
                    raise exc.HTTPBadRequest(explanation=msg)
            else:
                network_uuid = network['uuid']

            if not port_id and not uuidutils.is_uuid_like(network_uuid):
                # Tolerate 'br-<uuid>'-style ids by re-checking the part
                # after the first dash for uuid-likeness.
                br_uuid = network_uuid.split('-', 1)[-1]
                if not uuidutils.is_uuid_like(br_uuid):
                    msg = _("Bad networks format: network uuid is "
                            "not in proper format "
                            "(%s)") % network_uuid
                    raise exc.HTTPBadRequest(explanation=msg)

            # For neutronv2, requested_networks
            # should be tuple of (network_uuid, fixed_ip, port_id)
            if utils.is_neutron():
                networks.append((network_uuid, address, port_id))
            else:
                # check if the network id is already present in the list,
                # we don't want duplicate networks to be passed
                # at the boot time
                for id, ip in networks:
                    if id == network_uuid:
                        expl = (_("Duplicate networks"
                                  " (%s) are not allowed") %
                                network_uuid)
                        raise exc.HTTPBadRequest(explanation=expl)
                networks.append((network_uuid, address))
        except KeyError as key:
            expl = _('Bad network format: missing %s') % key
            raise exc.HTTPBadRequest(explanation=expl)
        except TypeError:
            expl = _('Bad networks format')
            raise exc.HTTPBadRequest(explanation=expl)

    return networks
# NOTE(vish): Without this regex, b64decode will happily
#             ignore illegal bytes in the base64 encoded
#             data.
B64_REGEX = re.compile('^(?:[A-Za-z0-9+\/]{4})*'
                       '(?:[A-Za-z0-9+\/]{2}=='
                       '|[A-Za-z0-9+\/]{3}=)?$')

def _decode_base64(self, data):
    """Strictly decode base64 *data*, returning None when it is invalid."""
    # Strip all whitespace first so the strict regex sees a contiguous
    # payload (b64decode itself would simply skip whitespace).
    data = re.sub(r'\s', '', data)
    if not self.B64_REGEX.match(data):
        return None
    try:
        return base64.b64decode(data)
    except TypeError:
        return None
def show(self, req, id):
    """Returns server details by server id."""
    context = req.environ['nova.context']
    instance = common.get_instance(
        self.compute_api, context, id,
        want_objects=True, expected_attrs=['pci_devices'])
    # Cache for later middleware/handlers on the same request.
    req.cache_db_instance(instance)
    return self._view_builder.show(req, instance)
@wsgi.response(202)
def create(self, req, body):
    """Creates a new server for a given user.

    :param req: the wsgi request
    :param body: deserialized request body; must contain a 'server' dict
    :returns: a ResponseObject for the created server, or — when an
        extension set 'return_reservation_id' — just the reservation id
    """
    if not self.is_valid_body(body, 'server'):
        raise exc.HTTPBadRequest(_("The request body is invalid"))

    context = req.environ['nova.context']
    server_dict = body['server']
    password = self._get_server_admin_password(server_dict)

    if 'name' not in server_dict:
        msg = _("Server name is not defined")
        raise exc.HTTPBadRequest(explanation=msg)

    name = server_dict['name']
    self._validate_server_name(name)
    name = name.strip()

    # Arguments to be passed to instance create function
    create_kwargs = {}

    # Query extensions which want to manipulate the keyword
    # arguments.
    # NOTE(cyeoh): This is the hook that extensions use
    # to replace the extension specific code below.
    # When the extensions are ported this will also result
    # in some convenience function from this class being
    # moved to the extension
    if list(self.create_extension_manager):
        self.create_extension_manager.map(self._create_extension_point,
                                          server_dict, create_kwargs)

    image_uuid = self._image_from_req_data(server_dict, create_kwargs)

    # NOTE(cyeoh): Although an extension can set
    # return_reservation_id in order to request that a reservation
    # id be returned to the client instead of the newly created
    # instance information we do not want to pass this parameter
    # to the compute create call which always returns both. We use
    # this flag after the instance create call to determine what
    # to return to the client
    return_reservation_id = create_kwargs.pop('return_reservation_id',
                                              False)

    requested_networks = None
    # TODO(cyeoh): bp v3-api-core-as-extensions
    # Replace with an extension point when the os-networks
    # extension is ported. Currently reworked
    # to take into account is_neutron
    #if (self.ext_mgr.is_loaded('os-networks')
    #        or utils.is_neutron()):
    #    requested_networks = server_dict.get('networks')
    if utils.is_neutron():
        requested_networks = server_dict.get('networks')

    if requested_networks is not None:
        requested_networks = self._get_requested_networks(
            requested_networks)

    try:
        flavor_id = self._flavor_id_from_req_data(body)
    except ValueError as error:
        msg = _("Invalid flavor_ref provided.")
        raise exc.HTTPBadRequest(explanation=msg)

    # Translate every known compute-layer failure into the matching
    # HTTP error; anything unlisted propagates as a 500.
    try:
        inst_type = flavors.get_flavor_by_flavor_id(
            flavor_id, ctxt=context, read_deleted="no")

        (instances, resv_id) = self.compute_api.create(context,
                        inst_type,
                        image_uuid,
                        display_name=name,
                        display_description=name,
                        metadata=server_dict.get('metadata', {}),
                        admin_password=password,
                        requested_networks=requested_networks,
                        **create_kwargs)
    except (exception.QuotaError,
            exception.PortLimitExceeded) as error:
        raise exc.HTTPRequestEntityTooLarge(
            explanation=error.format_message(),
            headers={'Retry-After': 0})
    except exception.InvalidMetadataSize as error:
        raise exc.HTTPRequestEntityTooLarge(
            explanation=error.format_message())
    except exception.ImageNotFound as error:
        msg = _("Can not find requested image")
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.FlavorNotFound as error:
        msg = _("Invalid flavor_ref provided.")
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.KeypairNotFound as error:
        msg = _("Invalid key_name provided.")
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.ConfigDriveInvalidValue:
        msg = _("Invalid config_drive provided.")
        raise exc.HTTPBadRequest(explanation=msg)
    except messaging.RemoteError as err:
        msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
                                             'err_msg': err.value}
        raise exc.HTTPBadRequest(explanation=msg)
    except UnicodeDecodeError as error:
        msg = "UnicodeError: %s" % unicode(error)
        raise exc.HTTPBadRequest(explanation=msg)
    except (exception.ImageNotActive,
            exception.FlavorDiskTooSmall,
            exception.FlavorMemoryTooSmall,
            exception.InvalidMetadata,
            exception.InvalidRequest,
            exception.MultiplePortsNotApplicable,
            exception.InstanceUserDataMalformed,
            exception.PortNotFound,
            exception.SecurityGroupNotFound,
            exception.PortRequiresFixedIP,
            exception.NetworkRequiresSubnet,
            exception.NetworkNotFound) as error:
        raise exc.HTTPBadRequest(explanation=error.format_message())
    except (exception.PortInUse,
            exception.NoUniqueMatch) as error:
        raise exc.HTTPConflict(explanation=error.format_message())

    # If the caller wanted a reservation_id, return it
    if return_reservation_id:
        return wsgi.ResponseObject(
            {'servers_reservation': {'reservation_id': resv_id}})

    req.cache_db_instances(instances)
    server = self._view_builder.create(req, instances[0])

    if CONF.enable_instance_password:
        server['server']['admin_password'] = password

    robj = wsgi.ResponseObject(server)

    return self._add_location(robj)
def _create_extension_point(self, ext, server_dict, create_kwargs):
    """Invoke one loaded extension's server_create hook."""
    LOG.debug("Running _create_extension_point for %s", ext.obj)
    ext.obj.server_create(server_dict, create_kwargs)
def _rebuild_extension_point(self, ext, rebuild_dict, rebuild_kwargs):
    """Invoke one loaded extension's server_rebuild hook."""
    LOG.debug("Running _rebuild_extension_point for %s", ext.obj)
    ext.obj.server_rebuild(rebuild_dict, rebuild_kwargs)
def _resize_extension_point(self, ext, resize_dict, resize_kwargs):
    """Invoke one loaded extension's server_resize hook."""
    LOG.debug("Running _resize_extension_point for %s", ext.obj)
    ext.obj.server_resize(resize_dict, resize_kwargs)
def _update_extension_point(self, ext, update_dict, update_kwargs):
    """Invoke one loaded extension's server_update hook."""
    LOG.debug("Running _update_extension_point for %s", ext.obj)
    ext.obj.server_update(update_dict, update_kwargs)
def _delete(self, context, req, instance_uuid):
    """Soft-delete the instance when reclaim is enabled, else hard delete."""
    instance = self._get_server(context, req, instance_uuid)
    if not CONF.reclaim_instance_interval:
        self.compute_api.delete(context, instance)
        return
    try:
        self.compute_api.soft_delete(context, instance)
    except exception.InstanceInvalidState:
        # Note(yufang521247): instance which has never been active
        # is not allowed to be soft_deleted. Thus we have to call
        # delete() to clean up the instance.
        self.compute_api.delete(context, instance)
def update(self, req, id, body):
    """Update server then pass on to version-specific controller.

    Only 'name' is handled here; loaded update extensions may add more
    fields via the update extension point.  'host_id' is explicitly
    rejected.
    """
    if not self.is_valid_body(body, 'server'):
        raise exc.HTTPBadRequest(_("The request body is invalid"))

    ctxt = req.environ['nova.context']
    update_dict = {}

    if 'name' in body['server']:
        name = body['server']['name']
        self._validate_server_name(name)
        update_dict['display_name'] = name.strip()

    if 'host_id' in body['server']:
        msg = _("host_id cannot be updated.")
        raise exc.HTTPBadRequest(explanation=msg)

    if list(self.update_extension_manager):
        self.update_extension_manager.map(self._update_extension_point,
                                          body['server'], update_dict)

    instance = common.get_instance(self.compute_api, ctxt, id,
                                   want_objects=True,
                                   expected_attrs=['pci_devices'])

    try:
        # NOTE(mikal): this try block needs to stay because save() still
        # might throw an exception.
        req.cache_db_instance(instance)
        policy.enforce(ctxt, 'compute:update', instance)
        instance.update(update_dict)
        instance.save()
        return self._view_builder.show(req, instance)
    except exception.NotFound:
        msg = _("Instance could not be found")
        raise exc.HTTPNotFound(explanation=msg)
@wsgi.response(202)
@wsgi.action('confirm_resize')
def _action_confirm_resize(self, req, id, body):
    """Confirm a pending resize for the given server."""
    context = req.environ['nova.context']
    instance = self._get_server(context, req, id)
    try:
        self.compute_api.confirm_resize(context, instance)
    except exception.MigrationNotFound:
        raise exc.HTTPBadRequest(
            explanation=_("Instance has not been resized."))
    except exception.InstanceIsLocked as e:
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(
            state_error, 'confirm_resize')
@wsgi.response(202)
@wsgi.action('revert_resize')
def _action_revert_resize(self, req, id, body):
    """Revert a pending resize, returning to the previous flavor."""
    context = req.environ['nova.context']
    instance = self._get_server(context, req, id)
    try:
        self.compute_api.revert_resize(context, instance)
    except exception.MigrationNotFound:
        raise exc.HTTPBadRequest(
            explanation=_("Instance has not been resized."))
    except exception.FlavorNotFound:
        raise exc.HTTPBadRequest(
            explanation=_("Flavor used by the instance could not be "
                          "found."))
    except exception.InstanceIsLocked as e:
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(
            state_error, 'revert_resize')
    return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
    """Reboot a server.

    The body must carry a 'reboot' dict with a string 'type' of either
    HARD or SOFT (case-insensitive).
    """
    if 'reboot' in body and 'type' in body['reboot']:
        if not isinstance(body['reboot']['type'], six.string_types):
            msg = _("Argument 'type' for reboot must be a string")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        reboot_type = body['reboot']['type'].upper()
        # Membership test replaces the old list.count() truthiness
        # check — same semantics, clearer intent.
        if reboot_type not in ('HARD', 'SOFT'):
            msg = _("Argument 'type' for reboot is not HARD or SOFT")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
    else:
        msg = _("Missing argument 'type' for reboot")
        LOG.error(msg)
        raise exc.HTTPBadRequest(explanation=msg)

    context = req.environ['nova.context']
    instance = self._get_server(context, req, id)

    try:
        self.compute_api.reboot(context, instance, reboot_type)
    except exception.InstanceIsLocked as e:
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(state_error,
                'reboot')
    return webob.Response(status_int=202)
def _resize(self, req, instance_id, flavor_id, **kwargs):
    """Begin the resize process with given instance/flavor.

    Translates the compute layer's exceptions into the matching HTTP
    errors; on success returns a 202 response.
    """
    context = req.environ["nova.context"]
    instance = self._get_server(context, req, instance_id)

    try:
        self.compute_api.resize(context, instance, flavor_id, **kwargs)
    except exception.QuotaError as error:
        raise exc.HTTPRequestEntityTooLarge(
            explanation=error.format_message(),
            headers={'Retry-After': 0})
    except exception.FlavorNotFound:
        msg = _("Unable to locate requested flavor.")
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.CannotResizeToSameFlavor:
        msg = _("Resize requires a flavor change.")
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.InstanceIsLocked as e:
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(state_error,
                'resize')
    except exception.ImageNotAuthorized:
        msg = _("You are not authorized to access the image "
                "the instance was started with.")
        raise exc.HTTPUnauthorized(explanation=msg)
    except exception.ImageNotFound:
        msg = _("Image that the instance was started "
                "with could not be found.")
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.Invalid:
        msg = _("Invalid instance image.")
        raise exc.HTTPBadRequest(explanation=msg)

    return webob.Response(status_int=202)
@wsgi.response(204)
def delete(self, req, id):
    """Destroys a server."""
    try:
        self._delete(req.environ['nova.context'], req, id)
    except exception.NotFound:
        raise exc.HTTPNotFound(
            explanation=_("Instance could not be found"))
    except exception.InstanceIsLocked as e:
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(
            state_error, 'delete')
def _image_uuid_from_href(self, image_href):
    """Extract an image uuid from a glance href; 400 if not uuid-like."""
    # Hrefs generated by the nova API end in the image id; take the last
    # path segment and use the default glance connection params.
    image_uuid = image_href.rsplit('/', 1)[-1]

    if not uuidutils.is_uuid_like(image_uuid):
        msg = _("Invalid image_ref provided.")
        raise exc.HTTPBadRequest(explanation=msg)

    return image_uuid
def _image_from_req_data(self, server_dict, create_kwargs):
    """Get image data from the request or raise appropriate
    exceptions.

    The field image_ref is mandatory when no block devices have been
    defined and must be a proper uuid when present.
    """
    image_href = server_dict.get('image_ref')
    if image_href:
        return self._image_uuid_from_href(unicode(image_href))
    # No image reference: acceptable only when booting from a volume.
    if create_kwargs.get('block_device_mapping'):
        return ''
    msg = _("Missing image_ref attribute")
    raise exc.HTTPBadRequest(explanation=msg)
def _flavor_id_from_req_data(self, data):
    """Pull flavor_ref out of the request body and convert it to an id."""
    try:
        flavor_ref = data['server']['flavor_ref']
    except (KeyError, TypeError):
        raise exc.HTTPBadRequest(
            explanation=_("Missing flavor_ref attribute"))
    return common.get_id_from_href(flavor_ref)
@wsgi.response(202)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
    """Resizes a given instance to the flavor size requested."""
    resize_dict = body['resize']
    try:
        flavor_ref = str(resize_dict["flavor_ref"])
    except (KeyError, TypeError):
        msg = _("Resize requests require 'flavor_ref' attribute.")
        raise exc.HTTPBadRequest(explanation=msg)
    # An empty flavor_ref is as bad as a missing one, but gets its own
    # message.
    if not flavor_ref:
        msg = _("Resize request has invalid 'flavor_ref' attribute.")
        raise exc.HTTPBadRequest(explanation=msg)
    return self._resize(req, id, flavor_ref)
@wsgi.response(202)
@wsgi.action('rebuild')
def _action_rebuild(self, req, id, body):
    """Rebuild an instance with the given attributes."""
    rebuild_dict = body['rebuild']

    try:
        image_href = rebuild_dict["image_ref"]
    except (KeyError, TypeError):
        msg = _("Could not parse image_ref from request.")
        raise exc.HTTPBadRequest(explanation=msg)

    image_href = self._image_uuid_from_href(image_href)

    password = self._get_server_admin_password(rebuild_dict)

    context = req.environ['nova.context']
    instance = self._get_server(context, req, id)

    # Request attributes that map straight onto instance attributes.
    attr_map = {
        'name': 'display_name',
        'metadata': 'metadata',
    }

    rebuild_kwargs = {}

    if 'name' in rebuild_dict:
        self._validate_server_name(rebuild_dict['name'])

    if 'preserve_ephemeral' in rebuild_dict:
        rebuild_kwargs['preserve_ephemeral'] = strutils.bool_from_string(
            rebuild_dict['preserve_ephemeral'], strict=True)

    if list(self.rebuild_extension_manager):
        self.rebuild_extension_manager.map(self._rebuild_extension_point,
                                           rebuild_dict, rebuild_kwargs)

    for request_attribute, instance_attribute in attr_map.items():
        try:
            rebuild_kwargs[instance_attribute] = rebuild_dict[
                request_attribute]
        except (KeyError, TypeError):
            # Absent attributes are simply not passed along.
            pass

    try:
        self.compute_api.rebuild(context,
                                 instance,
                                 image_href,
                                 password,
                                 **rebuild_kwargs)
    except exception.InstanceIsLocked as e:
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(state_error,
                'rebuild')
    except exception.InstanceNotFound:
        msg = _("Instance could not be found")
        raise exc.HTTPNotFound(explanation=msg)
    except exception.InvalidMetadataSize as error:
        raise exc.HTTPRequestEntityTooLarge(
            explanation=error.format_message())
    except exception.ImageNotFound:
        msg = _("Cannot find image for rebuild")
        raise exc.HTTPBadRequest(explanation=msg)
    except (exception.ImageNotActive,
            exception.FlavorDiskTooSmall,
            exception.FlavorMemoryTooSmall,
            exception.InvalidMetadata) as error:
        raise exc.HTTPBadRequest(explanation=error.format_message())

    # Re-fetch so the returned view reflects the rebuilt state.
    instance = self._get_server(context, req, id)

    view = self._view_builder.show(req, instance)

    # Add on the admin_password attribute since the view doesn't do it
    # unless instance passwords are disabled
    if CONF.enable_instance_password:
        view['server']['admin_password'] = password

    robj = wsgi.ResponseObject(view)
    return self._add_location(robj)
@wsgi.response(202)
@wsgi.action('create_image')
@common.check_snapshots_enabled
def _action_create_image(self, req, id, body):
    """Snapshot a server instance.

    Volume-backed instances go through the volume snapshot path;
    everything else through the regular image snapshot path.  The new
    image's location is returned in the Location header.
    """
    context = req.environ['nova.context']
    entity = body.get("create_image", {})

    image_name = entity.get("name")

    if not image_name:
        msg = _("create_image entity requires name attribute")
        raise exc.HTTPBadRequest(explanation=msg)

    props = {}
    metadata = entity.get('metadata', {})
    common.check_img_metadata_properties_quota(context, metadata)
    try:
        props.update(metadata)
    except ValueError:
        msg = _("Invalid metadata")
        raise exc.HTTPBadRequest(explanation=msg)

    instance = self._get_server(context, req, id)

    bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)

    try:
        if self.compute_api.is_volume_backed_instance(context, instance,
                                                      bdms):
            img = instance['image_ref']
            if not img:
                # Booted directly from a volume: derive the image
                # properties from the root volume's metadata.
                props = bdms.root_metadata(
                    context, self.compute_api.image_service,
                    self.compute_api.volume_api)
                image_meta = {'properties': props}
            else:
                src_image = self.compute_api.image_service.show(context,
                                                                img)
                image_meta = dict(src_image)

            image = self.compute_api.snapshot_volume_backed(
                context,
                instance,
                image_meta,
                image_name,
                extra_properties=props)
        else:
            image = self.compute_api.snapshot(context,
                                              instance,
                                              image_name,
                                              extra_properties=props)
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(state_error,
                'create_image')
    except exception.Invalid as err:
        raise exc.HTTPBadRequest(explanation=err.format_message())

    # build location of newly-created image entity
    image_id = str(image['id'])
    image_ref = glance.generate_image_url(image_id)

    resp = webob.Response(status_int=202)
    resp.headers['Location'] = image_ref
    return resp
def _get_server_admin_password(self, server):
    """Determine the admin password for a server on creation."""
    if 'admin_password' not in server:
        # No password supplied: generate one.
        return utils.generate_password()
    password = server['admin_password']
    try:
        self._validate_admin_password(password)
    except ValueError:
        raise exc.HTTPBadRequest(explanation=_("Invalid admin_password"))
    return password
def _validate_admin_password(self, password):
    """Raise ValueError unless *password* is a string."""
    if isinstance(password, six.string_types):
        return
    raise ValueError()
def _get_server_search_options(self):
    """Return server search options allowed by non-admin."""
    # Anything outside this tuple is stripped from non-admin queries by
    # remove_invalid_options().
    return ('reservation_id', 'name', 'status', 'image', 'flavor',
            'ip', 'changes_since', 'all_tenants')
def _get_instance(self, context, instance_uuid):
    """Load an Instance object, translating NotFound into HTTP 404."""
    try:
        return instance_obj.Instance.get_by_uuid(
            context, instance_uuid,
            expected_attrs=['system_metadata', 'metadata'])
    except exception.InstanceNotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.format_message())
@extensions.expected_errors((404, 409))
@wsgi.action('start')
def _start_server(self, req, id, body):
    """Start an instance."""
    context = req.environ['nova.context']
    instance = self._get_instance(context, id)
    authorizer(context, instance, 'start')
    LOG.debug('start instance', instance=instance)
    conflict_excs = (exception.InstanceNotReady,
                     exception.InstanceIsLocked,
                     exception.InstanceInvalidState)
    try:
        self.compute_api.start(context, instance)
    except conflict_excs as e:
        raise webob.exc.HTTPConflict(explanation=e.format_message())
    return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('stop')
def _stop_server(self, req, id, body):
    """Stop an instance."""
    context = req.environ['nova.context']
    instance = self._get_instance(context, id)
    authorizer(context, instance, 'stop')
    LOG.debug('stop instance', instance=instance)
    conflict_excs = (exception.InstanceNotReady,
                     exception.InstanceIsLocked,
                     exception.InstanceInvalidState)
    try:
        self.compute_api.stop(context, instance)
    except conflict_excs as e:
        raise webob.exc.HTTPConflict(explanation=e.format_message())
    return webob.Response(status_int=202)
def remove_invalid_options(context, search_options, allowed_search_options):
    """Remove search options that are not valid for non-admin API/context."""
    if context.is_admin:
        # Admins may filter on anything.
        return
    # Strip every option not in the allowed set, logging what was dropped.
    allowed = set(allowed_search_options)
    unknown_options = [opt for opt in search_options if opt not in allowed]
    LOG.debug("Removing options '%s' from query",
              ", ".join(unknown_options))
    for opt in unknown_options:
        search_options.pop(opt, None)
class Servers(extensions.V3APIExtensionBase):
    """Servers."""

    name = "Servers"
    alias = "servers"
    version = 1

    def get_resources(self):
        """Register the /servers resource, its detail route and actions."""
        member_actions = {'action': 'POST'}
        collection_actions = {'detail': 'GET'}
        resources = [
            extensions.ResourceExtension(
                'servers',
                ServersController(extension_info=self.extension_info),
                member_name='server', collection_actions=collection_actions,
                member_actions=member_actions)]

        return resources

    def get_controller_extensions(self):
        """This extension contributes nothing to other controllers."""
        return []
| |
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""A library of static analysis functions for computation types."""
import collections
from typing import Any, Callable, Optional
import tensorflow as tf
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.impl.types import type_transformations
_TypePredicate = Callable[[computation_types.Type], bool]
def preorder_types(type_signature: computation_types.Type):
  """Yields each type in `type_signature` in a preorder fashion."""
  yield type_signature
  for child in type_signature.children():
    for descendant in preorder_types(child):
      yield descendant
def count(type_signature: computation_types.Type,
          predicate: _TypePredicate) -> int:
  """Returns the number of types in `type_signature` matching `predicate`.

  Args:
    type_signature: A tree of `computation_type.Type`s to count.
    predicate: A Python function that takes a type as a parameter and returns a
      boolean value.
  """
  return sum(1 for t in preorder_types(type_signature) if predicate(t))
def contains(type_signature: computation_types.Type,
             predicate: _TypePredicate) -> bool:
  """Checks if `type_signature` contains any types that pass `predicate`."""
  return any(predicate(t) for t in preorder_types(type_signature))
def contains_federated_types(type_signature):
  """Returns whether or not `type_signature` contains a federated type."""

  def _is_federated(inner_type):
    return inner_type.is_federated()

  return contains(type_signature, _is_federated)
def contains_tensor_types(type_signature):
  """Returns whether or not `type_signature` contains a tensor type."""

  def _is_tensor(inner_type):
    return inner_type.is_tensor()

  return contains(type_signature, _is_tensor)
def contains_only(
    type_signature: computation_types.Type,
    predicate: _TypePredicate,
) -> bool:
  """Checks if `type_signature` contains only types that pass `predicate`."""
  # Equivalent (by De Morgan) to: not contains(t, lambda x: not predicate(x)).
  return all(predicate(t) for t in preorder_types(type_signature))
def check_type(value: Any, type_spec: computation_types.Type):
  """Checks whether `value` is of TFF type `type_spec`.

  Args:
    value: The object to check.
    type_spec: A `computation_types.Type`, the type that `value` is checked
      against.

  Raises:
    TypeError: If the inferred type of `value` is not assignable to `type_spec`.
  """
  py_typecheck.check_type(type_spec, computation_types.Type)
  value_type = type_conversions.infer_type(value)
  if not type_spec.is_assignable_from(value_type):
    # `second_is_expected=True` marks `type_spec` as the "expected" side in
    # the generated mismatch message.
    raise TypeError(
        computation_types.type_mismatch_error_message(
            value_type,
            type_spec,
            computation_types.TypeRelation.ASSIGNABLE,
            second_is_expected=True))
def is_tensorflow_compatible_type(type_spec):
  """Checks `type_spec` against an explicit list of `tf_computation`.

  A `None` type (no value) is considered compatible; otherwise every node of
  the type tree must be a struct, sequence, or tensor type.
  """
  if type_spec is None:
    return True

  def _is_tf_kind(inner_type):
    return (inner_type.is_struct() or inner_type.is_sequence() or
            inner_type.is_tensor())

  return contains_only(type_spec, _is_tf_kind)
def is_structure_of_tensors(type_spec):
  """Returns `True` iff `type_spec` contains only struct and tensor types."""
  return contains_only(type_spec, lambda t: t.is_struct() or t.is_tensor())
def check_tensorflow_compatible_type(type_spec):
  """Raises `TypeError` unless `type_spec` is TensorFlow-compatible."""
  if is_tensorflow_compatible_type(type_spec):
    return
  raise TypeError(
      'Expected type to be compatible with TensorFlow (i.e. tensor, '
      'sequence, or tuple types), found {}.'.format(type_spec))
def is_generic_op_compatible_type(type_spec):
  """Checks `type_spec` against an explicit list of generic operators.

  Unlike TensorFlow compatibility, a `None` type is rejected and sequences
  are not permitted: only struct and tensor types may appear.
  """
  if type_spec is None:
    return False

  def _is_struct_or_tensor(inner_type):
    return inner_type.is_struct() or inner_type.is_tensor()

  return contains_only(type_spec, _is_struct_or_tensor)
def is_binary_op_with_upcast_compatible_pair(
    possibly_nested_type: Optional[computation_types.Type],
    type_to_upcast: computation_types.Type) -> bool:
  """Checks unambiguity in applying `type_to_upcast` to `possibly_nested_type`.

  That is, checks that either these types are equivalent and contain only
  tuples and tensors, or that
  `possibly_nested_type` is perhaps a nested structure containing only tensors
  with `dtype` of `type_to_upcast` at the leaves, where `type_to_upcast` must
  be a scalar tensor type. Notice that this relationship is not symmetric,
  since binary operators need not respect this symmetry in general.
  For example, it makes perfect sense to divide a nested structure of tensors
  by a scalar, but not the other way around.

  Args:
    possibly_nested_type: A `computation_types.Type`, or `None`.
    type_to_upcast: A `computation_types.Type`, or `None`.

  Returns:
    Boolean indicating whether `type_to_upcast` can be upcast to
    `possibly_nested_type` in the manner described above.
  """
  if possibly_nested_type is not None:
    py_typecheck.check_type(possibly_nested_type, computation_types.Type)
  if type_to_upcast is not None:
    py_typecheck.check_type(type_to_upcast, computation_types.Type)
  # Both sides must be struct/tensor-only types; `None` fails this check too.
  if not (is_generic_op_compatible_type(possibly_nested_type) and
          is_generic_op_compatible_type(type_to_upcast)):
    return False
  if possibly_nested_type is None:
    return type_to_upcast is None
  # Equivalent types are trivially compatible.
  if possibly_nested_type.is_equivalent_to(type_to_upcast):
    return True
  # Otherwise, upcasting is only defined from a scalar (rank-0) tensor.
  if not (type_to_upcast.is_tensor() and type_to_upcast.shape == tf.TensorShape(
      ())):
    return False
  # Walk the nested structure and require every tensor leaf to share the
  # scalar's dtype. A one-element list serves as a mutable cell so the
  # nested visitor can record a failure.
  types_are_ok = [True]
  only_allowed_dtype = type_to_upcast.dtype

  def _check_tensor_types(type_spec):
    if type_spec.is_tensor() and type_spec.dtype != only_allowed_dtype:
      types_are_ok[0] = False
    # Returning `False` signals that the type was not modified.
    return type_spec, False

  type_transformations.transform_type_postorder(possibly_nested_type,
                                                _check_tensor_types)
  return types_are_ok[0]
def check_all_abstract_types_are_bound(type_spec):
  """Checks that all abstract types labels appearing in 'type_spec' are bound.

  For abstract types to be bound, it means that type labels appearing on the
  result side of functional type signatures must also appear on the parameter
  side. This check is intended to verify that abstract types are only used to
  model template-like type signatures, and can always be reduced to a concrete
  type by specializing templates to work with specific sets of arguments.

  Examples of valid types that pass this check successfully:

    int32
    (int32 -> int32)
    ( -> int32)
    (T -> T)
    ((T -> T) -> bool)
    (( -> T) -> T)
    (<T*, ((T, T) -> T)> -> T)
    (T* -> int32)
    ( -> (T -> T))
    <T, (U -> U), U> -> <T, U>

  Examples of invalid types that fail this check because 'T' is unbound:

    T
    (int32 -> T)
    ( -> T)
    (T -> U)

  Args:
    type_spec: An instance of computation_types.Type, or something convertible
      to it.

  Raises:
    TypeError: if arguments are of the wrong types, or if unbound type labels
      occur in 'type_spec'.
  """

  def _check_or_get_unbound_abstract_type_labels(type_spec, bound_labels,
                                                 check):
    """Checks or collects abstract type labels from 'type_spec'.

    This is a helper function used by 'check_all_abstract_types_are_bound',
    not to be exported out of this module.

    Args:
      type_spec: An instance of computation_types.Type.
      bound_labels: A set of string labels that refer to 'bound' abstract
        types, i.e., ones that appear on the parameter side of a functional
        type.
      check: A bool value. If True, no new unbound type labels are permitted,
        and if False, any new labels encountered are returned as a set.

    Returns:
      If check is False, a set of new abstract type labels introduced in
      'type_spec' that don't yet appear in the set 'bound_labels'. If check is
      True, always returns an empty set.

    Raises:
      TypeError: if unbound labels are found and check is True.
    """
    py_typecheck.check_type(type_spec, computation_types.Type)
    if type_spec.is_tensor():
      # Tensors cannot carry abstract labels.
      return set()
    elif type_spec.is_sequence():
      return _check_or_get_unbound_abstract_type_labels(type_spec.element,
                                                        bound_labels, check)
    elif type_spec.is_federated():
      return _check_or_get_unbound_abstract_type_labels(type_spec.member,
                                                        bound_labels, check)
    elif type_spec.is_struct():
      # Union of labels found in all struct elements.
      return set().union(*[
          _check_or_get_unbound_abstract_type_labels(v, bound_labels, check)
          for _, v in structure.iter_elements(type_spec)
      ])
    elif type_spec.is_abstract():
      if type_spec.label in bound_labels:
        return set()
      elif not check:
        return set([type_spec.label])
      else:
        raise TypeError('Unbound type label \'{}\'.'.format(type_spec.label))
    elif type_spec.is_function():
      # Labels introduced on the parameter side become bound for the result
      # side; the result is then checked against the enlarged bound set.
      if type_spec.parameter is None:
        parameter_labels = set()
      else:
        parameter_labels = _check_or_get_unbound_abstract_type_labels(
            type_spec.parameter, bound_labels, False)
      result_labels = _check_or_get_unbound_abstract_type_labels(
          type_spec.result, bound_labels.union(parameter_labels), check)
      return parameter_labels.union(result_labels)

  _check_or_get_unbound_abstract_type_labels(type_spec, set(), True)
def is_numeric_dtype(dtype):
  """Returns True iff `dtype` is numeric.

  Args:
    dtype: An instance of tf.DType.

  Returns:
    True iff `dtype` is numeric, i.e., integer, float, or complex.
  """
  py_typecheck.check_type(dtype, tf.DType)
  if dtype.is_integer:
    return True
  return dtype.is_floating or dtype.is_complex
class SumIncompatibleError(TypeError):
  """Raised when a type cannot be added to itself via the sum operator."""

  def __init__(self, type_spec, type_spec_context, reason):
    parts = [
        'Expected a type which is compatible with the sum operator, found\n',
        f'{type_spec_context}\nwhich contains\n{type_spec}\nwhich is not ',
        f'sum-compatible because {reason}.',
    ]
    super().__init__(''.join(parts))
def check_is_sum_compatible(type_spec, type_spec_context=None):
  """Determines if `type_spec` is a type that can be added to itself.

  Types that are sum-compatible are composed of scalars of numeric types,
  possibly packaged into nested named tuples, and possibly federated. Types
  that are sum-incompatible include sequences, functions, abstract types,
  and placements.

  Args:
    type_spec: A `computation_types.Type`.
    type_spec_context: An optional parent type to include in the error message.

  Raises:
    SumIncompatibleError: if `type_spec` is not sum-compatible.
  """
  py_typecheck.check_type(type_spec, computation_types.Type)
  # Default the context to the type itself so top-level errors still carry a
  # complete message.
  if type_spec_context is None:
    type_spec_context = type_spec
  py_typecheck.check_type(type_spec_context, computation_types.Type)
  if type_spec.is_tensor():
    if not is_numeric_dtype(type_spec.dtype):
      raise SumIncompatibleError(type_spec, type_spec_context,
                                 f'{type_spec.dtype} is not numeric')
    # Partially-defined shapes cannot be summed element-wise.
    if not type_spec.shape.is_fully_defined():
      raise SumIncompatibleError(type_spec, type_spec_context,
                                 f'{type_spec.shape} is not fully defined')
  elif type_spec.is_struct():
    # Ragged and sparse tensors are represented as structs, but their
    # components cannot simply be added pairwise.
    if (type_spec.python_container is tf.RaggedTensor or
        type_spec.python_container is tf.sparse.SparseTensor):
      raise SumIncompatibleError(
          type_spec, type_spec_context,
          '`tf.RaggedTensor` and `tf.sparse.SparseTensor` cannot be used with '
          'simple summation')
    for _, element_type in structure.iter_elements(type_spec):
      check_is_sum_compatible(element_type, type_spec_context)
  elif type_spec.is_federated():
    check_is_sum_compatible(type_spec.member, type_spec_context)
  else:
    raise SumIncompatibleError(
        type_spec, type_spec_context,
        'only structures of tensors (possibly federated) may be summed')
def is_structure_of_floats(type_spec: computation_types.Type) -> bool:
  """Determines if `type_spec` is a structure of floats.

  Note that an empty `computation_types.StructType` will return `True`, as it
  does not contain any non-floating types.

  Args:
    type_spec: A `computation_types.Type`.

  Returns:
    `True` iff `type_spec` is a structure of floats, otherwise `False`.
  """
  py_typecheck.check_type(type_spec, computation_types.Type)
  if type_spec.is_federated():
    return is_structure_of_floats(type_spec.member)
  if type_spec.is_struct():
    elements = structure.iter_elements(type_spec)
    return all(is_structure_of_floats(element) for _, element in elements)
  if type_spec.is_tensor():
    py_typecheck.check_type(type_spec.dtype, tf.DType)
    return type_spec.dtype.is_floating
  return False
def check_is_structure_of_floats(type_spec):
  """Raises `TypeError` unless `type_spec` is a structure of floats."""
  if is_structure_of_floats(type_spec):
    return
  raise TypeError(
      'Expected a type which is structure of floats, found {}.'.format(
          type_spec))
def is_structure_of_integers(type_spec: computation_types.Type) -> bool:
  """Determines if `type_spec` is a structure of integers.

  Note that an empty `computation_types.StructType` will return `True`, as it
  does not contain any non-integer types.

  Args:
    type_spec: A `computation_types.Type`.

  Returns:
    `True` iff `type_spec` is a structure of integers, otherwise `False`.
  """
  py_typecheck.check_type(type_spec, computation_types.Type)
  if type_spec.is_federated():
    return is_structure_of_integers(type_spec.member)
  if type_spec.is_struct():
    elements = structure.iter_elements(type_spec)
    return all(is_structure_of_integers(element) for _, element in elements)
  if type_spec.is_tensor():
    py_typecheck.check_type(type_spec.dtype, tf.DType)
    return type_spec.dtype.is_integer
  return False
def check_is_structure_of_integers(type_spec):
  """Raises `TypeError` unless `type_spec` is a structure of integers."""
  if is_structure_of_integers(type_spec):
    return
  raise TypeError(
      'Expected a type which is structure of integers, found {}.'.format(
          type_spec))
def is_single_integer_or_matches_structure(
    type_sig: computation_types.Type,
    shape_type: computation_types.Type) -> bool:
  """If `type_sig` is an integer or integer structure matching `shape_type`.

  Args:
    type_sig: A `computation_types.Type` holding the candidate value(s).
    shape_type: A `computation_types.Type` whose struct names and nesting
      `type_sig` must mirror, unless `type_sig` is a single scalar integer.

  Returns:
    `True` if `type_sig` is a single scalar integer tensor, or a struct that
    matches `shape_type` element-by-element with integer scalars at the
    leaves; `False` otherwise.
  """
  py_typecheck.check_type(type_sig, computation_types.Type)
  py_typecheck.check_type(shape_type, computation_types.Type)
  if type_sig.is_tensor():
    # This condition applies to both `shape_type` being a tensor or structure,
    # as the same integer bitwidth can be used for all values in the structure.
    return type_sig.dtype.is_integer and (type_sig.shape.num_elements() == 1)
  elif shape_type.is_struct() and type_sig.is_struct():
    bitwidth_name_and_types = list(structure.iter_elements(type_sig))
    shape_name_and_types = list(structure.iter_elements(shape_type))
    if len(bitwidth_name_and_types) != len(shape_name_and_types):
      return False
    # Fix: the original loop rebound the parameter name `type_sig` as its loop
    # variable, shadowing the argument; use a distinct name instead.
    for (inner_name, inner_type), (inner_shape_name, inner_shape_type) in zip(
        bitwidth_name_and_types, shape_name_and_types):
      if inner_name != inner_shape_name:
        return False
      if not is_single_integer_or_matches_structure(inner_type,
                                                    inner_shape_type):
        return False
    return True
  else:
    return False
def check_federated_type(
    type_spec: computation_types.FederatedType,
    member: Optional[computation_types.Type] = None,
    placement: Optional[placements.PlacementLiteral] = None,
    all_equal: Optional[bool] = None):
  """Checks that `type_spec` is a federated type with the given parameters.

  Args:
    type_spec: The `tff.FederatedType` to check.
    member: The expected member type, or `None` if unspecified.
    placement: The desired placement, or `None` if unspecified.
    all_equal: The desired result of accessing the property
      `tff.FederatedType.all_equal` of `type_spec`, or `None` if left
      unspecified.

  Raises:
    TypeError: if `type_spec` is not a federated type of the given kind.
  """
  py_typecheck.check_type(type_spec, computation_types.FederatedType)
  if member is not None:
    # The actual member type must be assignable to the expected one, not
    # necessarily equal to it.
    py_typecheck.check_type(member, computation_types.Type)
    member.check_assignable_from(type_spec.member)
  if placement is not None:
    py_typecheck.check_type(placement, placements.PlacementLiteral)
    # Placement literals are singletons, so identity comparison is used.
    if type_spec.placement is not placement:
      raise TypeError(
          'Expected federated type placed at {}, got one placed at {}.'.format(
              placement, type_spec.placement))
  if all_equal is not None:
    py_typecheck.check_type(all_equal, bool)
    if type_spec.all_equal != all_equal:
      raise TypeError(
          'Expected federated type with all_equal {}, got one with {}.'.format(
              all_equal, type_spec.all_equal))
def is_average_compatible(type_spec: computation_types.Type) -> bool:
  """Determines if `type_spec` can be averaged.

  Types that are average-compatible are composed of numeric tensor types,
  either floating-point or complex, possibly packaged into nested named tuples,
  and possibly federated.

  Args:
    type_spec: a `computation_types.Type`.

  Returns:
    `True` iff `type_spec` is average-compatible, `False` otherwise.
  """
  py_typecheck.check_type(type_spec, computation_types.Type)
  if type_spec.is_federated():
    return is_average_compatible(type_spec.member)
  if type_spec.is_struct():
    members = structure.iter_elements(type_spec)
    return all(is_average_compatible(v) for _, v in members)
  if type_spec.is_tensor():
    dtype = type_spec.dtype
    return dtype.is_floating or dtype.is_complex
  return False
def is_struct_with_py_container(value, type_spec):
  """Returns `True` iff `type_spec` is a Python-container struct type and `value` is a `Struct`."""
  if not type_spec.is_struct_with_python():
    return False
  return isinstance(value, structure.Struct)
class NotConcreteTypeError(TypeError):
  """Raised when a type expected to be concrete contains an abstract type."""

  def __init__(self, full_type, found_abstract):
    super().__init__(
        'Expected concrete type containing no abstract types, but '
        'found abstract type {} in {}.'.format(found_abstract, full_type))
class MismatchedConcreteTypesError(TypeError):
  """Raised when one abstract label binds to two non-equivalent types."""

  def __init__(self, full_concrete, full_generic, abstract_label,
               first_concrete, second_concrete):
    super().__init__(
        'Expected concrete type {} to be a valid substitution '
        'for generic type {}, but abstract type {} '
        'had substitutions {} and {}, which are '
        'not equivalent.'.format(full_concrete, full_generic, abstract_label,
                                 first_concrete, second_concrete))
class UnassignableConcreteTypesError(TypeError):
  """Raised when a non-defining usage cannot be assigned from its binding."""

  def __init__(self, full_concrete, full_generic, abstract_label, definition,
               not_assignable_from):
    # Fix: the previous message rendered a stray double space between the
    # usage and 'which' (trailing space in the f-string fragment plus a
    # leading space in the final literal).
    message = (
        f'Expected concrete type {full_concrete} to be a valid substitution '
        f'for generic type {full_generic}, but abstract type {abstract_label} '
        f'was defined as {definition}, and later used as {not_assignable_from}'
        ' which cannot be assigned from the former.')
    super().__init__(message)
class MismatchedStructureError(TypeError):
  """Raised when generic and concrete types differ structurally."""

  def __init__(self, full_concrete, full_generic, concrete_member,
               generic_member, mismatch):
    super().__init__(
        'Expected concrete type {} to be a valid substitution '
        'for generic type {}, but their structures do not match: '
        '{} differs in {} from {}.'.format(full_concrete, full_generic,
                                           concrete_member, mismatch,
                                           generic_member))
class MissingDefiningUsageError(TypeError):
  """Raised when an abstract type label has no defining usage."""

  def __init__(self, generic_type, label_name):
    super().__init__(
        'Missing defining use of abstract type {} in type '
        '{}. See `check_concrete_instance_of` documentation for '
        'details on what counts as a defining use.'.format(
            label_name, generic_type))
def check_concrete_instance_of(concrete_type: computation_types.Type,
                               generic_type: computation_types.Type):
  """Checks whether `concrete_type` is a valid substitution of `generic_type`.

  This function determines whether `generic_type`'s type parameters can be
  substituted such that it is equivalent to `concrete type`.

  Note that passing through argument-position of function type swaps the
  variance of abstract types. Argument-position types can be assigned *from*
  other instances of the same type, but are not equivalent to it.

  Due to this variance issue, only abstract types must include at least one
  "defining" usage. "Defining" uses are those which are encased in function
  parameter position an odd number of times. These usages must all be
  equivalent. Non-defining usages need not compare equal but must be assignable
  *from* defining usages.

  Args:
    concrete_type: A type containing no `computation_types.AbstractType`s to
      check against `generic_type`'s shape.
    generic_type: A type which may contain `computation_types.AbstractType`s.

  Raises:
    TypeError: If `concrete_type` is not a valid substitution of `generic_type`.
  """
  py_typecheck.check_type(concrete_type, computation_types.Type)
  py_typecheck.check_type(generic_type, computation_types.Type)
  # `concrete_type` must be fully concrete: reject abstract nodes up front.
  for t in preorder_types(concrete_type):
    if t.is_abstract():
      raise NotConcreteTypeError(concrete_type, t)
  # Maps abstract label -> the concrete type bound at a defining usage.
  type_bindings = {}
  # Maps abstract label -> concrete types seen at non-defining usages.
  non_defining_usages = collections.defaultdict(list)

  def _check_helper(generic_type_member: computation_types.Type,
                    concrete_type_member: computation_types.Type,
                    defining: bool):
    """Recursive helper function."""

    def _raise_structural(mismatch):
      raise MismatchedStructureError(concrete_type, generic_type,
                                     concrete_type_member, generic_type_member,
                                     mismatch)

    def _both_are(predicate):
      # True when both sides satisfy `predicate`; raises a structural 'kind'
      # error when only the generic side does; False when neither does.
      if predicate(generic_type_member):
        if predicate(concrete_type_member):
          return True
        else:
          _raise_structural('kind')
      else:
        return False

    if generic_type_member.is_abstract():
      label = str(generic_type_member.label)
      if not defining:
        # Recorded for the assignability pass after traversal.
        non_defining_usages[label].append(concrete_type_member)
      else:
        bound_type = type_bindings.get(label)
        if bound_type is not None:
          # All defining usages of a label must be equivalent.
          if not concrete_type_member.is_equivalent_to(bound_type):
            raise MismatchedConcreteTypesError(concrete_type, generic_type,
                                               label, bound_type,
                                               concrete_type_member)
        else:
          type_bindings[label] = concrete_type_member
    elif _both_are(lambda t: t.is_tensor()):
      if generic_type_member != concrete_type_member:
        _raise_structural('tensor types')
    elif _both_are(lambda t: t.is_placement()):
      if generic_type_member != concrete_type_member:
        _raise_structural('placements')
    elif _both_are(lambda t: t.is_struct()):
      generic_elements = structure.to_elements(generic_type_member)
      concrete_elements = structure.to_elements(concrete_type_member)
      if len(generic_elements) != len(concrete_elements):
        _raise_structural('length')
      for k in range(len(generic_elements)):
        # Element names must match positionally before recursing on values.
        if generic_elements[k][0] != concrete_elements[k][0]:
          _raise_structural('element names')
        _check_helper(generic_elements[k][1], concrete_elements[k][1], defining)
    elif _both_are(lambda t: t.is_sequence()):
      _check_helper(generic_type_member.element, concrete_type_member.element,
                    defining)
    elif _both_are(lambda t: t.is_function()):
      if generic_type_member.parameter is None:
        if concrete_type_member.parameter is not None:
          _raise_structural('parameter')
      else:
        # Parameter position flips variance: toggle `defining`.
        _check_helper(generic_type_member.parameter,
                      concrete_type_member.parameter, not defining)
      _check_helper(generic_type_member.result, concrete_type_member.result,
                    defining)
    elif _both_are(lambda t: t.is_federated()):
      if generic_type_member.placement != concrete_type_member.placement:
        _raise_structural('placement')
      if generic_type_member.all_equal != concrete_type_member.all_equal:
        _raise_structural('all equal')
      _check_helper(generic_type_member.member, concrete_type_member.member,
                    defining)
    else:
      raise TypeError(f'Unexpected type kind {generic_type}.')

  _check_helper(generic_type, concrete_type, False)
  # Second pass: non-defining usages must be assignable *from* the binding
  # established by the defining usages.
  for label, usages in non_defining_usages.items():
    bound_type = type_bindings.get(label)
    if bound_type is None:
      if len(usages) == 1:
        # Single-use abstract types can't be wrong.
        # Note: we could also add an exception here for cases where every usage
        # is equivalent to the first usage. However, that's not currently
        # needed since the only intrinsic that doesn't have a defining use is
        # GENERIC_ZERO, which has only a single-use type parameter.
        pass
      else:
        raise MissingDefiningUsageError(generic_type, label)
    else:
      for usage in usages:
        if not usage.is_assignable_from(bound_type):
          raise UnassignableConcreteTypesError(concrete_type, generic_type,
                                               label, bound_type, usage)
def check_valid_federated_weighted_mean_argument_tuple_type(
    type_spec: computation_types.StructType):
  """Checks that `type_spec` is a valid type of a federated weighted mean arg.

  Args:
    type_spec: A `computation_types.StructType`.

  Raises:
    TypeError: If the check fails.
  """
  py_typecheck.check_type(type_spec, computation_types.StructType)
  if len(type_spec) != 2:
    raise TypeError('Expected a 2-tuple, found {}.'.format(type_spec))
  # Both the value and the weight must be client-placed (not all-equal)
  # federated values with average-compatible members.
  for _, v in structure.iter_elements(type_spec):
    check_federated_type(v, None, placements.CLIENTS, False)
    if not is_average_compatible(v.member):
      raise TypeError(
          'Expected average-compatible args, got {} from argument of type {}.'
          .format(v.member, type_spec))
  # The weight (second element) must additionally be a scalar tensor.
  w_type = type_spec[1].member
  py_typecheck.check_type(w_type, computation_types.TensorType)
  if w_type.shape.ndims != 0:
    raise TypeError('Expected scalar weight, got {}.'.format(w_type))
def count_tensors_in_type(
    type_spec: computation_types.Type,
    tensor_filter: Optional[Callable[[computation_types.TensorType],
                                     bool]] = None
) -> collections.OrderedDict:
  """Counts tensors and fully-specified elements under `type_spec`.

  Args:
    type_spec: Instance of `computation_types.Type` to count tensors under.
    tensor_filter: Optional filtering function. Callable which takes an argument
      of type `computation_types.TensorType` and returns a boolean. If
      specified, only tensor types which pass this filter (i.e., on which this
      function returns `True`) will be counted.

  Returns:
    A `collections.OrderedDict` with three entries. The first, `num_tensors`,
    is the count of all `computation_types.TensorType` (passing
    `tensor_filter` if this argument is specified). The second, `parameters`,
    is the count of all fully-specified parameters of these tensors. Note that
    this implies any tensor with a `None` dimension (i.e., of unspecified
    size) will not be counted. The third, `num_unspecified_tensors`, counts
    how many tensors fall into this category (that is, how many have
    unspecified size).
  """
  py_typecheck.check_type(type_spec, computation_types.Type)
  if tensor_filter is None:
    # Count every tensor when no filter is given.
    tensor_filter = lambda _: True
  py_typecheck.check_callable(tensor_filter)
  tensors_and_params = collections.OrderedDict(
      num_tensors=0, parameters=0, num_unspecified_tensors=0)

  def _capture_tensors(type_signature):
    # Visitor for the postorder traversal: tallies each matching tensor.
    # `shape.num_elements()` is `None` for shapes with unknown dimensions.
    if type_signature.is_tensor() and tensor_filter(type_signature):
      tensors_and_params['num_tensors'] += 1
      num_parameters = type_signature.shape.num_elements()
      if num_parameters is not None:
        tensors_and_params['parameters'] += num_parameters
      else:
        tensors_and_params['num_unspecified_tensors'] += 1
    # Returning `False` signals that the type was not modified.
    return type_signature, False

  type_transformations.transform_type_postorder(type_spec, _capture_tensors)
  return tensors_and_params
| |
import os
import sys
import tempfile
import unittest
from six import b as b_
from webtest import TestApp
import pecan
from pecan.tests import PecanTestCase
__here__ = os.path.dirname(__file__)
class TestConf(PecanTestCase):
    """Tests for creating, updating and exporting `pecan.configuration.Config`."""

    def test_update_config_fail_identifier(self):
        """Fail when naming does not pass correctness"""
        from pecan import configuration
        bad_dict = {'bad name': 'value'}
        self.assertRaises(ValueError, configuration.Config, bad_dict)

    def test_update_config_fail_message(self):
        """When failing, the __force_dict__ key is suggested"""
        from pecan import configuration
        bad_dict = {'bad name': 'value'}
        try:
            configuration.Config(bad_dict)
        except ValueError as error:
            assert "consider using the '__force_dict__'" in str(error)

    def test_update_set_config(self):
        """Update an empty configuration with the default values"""
        from pecan import configuration
        conf = configuration.initconf()
        conf.update(configuration.conf_from_file(os.path.join(
            __here__,
            'config_fixtures/config.py'
        )))
        self.assertEqual(conf.app.root, None)
        self.assertEqual(conf.app.template_path, 'myproject/templates')
        self.assertEqual(conf.app.static_root, 'public')
        self.assertEqual(conf.server.host, '1.1.1.1')
        self.assertEqual(conf.server.port, '8081')

    def test_update_set_default_config(self):
        """Update an empty configuration with the default values"""
        from pecan import configuration
        conf = configuration.initconf()
        conf.update(configuration.conf_from_file(os.path.join(
            __here__,
            'config_fixtures/empty.py'
        )))
        self.assertEqual(conf.app.root, None)
        self.assertEqual(conf.app.template_path, '')
        self.assertEqual(conf.app.static_root, 'public')
        self.assertEqual(conf.server.host, '0.0.0.0')
        self.assertEqual(conf.server.port, '8080')

    def test_update_force_dict(self):
        """Update an empty configuration with the default values"""
        from pecan import configuration
        conf = configuration.initconf()
        conf.update(configuration.conf_from_file(os.path.join(
            __here__,
            'config_fixtures/forcedict.py'
        )))
        self.assertEqual(conf.app.root, None)
        self.assertEqual(conf.app.template_path, '')
        self.assertEqual(conf.app.static_root, 'public')
        self.assertEqual(conf.server.host, '0.0.0.0')
        self.assertEqual(conf.server.port, '8080')
        # `__force_dict__` keeps `beaker` a plain dict rather than a Config,
        # and the marker key itself is stripped from the result.
        self.assertTrue(isinstance(conf.beaker, dict))
        self.assertEqual(conf.beaker['session.key'], 'key')
        self.assertEqual(conf.beaker['session.type'], 'cookie')
        self.assertEqual(
            conf.beaker['session.validate_key'],
            '1a971a7df182df3e1dec0af7c6913ec7'
        )
        self.assertEqual(conf.beaker.get('__force_dict__'), None)

    def test_update_config_with_dict(self):
        from pecan import configuration
        conf = configuration.initconf()
        d = {'attr': True}
        conf['attr'] = d
        # Nested dicts assigned into a Config become attribute-accessible.
        self.assertTrue(conf.attr.attr)

    def test_config_repr(self):
        from pecan import configuration
        conf = configuration.Config({'a': 1})
        self.assertEqual(repr(conf), "Config({'a': 1})")

    def test_config_from_dict(self):
        from pecan import configuration
        conf = configuration.conf_from_dict({})
        # '%(confdir)s' expands to the config source directory; for a
        # dict-based config that is the current working directory.
        conf['path'] = '%(confdir)s'
        self.assertTrue(os.path.samefile(conf['path'], os.getcwd()))

    def test_config_from_file(self):
        from pecan import configuration
        path = os.path.join(
            os.path.dirname(__file__), 'config_fixtures', 'config.py'
        )
        configuration.conf_from_file(path)

    def test_config_illegal_ids(self):
        from pecan import configuration
        conf = configuration.Config({})
        conf.update(configuration.conf_from_file(os.path.join(
            __here__,
            'config_fixtures/bad/module_and_underscore.py'
        )))
        # Module objects and underscore-prefixed names are filtered out.
        self.assertEqual([], list(conf))

    def test_config_missing_file(self):
        from pecan import configuration
        path = ('doesnotexist.py',)
        configuration.Config({})
        self.assertRaises(
            RuntimeError,
            configuration.conf_from_file,
            os.path.join(__here__, 'config_fixtures', *path)
        )

    def test_config_missing_file_on_path(self):
        from pecan import configuration
        path = ('bad', 'bad', 'doesnotexist.py',)
        configuration.Config({})
        self.assertRaises(
            RuntimeError,
            configuration.conf_from_file,
            os.path.join(__here__, 'config_fixtures', *path)
        )

    def test_config_with_syntax_error(self):
        from pecan import configuration
        with tempfile.NamedTemporaryFile('wb') as f:
            # 'if false' (missing colon) is a deliberate syntax error.
            f.write(b_('\n'.join(['if false', 'var = 3'])))
            f.flush()
            configuration.Config({})
            self.assertRaises(
                SyntaxError,
                configuration.conf_from_file,
                f.name
            )

    def test_config_with_non_package_relative_import(self):
        from pecan import configuration
        with tempfile.NamedTemporaryFile('wb', suffix='.py') as f:
            f.write(b_('\n'.join(['from . import variables'])))
            f.flush()
            configuration.Config({})
            # The exact exception type differs across Python versions.
            try:
                configuration.conf_from_file(f.name)
            except (ValueError, SystemError, ImportError) as e:
                assert 'relative import' in str(e)
            else:
                raise AssertionError(
                    "A relative import-related error should have been raised"
                )

    def test_config_with_bad_import(self):
        from pecan import configuration
        path = ('bad', 'importerror.py')
        configuration.Config({})
        self.assertRaises(
            ImportError,
            configuration.conf_from_file,
            os.path.join(
                __here__,
                'config_fixtures',
                *path
            )
        )

    def test_config_dir(self):
        from pecan import configuration
        conf = configuration.Config({})
        self.assertEqual([], dir(conf))
        conf = configuration.Config({'a': 1})
        self.assertEqual(['a'], dir(conf))

    def test_config_bad_key(self):
        from pecan import configuration
        conf = configuration.Config({'a': 1})
        assert conf.a == 1
        self.assertRaises(AttributeError, getattr, conf, 'b')

    def test_config_get_valid_key(self):
        from pecan import configuration
        conf = configuration.Config({'a': 1})
        assert conf.get('a') == 1

    def test_config_get_invalid_key(self):
        from pecan import configuration
        conf = configuration.Config({'a': 1})
        assert conf.get('b') is None

    def test_config_get_invalid_key_return_default(self):
        from pecan import configuration
        conf = configuration.Config({'a': 1})
        assert conf.get('b', True) is True

    def test_config_to_dict(self):
        from pecan import configuration
        conf = configuration.initconf()
        assert isinstance(conf, configuration.Config)
        to_dict = conf.to_dict()
        assert isinstance(to_dict, dict)
        assert to_dict['server']['host'] == '0.0.0.0'
        assert to_dict['server']['port'] == '8080'
        assert to_dict['app']['modules'] == []
        assert to_dict['app']['root'] is None
        assert to_dict['app']['static_root'] == 'public'
        assert to_dict['app']['template_path'] == ''

    def test_config_to_dict_nested(self):
        from pecan import configuration
        """have more than one level nesting and convert to dict"""
        conf = configuration.initconf()
        nested = {'one': {'two': 2}}
        conf['nested'] = nested
        to_dict = conf.to_dict()
        assert isinstance(to_dict, dict)
        assert to_dict['server']['host'] == '0.0.0.0'
        assert to_dict['server']['port'] == '8080'
        assert to_dict['app']['modules'] == []
        assert to_dict['app']['root'] is None
        assert to_dict['app']['static_root'] == 'public'
        assert to_dict['app']['template_path'] == ''
        assert to_dict['nested']['one']['two'] == 2

    def test_config_to_dict_prefixed(self):
        from pecan import configuration
        """Add a prefix for keys"""
        conf = configuration.initconf()
        assert isinstance(conf, configuration.Config)
        to_dict = conf.to_dict('prefix_')
        assert isinstance(to_dict, dict)
        assert to_dict['prefix_server']['prefix_host'] == '0.0.0.0'
        assert to_dict['prefix_server']['prefix_port'] == '8080'
        assert to_dict['prefix_app']['prefix_modules'] == []
        assert to_dict['prefix_app']['prefix_root'] is None
        assert to_dict['prefix_app']['prefix_static_root'] == 'public'
        assert to_dict['prefix_app']['prefix_template_path'] == ''
class TestGlobalConfig(PecanTestCase):
    """Tests for the module-global runtime configuration (`set_config`)."""

    def tearDown(self):
        # Restore the default global configuration so `set_config` calls made
        # here do not pollute later tests.
        from pecan import configuration
        configuration.set_config(
            dict(configuration.initconf()),
            overwrite=True
        )

    def test_paint_from_dict(self):
        from pecan import configuration
        configuration.set_config({'foo': 'bar'})
        # Without overwrite, defaults are retained and the new key is merged.
        assert dict(configuration._runtime_conf) != {'foo': 'bar'}
        self.assertEqual(configuration._runtime_conf.foo, 'bar')

    def test_overwrite_from_dict(self):
        from pecan import configuration
        configuration.set_config({'foo': 'bar'}, overwrite=True)
        assert dict(configuration._runtime_conf) == {'foo': 'bar'}

    def test_paint_from_file(self):
        from pecan import configuration
        configuration.set_config(os.path.join(
            __here__,
            'config_fixtures/foobar.py'
        ))
        assert dict(configuration._runtime_conf) != {'foo': 'bar'}
        assert configuration._runtime_conf.foo == 'bar'

    def test_overwrite_from_file(self):
        from pecan import configuration
        configuration.set_config(
            os.path.join(
                __here__,
                'config_fixtures/foobar.py',
            ),
            overwrite=True
        )
        assert dict(configuration._runtime_conf) == {'foo': 'bar'}

    def test_set_config_none_type(self):
        from pecan import configuration
        self.assertRaises(RuntimeError, configuration.set_config, None)

    def test_set_config_to_dir(self):
        from pecan import configuration
        self.assertRaises(RuntimeError, configuration.set_config, '/')
class TestConfFromEnv(PecanTestCase):
    """Tests for resolving the config file path from $PECAN_CONFIG.

    Note that there is a good chance of pollution if ``tearDown`` does not
    reset the configuration like this class does.  If implementing new
    classes for configuration this tearDown **needs to be implemented**.
    """

    def setUp(self):
        super(TestConfFromEnv, self).setUp()
        # Always scrub the env var, even when a test fails midway.
        self.addCleanup(self._remove_config_key)
        from pecan import configuration
        self.get_conf_path_from_env = configuration.get_conf_path_from_env

    def _remove_config_key(self):
        os.environ.pop('PECAN_CONFIG', None)

    def test_invalid_path(self):
        os.environ['PECAN_CONFIG'] = '/'
        msg = "PECAN_CONFIG was set to an invalid path: /"
        # NOTE(review): assertRaisesRegexp is the deprecated alias of
        # assertRaisesRegex, kept here for Python 2 compatibility.
        self.assertRaisesRegexp(
            RuntimeError,
            msg,
            self.get_conf_path_from_env
        )

    def test_is_not_set(self):
        msg = ("PECAN_CONFIG is not set and "
               "no config file was passed as an argument.")
        self.assertRaisesRegexp(
            RuntimeError,
            msg,
            self.get_conf_path_from_env
        )

    def test_return_valid_path(self):
        __here__ = os.path.abspath(__file__)
        os.environ['PECAN_CONFIG'] = __here__
        assert self.get_conf_path_from_env() == __here__
class TestConfigCleanup(unittest.TestCase):
    """Verify runtime config changes can be rolled back to the defaults."""

    def setUp(self):
        class RootController(object):
            @pecan.expose()
            def index(self):
                return 'Hello, World!'

        self.app = TestApp(pecan.Pecan(RootController()))

    def tearDown(self):
        # Reset to the shipped defaults so other tests see a clean slate.
        pecan.configuration.set_config(pecan.configuration.DEFAULT,
                                       overwrite=True)

    def test_conf_default(self):
        assert pecan.conf.server.to_dict() == {
            'port': '8080', 'host': '0.0.0.0'
        }

    def test_conf_changed(self):
        pecan.conf.server = pecan.configuration.Config({'port': '80'})
        assert pecan.conf.server.to_dict() == {'port': '80'}
| |
"""Tests of cleverhans.attacks_tf
"""
# pylint: disable=missing-docstring
from functools import partial
import unittest
import numpy as np
import tensorflow as tf
from cleverhans.devtools.checks import CleverHansTest
from cleverhans.attacks_tf import fgm, pgd_attack, \
UnrolledAdam, UnrolledGradientDescent, parallel_apply_transformations
from cleverhans.devtools.mocks import random_feed_dict
from cleverhans.model import Model
class SimpleModel(Model):
  """A tiny two-layer network with fixed weights, used as an attack target."""

  def __init__(self, scope='simple', nb_classes=2, **kwargs):
    del kwargs
    Model.__init__(self, scope, nb_classes, locals())

  def fprop(self, x, **kwargs):
    del kwargs
    with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
      # Hard-coded weights make the model's behavior deterministic.
      weights1 = tf.constant(
          [[1.5, .3], [-2, 0.3]], dtype=tf.as_dtype(x.dtype))
      weights2 = tf.constant(
          [[-2.4, 1.2], [0.5, -2.3]], dtype=tf.as_dtype(x.dtype))
      hidden = tf.nn.sigmoid(tf.matmul(x, weights1))
      logits = tf.matmul(hidden, weights2)
      return {self.O_LOGITS: logits, self.O_PROBS: tf.nn.softmax(logits)}
class TestAttackTF(CleverHansTest):
  """Tests for the TF1 attack implementations in cleverhans.attacks_tf."""

  def setUp(self):
    super(TestAttackTF, self).setUp()
    self.sess = tf.Session()
    self.model = SimpleModel()

  def test_fgm_gradient_max(self):
    """FGM output must be differentiable w.r.t. its input placeholder."""
    input_dim = 2
    nb_classes = 3
    batch_size = 4
    rng = np.random.RandomState([2017, 8, 23])
    x = tf.placeholder(tf.float32, [batch_size, input_dim])
    weights = tf.placeholder(tf.float32, [input_dim, nb_classes])
    logits = tf.matmul(x, weights)
    probs = tf.nn.softmax(logits)
    adv_x = fgm(x, probs)
    random_example = rng.randint(batch_size)
    random_feature = rng.randint(input_dim)
    output = tf.slice(adv_x, [random_example, random_feature], [1, 1])
    dx, = tf.gradients(output, x)
    # The following line catches GitHub issue #243
    self.assertIsNotNone(dx)
    dx = self.sess.run(dx, feed_dict=random_feed_dict(rng, [x, weights]))
    ground_truth = np.zeros((batch_size, input_dim))
    ground_truth[random_example, random_feature] = 1.
    self.assertClose(dx, ground_truth)

  def helper_pgd_attack(self,
                        unrolled_optimizer,
                        targeted,
                        nb_iters=20,
                        epsilon=.5,
                        clip_min=-5.,
                        clip_max=5.,
                        assert_threshold=0.5):
    """Run pgd_attack against SimpleModel and check its success rate.

    For untargeted attacks the fraction of unchanged predictions must fall
    below ``assert_threshold``; for targeted attacks the fraction of
    predictions matching the target labels must exceed it.
    """

    def loss_fn(input_image, label, targeted):
      res = self.model.fprop(input_image)
      logits = res[self.model.O_LOGITS]
      # Minimizing the loss must increase the target class for targeted
      # attacks and decrease the true class otherwise.
      multiplier = 1. if targeted else -1.
      return multiplier * tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=label, logits=logits)

    x_val_ph = tf.placeholder(tf.float32, shape=[100, 2])
    x_val = np.random.randn(100, 2).astype(np.float32)
    init_model_output = self.model.fprop(x_val_ph)
    init_model_logits = init_model_output[self.model.O_LOGITS]
    if targeted:
      # Fix: np.random.random_integers is deprecated and has been removed
      # from NumPy; randint with an exclusive upper bound of 2 draws the
      # same {0, 1} labels.
      labels = np.random.randint(0, 2, size=(100,))
    else:
      labels = tf.stop_gradient(tf.argmax(init_model_logits, axis=1))

    def _project_perturbation(perturbation, epsilon, input_image,
                              clip_min, clip_max):
      # Keep the perturbation inside the epsilon-ball, then keep the
      # perturbed image inside the valid pixel range.
      clipped_perturbation = tf.clip_by_value(perturbation, -epsilon,
                                              epsilon)
      new_image = tf.clip_by_value(input_image + clipped_perturbation,
                                   clip_min, clip_max)
      return new_image - input_image

    x_adv = pgd_attack(
        loss_fn=partial(loss_fn, targeted=targeted),
        input_image=x_val_ph,
        label=labels,
        epsilon=epsilon,
        num_steps=nb_iters,
        optimizer=unrolled_optimizer,
        project_perturbation=_project_perturbation,
        clip_min=clip_min, clip_max=clip_max)

    final_model_output = self.model.fprop(x_adv)
    final_model_logits = final_model_output[self.model.O_LOGITS]

    if not targeted:
      logits1, logits2 = self.sess.run(
          [init_model_logits, final_model_logits],
          feed_dict={x_val_ph: x_val})
      preds1 = np.argmax(logits1, axis=1)
      preds2 = np.argmax(logits2, axis=1)
      self.assertTrue(
          np.mean(preds1 == preds2) < assert_threshold,
          np.mean(preds1 == preds2))
    else:
      logits_adv = self.sess.run(
          final_model_logits, feed_dict={x_val_ph: x_val})
      preds_adv = np.argmax(logits_adv, axis=1)
      self.assertTrue(np.mean(labels == preds_adv) > assert_threshold)

  def test_pgd_untargeted_attack_with_adam_optimizer(self):
    unrolled_optimizer = UnrolledAdam(lr=0.1)
    self.helper_pgd_attack(
        unrolled_optimizer=unrolled_optimizer,
        targeted=False,
        epsilon=.5,
        nb_iters=20,
        clip_min=-10.,
        clip_max=10.,
        assert_threshold=0.7)

  def test_stronger_pgd_untargeted_attack_with_adam_optimizer(self):
    unrolled_optimizer = UnrolledAdam(lr=0.1)
    self.helper_pgd_attack(
        unrolled_optimizer=unrolled_optimizer,
        targeted=False,
        epsilon=5.,
        nb_iters=100,
        clip_min=-10.,
        clip_max=10.,
        assert_threshold=0.1)

  def test_pgd_targeted_attack_with_adam_optimizer(self):
    unrolled_optimizer = UnrolledAdam(lr=0.1)
    self.helper_pgd_attack(
        unrolled_optimizer=unrolled_optimizer,
        targeted=True,
        epsilon=.5,
        nb_iters=20,
        clip_min=-10.,
        clip_max=10.,
        assert_threshold=0.7)

  def test_stronger_pgd_targeted_attack_with_adam_optimizer(self):
    unrolled_optimizer = UnrolledAdam(lr=0.1)
    self.helper_pgd_attack(
        unrolled_optimizer=unrolled_optimizer,
        targeted=True,
        epsilon=5.,
        nb_iters=100,
        clip_min=-10.,
        clip_max=10.,
        assert_threshold=0.9)

  def test_pgd_untargeted_attack_with_sgd_optimizer(self):
    unrolled_optimizer = UnrolledGradientDescent(lr=1000.)
    self.helper_pgd_attack(
        unrolled_optimizer=unrolled_optimizer,
        targeted=False,
        epsilon=.5,
        nb_iters=20,
        clip_min=-10.,
        clip_max=10.,
        assert_threshold=0.6)

  def test_stronger_pgd_untargeted_attack_with_sgd_optimizer(self):
    unrolled_optimizer = UnrolledGradientDescent(lr=1000.)
    self.helper_pgd_attack(
        unrolled_optimizer=unrolled_optimizer,
        targeted=False,
        epsilon=5.,
        nb_iters=100,
        clip_min=-10.,
        clip_max=10.,
        assert_threshold=0.1)

  def test_pgd_targeted_attack_with_sgd_optimizer(self):
    unrolled_optimizer = UnrolledGradientDescent(lr=1000.)
    self.helper_pgd_attack(
        unrolled_optimizer=unrolled_optimizer,
        targeted=True,
        epsilon=.5,
        nb_iters=20,
        clip_min=-10.,
        clip_max=10.,
        assert_threshold=0.6)

  def test_stronger_pgd_targeted_attack_with_sgd_optimizer(self):
    unrolled_optimizer = UnrolledGradientDescent(lr=1000.)
    self.helper_pgd_attack(
        unrolled_optimizer=unrolled_optimizer,
        targeted=True,
        epsilon=5.,
        nb_iters=100,
        clip_min=-10.,
        clip_max=10.,
        assert_threshold=0.9)

  @unittest.skip("This test requires human inspection of the images")
  def test_parallel_apply(self):
    """Render transformed images to /tmp for manual visual inspection."""

    def _save_image_to_png(image_np, filename):
      from PIL import Image
      import os
      dirname = os.path.dirname(filename)
      if not os.path.exists(dirname):
        os.makedirs(dirname)
      if image_np.shape[-1] == 3:
        img = Image.fromarray(np.uint8(image_np * 255.), 'RGB')
      else:
        img = Image.fromarray(np.uint8(image_np[:, :, 0] * 255.), 'L')
      img.save(filename)

    x = tf.ones([3, 200, 200, 3])
    transforms = [
        [0.2, 0, 20],
        [0, 0, 0],
        # [-0.2, 0, 20],
        # [-0.4, 0, 20],
    ]
    transformed_ims = parallel_apply_transformations(
        x, transforms, black_border_size=30)
    worst_sample_idx = tf.convert_to_tensor([0, 1, 1])
    batch_size = tf.shape(x)[0]
    # Pair each batch index with its selected transform index so gather_nd
    # can pull one transformed image per example.
    keys = tf.stack([
        tf.range(batch_size, dtype=tf.int32),
        tf.cast(worst_sample_idx, tf.int32)
    ], axis=1)
    transformed_ims_bshwc = tf.einsum('sbhwc->bshwc', transformed_ims)
    after_lookup = tf.gather_nd(transformed_ims_bshwc, keys)  # BHWC
    with tf.Session() as sess:
      img_batch_np = sess.run(after_lookup)[:, :, :, :]
      for i, img in enumerate(img_batch_np):
        filename = "/tmp/test_image%s.png" % (i)
        _save_image_to_png(img, filename)
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
| |
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import date, time, timedelta
import mock
import unittest
import django
from django import forms
from django.test import TestCase
from django_filters import filters
from django_filters.fields import (
Lookup,
RangeField,
DateRangeField,
TimeRangeField,
LookupTypeField)
from django_filters.filters import (
Filter,
CharFilter,
BooleanFilter,
ChoiceFilter,
MultipleChoiceFilter,
DateFilter,
DateTimeFilter,
TimeFilter,
ModelChoiceFilter,
ModelMultipleChoiceFilter,
NumberFilter,
NumericRangeFilter,
RangeFilter,
DateRangeFilter,
DateFromToRangeFilter,
TimeRangeFilter,
AllValuesFilter,
UUIDFilter,
LOOKUP_TYPES)
from tests.models import Book, User
class FilterTests(TestCase):
    """Unit tests for the base ``Filter`` class."""

    def test_creation(self):
        f = Filter()
        self.assertEqual(f.lookup_type, 'exact')
        self.assertEqual(f.exclude, False)

    def test_creation_order(self):
        f = Filter()
        f2 = Filter()
        # Filters track declaration order via a class-level counter.
        self.assertTrue(f2.creation_counter > f.creation_counter)

    def test_default_field(self):
        f = Filter()
        field = f.field
        self.assertIsInstance(field, forms.Field)
        self.assertEqual(field.help_text, 'Filter')

    def test_field_with_exclusion(self):
        f = Filter(exclude=True)
        field = f.field
        self.assertIsInstance(field, forms.Field)
        self.assertEqual(field.help_text, 'This is an exclusion filter')

    def test_field_with_single_lookup_type(self):
        f = Filter(lookup_type='iexact')
        field = f.field
        self.assertIsInstance(field, forms.Field)

    def test_field_with_none_lookup_type(self):
        # lookup_type=None exposes every lookup as a user-selectable choice.
        f = Filter(lookup_type=None)
        field = f.field
        self.assertIsInstance(field, LookupTypeField)
        choice_field = field.fields[1]
        self.assertEqual(len(choice_field.choices), len(LOOKUP_TYPES))

    def test_field_with_lookup_type_and_exlusion(self):  # sic: "exlusion"
        f = Filter(lookup_type=None, exclude=True)
        field = f.field
        self.assertIsInstance(field, LookupTypeField)
        self.assertEqual(field.help_text, 'This is an exclusion filter')

    def test_field_with_list_lookup_type(self):
        f = Filter(lookup_type=('istartswith', 'iendswith'))
        field = f.field
        self.assertIsInstance(field, LookupTypeField)
        choice_field = field.fields[1]
        self.assertEqual(len(choice_field.choices), 2)

    def test_field_params(self):
        with mock.patch.object(Filter, 'field_class',
                               spec=['__call__']) as mocked:
            f = Filter(name='somefield', label='somelabel',
                       widget='somewidget')
            f.field
            mocked.assert_called_once_with(required=False,
                label='somelabel', widget='somewidget', help_text=mock.ANY)

    def test_field_extra_params(self):
        with mock.patch.object(Filter, 'field_class',
                               spec=['__call__']) as mocked:
            f = Filter(someattr='someattr')
            f.field
            mocked.assert_called_once_with(required=mock.ANY,
                label=mock.ANY, widget=mock.ANY, help_text=mock.ANY,
                someattr='someattr')

    def test_field_with_required_filter(self):
        with mock.patch.object(Filter, 'field_class',
                               spec=['__call__']) as mocked:
            f = Filter(required=True)
            f.field
            mocked.assert_called_once_with(required=True,
                label=mock.ANY, widget=mock.ANY, help_text=mock.ANY)

    def test_filtering(self):
        qs = mock.Mock(spec=['filter'])
        f = Filter()
        result = f.filter(qs, 'value')
        qs.filter.assert_called_once_with(None__exact='value')
        self.assertNotEqual(qs, result)

    def test_filtering_exclude(self):
        qs = mock.Mock(spec=['filter', 'exclude'])
        f = Filter(exclude=True)
        result = f.filter(qs, 'value')
        qs.exclude.assert_called_once_with(None__exact='value')
        self.assertNotEqual(qs, result)

    def test_filtering_uses_name(self):
        qs = mock.Mock(spec=['filter'])
        f = Filter(name='somefield')
        # Fix: ``result`` was previously assigned the return value of
        # assert_called_once_with (always None), so the assertNotEqual
        # below was vacuous.  Capture the actual filter() return instead.
        result = f.filter(qs, 'value')
        qs.filter.assert_called_once_with(somefield__exact='value')
        self.assertNotEqual(qs, result)

    def test_filtering_skipped_with_blank_value(self):
        qs = mock.Mock()
        f = Filter()
        result = f.filter(qs, '')
        self.assertListEqual(qs.method_calls, [])
        self.assertEqual(qs, result)

    def test_filtering_skipped_with_none_value(self):
        qs = mock.Mock()
        f = Filter()
        result = f.filter(qs, None)
        self.assertListEqual(qs.method_calls, [])
        self.assertEqual(qs, result)

    def test_filtering_with_list_value(self):
        qs = mock.Mock(spec=['filter'])
        f = Filter(name='somefield', lookup_type=['some_lookup_type'])
        result = f.filter(qs, Lookup('value', 'some_lookup_type'))
        qs.filter.assert_called_once_with(somefield__some_lookup_type='value')
        self.assertNotEqual(qs, result)

    def test_filtering_skipped_with_list_value_with_blank(self):
        qs = mock.Mock()
        f = Filter(name='somefield', lookup_type=['some_lookup_type'])
        result = f.filter(qs, Lookup('', 'some_lookup_type'))
        self.assertListEqual(qs.method_calls, [])
        self.assertEqual(qs, result)

    def test_filtering_skipped_with_list_value_with_blank_lookup(self):
        return  # Now field is required to provide valid lookup_type if it provides any
        qs = mock.Mock(spec=['filter'])
        f = Filter(name='somefield', lookup_type=None)
        result = f.filter(qs, Lookup('value', ''))
        qs.filter.assert_called_once_with(somefield__exact='value')
        self.assertNotEqual(qs, result)

    def test_filter_using_action(self):
        qs = mock.NonCallableMock(spec=[])
        action = mock.Mock(spec=['filter'])
        f = Filter(action=action)
        result = f.filter(qs, 'value')
        action.assert_called_once_with(qs, 'value')
        self.assertNotEqual(qs, result)

    def test_filtering_uses_distinct(self):
        qs = mock.Mock(spec=['filter', 'distinct'])
        f = Filter(name='somefield', distinct=True)
        # Fix: same vacuous-assertion bug as test_filtering_uses_name --
        # ``result`` must come from filter(), not from the mock assertion.
        result = f.filter(qs, 'value')
        qs.distinct.assert_called_once_with()
        self.assertNotEqual(qs, result)
class CustomFilterWithBooleanCheckTests(TestCase):
    """A Filter subclass may short-circuit on falsy values (e.g. empty Lookup)."""

    def setUp(self):
        super(CustomFilterWithBooleanCheckTests, self).setUp()

        class CustomTestFilter(Filter):
            def filter(self_, qs, value):
                # Skip filtering entirely for falsy values.
                if not value:
                    return qs
                return super(CustomTestFilter, self_).filter(qs, value)

        self.test_filter_class = CustomTestFilter

    def test_lookup_false(self):
        qs = mock.Mock(spec=['filter'])
        f = self.test_filter_class(name='somefield')
        result = f.filter(qs, Lookup('', 'exact'))
        self.assertEqual(qs, result)

    def test_lookup_true(self):
        qs = mock.Mock(spec=['filter'])
        f = self.test_filter_class(name='somefield')
        result = f.filter(qs, Lookup('somesearch', 'exact'))
        qs.filter.assert_called_once_with(somefield__exact='somesearch')
        self.assertNotEqual(qs, result)
class CharFilterTests(TestCase):
    """CharFilter should render as a CharField."""

    def test_default_field(self):
        field = CharFilter().field
        self.assertIsInstance(field, forms.CharField)
class UUIDFilterTests(TestCase):
    """UUIDFilter should render as a UUIDField."""

    def test_default_field(self):
        field = UUIDFilter().field
        self.assertIsInstance(field, forms.UUIDField)
class BooleanFilterTests(TestCase):
    """Tests for BooleanFilter's field type and filtering semantics."""

    def test_default_field(self):
        field = BooleanFilter().field
        self.assertIsInstance(field, forms.NullBooleanField)

    def test_filtering(self):
        qs = mock.Mock(spec=['filter'])
        f = BooleanFilter(name='somefield')
        result = f.filter(qs, True)
        qs.filter.assert_called_once_with(somefield__exact=True)
        self.assertNotEqual(qs, result)

    def test_filtering_exclude(self):
        qs = mock.Mock(spec=['exclude'])
        f = BooleanFilter(name='somefield', exclude=True)
        result = f.filter(qs, True)
        qs.exclude.assert_called_once_with(somefield__exact=True)
        self.assertNotEqual(qs, result)

    @unittest.expectedFailure
    def test_filtering_skipped_with_blank_value(self):
        qs = mock.Mock()
        f = BooleanFilter(name='somefield')
        result = f.filter(qs, '')
        self.assertListEqual(qs.method_calls, [])
        self.assertEqual(qs, result)

    def test_filtering_skipped_with_none_value(self):
        qs = mock.Mock()
        f = BooleanFilter(name='somefield')
        result = f.filter(qs, None)
        self.assertListEqual(qs.method_calls, [])
        self.assertEqual(qs, result)

    def test_filtering_lookup_type(self):
        qs = mock.Mock(spec=['filter'])
        f = BooleanFilter(name='somefield', lookup_type='isnull')
        result = f.filter(qs, True)
        qs.filter.assert_called_once_with(somefield__isnull=True)
        self.assertNotEqual(qs, result)
class ChoiceFilterTests(TestCase):
    """ChoiceFilter should render as a ChoiceField."""

    def test_default_field(self):
        field = ChoiceFilter().field
        self.assertIsInstance(field, forms.ChoiceField)
class MultipleChoiceFilterTests(TestCase):
    """Tests for MultipleChoiceFilter, including OR and AND (conjoined) modes."""

    def test_default_field(self):
        field = MultipleChoiceFilter().field
        self.assertIsInstance(field, forms.MultipleChoiceField)

    def test_filtering_requires_name(self):
        qs = mock.Mock(spec=['filter'])
        f = MultipleChoiceFilter()
        with self.assertRaises(TypeError):
            f.filter(qs, ['value'])

    def test_conjoined_default_value(self):
        self.assertFalse(MultipleChoiceFilter().conjoined)

    def test_conjoined_true(self):
        self.assertTrue(MultipleChoiceFilter(conjoined=True).conjoined)

    def test_filtering(self):
        qs = mock.Mock(spec=['filter'])
        f = MultipleChoiceFilter(name='somefield')
        with mock.patch('django_filters.filters.Q') as mockQclass:
            mockQ1, mockQ2 = mock.MagicMock(), mock.MagicMock()
            mockQclass.side_effect = [mockQ1, mockQ2]

            f.filter(qs, ['value'])

            # Values are OR-ed together into one Q object.
            self.assertEqual(mockQclass.call_args_list,
                             [mock.call(), mock.call(somefield='value')])
            mockQ1.__ior__.assert_called_once_with(mockQ2)
            qs.filter.assert_called_once_with(mockQ1.__ior__.return_value)
            qs.filter.return_value.distinct.assert_called_once_with()

    def test_filtering_exclude(self):
        qs = mock.Mock(spec=['exclude'])
        f = MultipleChoiceFilter(name='somefield', exclude=True)
        with mock.patch('django_filters.filters.Q') as mockQclass:
            mockQ1, mockQ2 = mock.MagicMock(), mock.MagicMock()
            mockQclass.side_effect = [mockQ1, mockQ2]

            f.filter(qs, ['value'])

            self.assertEqual(mockQclass.call_args_list,
                             [mock.call(), mock.call(somefield='value')])
            mockQ1.__ior__.assert_called_once_with(mockQ2)
            qs.exclude.assert_called_once_with(mockQ1.__ior__.return_value)
            qs.exclude.return_value.distinct.assert_called_once_with()

    def test_filtering_on_required_skipped_when_len_of_value_is_len_of_field_choices(self):
        qs = mock.Mock(spec=[])
        f = MultipleChoiceFilter(name='somefield', required=True)
        f.always_filter = False

        # No choices at all: nothing to filter on.
        result = f.filter(qs, [])
        self.assertEqual(len(f.field.choices), 0)
        self.assertEqual(qs, result)

        # Selecting every available choice is a no-op filter.
        f.field.choices = ['some', 'values', 'here']
        result = f.filter(qs, ['some', 'values', 'here'])
        self.assertEqual(qs, result)

        result = f.filter(qs, ['other', 'values', 'there'])
        self.assertEqual(qs, result)

    @unittest.expectedFailure
    def test_filtering_skipped_with_empty_list_value_and_some_choices(self):
        qs = mock.Mock(spec=[])
        f = MultipleChoiceFilter(name='somefield')
        f.field.choices = ['some', 'values', 'here']
        result = f.filter(qs, [])
        self.assertEqual(qs, result)

    def test_filter_conjoined_true(self):
        """Tests that a filter with `conjoined=True` returns objects that
        have all the values included in `value`. For example filter
        users that have all of this books.
        """
        book_kwargs = {'price': 1, 'average_rating': 1}
        books = [Book.objects.create(**book_kwargs) for _ in range(6)]

        user1 = User.objects.create()
        user2 = User.objects.create()
        user3 = User.objects.create()
        user4 = User.objects.create()
        user5 = User.objects.create()

        user1.favorite_books.add(books[0], books[1])
        user2.favorite_books.add(books[0], books[1], books[2])
        user3.favorite_books.add(books[1], books[2])
        user4.favorite_books.add(books[2], books[3])
        user5.favorite_books.add(books[4], books[5])

        # Each case: (pks to require, list of user pks expected to match).
        filter_list = (
            ((books[0].pk, books[0].pk), [1, 2]),
            ((books[1].pk, books[1].pk), [1, 2, 3]),
            ((books[2].pk, books[2].pk), [2, 3, 4]),
            ((books[3].pk, books[3].pk), [4, ]),
            ((books[4].pk, books[4].pk), [5, ]),
            ((books[0].pk, books[1].pk), [1, 2]),
            ((books[0].pk, books[2].pk), [2, ]),
            ((books[1].pk, books[2].pk), [2, 3]),
            ((books[2].pk, books[3].pk), [4, ]),
            ((books[4].pk, books[5].pk), [5, ]),
            ((books[3].pk, books[4].pk), []),
        )
        users = User.objects.all()

        for item in filter_list:
            f = MultipleChoiceFilter(name='favorite_books__pk', conjoined=True)
            queryset = f.filter(users, item[0])
            expected_pks = [c[0] for c in queryset.values_list('pk')]
            self.assertListEqual(
                expected_pks,
                item[1],
                'Lists Differ: {0} != {1} for case {2}'.format(
                    expected_pks, item[1], item[0]))
class DateFilterTests(TestCase):
    """DateFilter should render as a DateField."""

    def test_default_field(self):
        field = DateFilter().field
        self.assertIsInstance(field, forms.DateField)
class DateTimeFilterTests(TestCase):
    """DateTimeFilter should render as a DateTimeField."""

    def test_default_field(self):
        field = DateTimeFilter().field
        self.assertIsInstance(field, forms.DateTimeField)
class TimeFilterTests(TestCase):
    """TimeFilter should render as a TimeField."""

    def test_default_field(self):
        field = TimeFilter().field
        self.assertIsInstance(field, forms.TimeField)
class ModelChoiceFilterTests(TestCase):
    """ModelChoiceFilter requires a queryset to build its field."""

    def test_default_field_without_queryset(self):
        f = ModelChoiceFilter()
        with self.assertRaises(TypeError):
            f.field

    def test_default_field_with_queryset(self):
        qs = mock.NonCallableMock(spec=[])
        f = ModelChoiceFilter(queryset=qs)
        field = f.field
        self.assertIsInstance(field, forms.ModelChoiceField)
        self.assertEqual(field.queryset, qs)
class ModelMultipleChoiceFilterTests(TestCase):
    """ModelMultipleChoiceFilter requires a queryset to build its field."""

    def test_default_field_without_queryset(self):
        f = ModelMultipleChoiceFilter()
        with self.assertRaises(TypeError):
            f.field

    def test_default_field_with_queryset(self):
        qs = mock.NonCallableMock(spec=[])
        f = ModelMultipleChoiceFilter(queryset=qs)
        field = f.field
        self.assertIsInstance(field, forms.ModelMultipleChoiceField)
        self.assertEqual(field.queryset, qs)
class NumberFilterTests(TestCase):
    """Tests for NumberFilter, including the historical bug with value 0."""

    def test_default_field(self):
        field = NumberFilter().field
        self.assertIsInstance(field, forms.DecimalField)

    def test_filtering(self):
        qs = mock.Mock(spec=['filter'])
        f = NumberFilter()
        f.filter(qs, 1)
        qs.filter.assert_called_once_with(None__exact=1)
        # Also test 0 as it once had a bug
        qs.reset_mock()
        f.filter(qs, 0)
        qs.filter.assert_called_once_with(None__exact=0)

    def test_filtering_exclude(self):
        qs = mock.Mock(spec=['exclude'])
        f = NumberFilter(exclude=True)
        f.filter(qs, 1)
        qs.exclude.assert_called_once_with(None__exact=1)
        # Also test 0 as it once had a bug
        qs.reset_mock()
        f.filter(qs, 0)
        qs.exclude.assert_called_once_with(None__exact=0)
class NumericRangeFilterTests(TestCase):
    """Tests for NumericRangeFilter over slice-like (start, stop) values."""

    def test_default_field(self):
        field = NumericRangeFilter().field
        self.assertIsInstance(field, RangeField)

    def test_filtering(self):
        qs = mock.Mock(spec=['filter'])
        value = mock.Mock(start=20, stop=30)
        f = NumericRangeFilter()
        f.filter(qs, value)
        qs.filter.assert_called_once_with(None__exact=(20, 30))

    def test_filtering_exclude(self):
        qs = mock.Mock(spec=['exclude'])
        value = mock.Mock(start=20, stop=30)
        f = NumericRangeFilter(exclude=True)
        f.filter(qs, value)
        qs.exclude.assert_called_once_with(None__exact=(20, 30))

    def test_filtering_skipped_with_none_value(self):
        qs = mock.Mock(spec=['filter'])
        f = NumericRangeFilter()
        result = f.filter(qs, None)
        self.assertEqual(qs, result)

    def test_field_with_lookup_type(self):
        qs = mock.Mock()
        value = mock.Mock(start=20, stop=30)
        # NOTE: original spelled this ('overlap') -- same string value.
        f = NumericRangeFilter(lookup_type='overlap')
        f.filter(qs, value)
        qs.filter.assert_called_once_with(None__overlap=(20, 30))

    @unittest.expectedFailure
    def test_filtering_lower_field_higher_than_upper_field(self):
        qs = mock.Mock(spec=['filter'])
        value = mock.Mock(start=35, stop=30)
        f = NumericRangeFilter()
        result = f.filter(qs, value)
        self.assertEqual(qs, result)

    def test_zero_to_zero(self):
        qs = mock.Mock(spec=['filter'])
        value = mock.Mock(start=0, stop=0)
        f = NumericRangeFilter()
        f.filter(qs, value)
        qs.filter.assert_called_once_with(None__exact=(0, 0))
class RangeFilterTests(TestCase):
    """Tests for RangeFilter: __range, open-ended __gte/__lte, and skips."""

    def test_default_field(self):
        field = RangeFilter().field
        self.assertIsInstance(field, RangeField)

    def test_filtering_range(self):
        qs = mock.Mock(spec=['filter'])
        value = mock.Mock(start=20, stop=30)
        f = RangeFilter()
        f.filter(qs, value)
        qs.filter.assert_called_once_with(None__range=(20, 30))

    def test_filtering_exclude(self):
        qs = mock.Mock(spec=['exclude'])
        value = mock.Mock(start=20, stop=30)
        f = RangeFilter(exclude=True)
        f.filter(qs, value)
        qs.exclude.assert_called_once_with(None__range=(20, 30))

    def test_filtering_start(self):
        # Only a lower bound: falls back to __gte.
        qs = mock.Mock(spec=['filter'])
        value = mock.Mock(start=20, stop=None)
        f = RangeFilter()
        f.filter(qs, value)
        qs.filter.assert_called_once_with(None__gte=20)

    def test_filtering_stop(self):
        # Only an upper bound: falls back to __lte.
        qs = mock.Mock(spec=['filter'])
        value = mock.Mock(start=None, stop=30)
        f = RangeFilter()
        f.filter(qs, value)
        qs.filter.assert_called_once_with(None__lte=30)

    def test_filtering_skipped_with_none_value(self):
        qs = mock.Mock(spec=['filter'])
        f = RangeFilter()
        result = f.filter(qs, None)
        self.assertEqual(qs, result)

    def test_filtering_ignores_lookup_type(self):
        qs = mock.Mock()
        value = mock.Mock(start=20, stop=30)
        f = RangeFilter(lookup_type='gte')
        f.filter(qs, value)
        qs.filter.assert_called_once_with(None__range=(20, 30))
class DateRangeFilterTests(TestCase):
    """Tests for the fixed-choice DateRangeFilter (today, past 7 days, ...)."""

    def test_creating(self):
        f = DateRangeFilter()
        self.assertIn('choices', f.extra)
        self.assertEqual(len(DateRangeFilter.options), len(f.extra['choices']))

    def test_default_field(self):
        field = DateRangeFilter().field
        self.assertIsInstance(field, forms.ChoiceField)

    def test_filtering(self):
        qs = mock.Mock(spec=['all'])
        f = DateRangeFilter()
        f.filter(qs, '')
        qs.all.assert_called_once_with()

    # the correct behavior fails right now
    @unittest.expectedFailure
    def test_filtering_skipped_with_blank_value(self):
        qs = mock.Mock(spec=[])
        f = DateRangeFilter()
        result = f.filter(qs, '')
        self.assertEqual(qs, result)

    @unittest.expectedFailure
    def test_filtering_skipped_with_out_of_range_value(self):
        qs = mock.Mock(spec=[])
        f = DateRangeFilter()
        result = f.filter(qs, 999)
        self.assertEqual(qs, result)

    def test_filtering_for_this_year(self):
        qs = mock.Mock(spec=['filter'])
        with mock.patch('django_filters.filters.now') as mock_now:
            now_dt = mock_now.return_value
            f = DateRangeFilter()
            f.filter(qs, '4')
            qs.filter.assert_called_once_with(
                None__year=now_dt.year)

    def test_filtering_for_this_month(self):
        qs = mock.Mock(spec=['filter'])
        with mock.patch('django_filters.filters.now') as mock_now:
            now_dt = mock_now.return_value
            f = DateRangeFilter()
            f.filter(qs, '3')
            qs.filter.assert_called_once_with(
                None__year=now_dt.year, None__month=now_dt.month)

    def test_filtering_for_7_days(self):
        qs = mock.Mock(spec=['filter'])
        with mock.patch('django_filters.filters.now'):
            with mock.patch('django_filters.filters.timedelta') as mock_td:
                with mock.patch(
                        'django_filters.filters._truncate') as mock_truncate:
                    mock_dt1, mock_dt2 = mock.MagicMock(), mock.MagicMock()
                    mock_truncate.side_effect = [mock_dt1, mock_dt2]
                    f = DateRangeFilter()
                    f.filter(qs, '2')
                    self.assertEqual(mock_td.call_args_list,
                                     [mock.call(days=7), mock.call(days=1)])
                    qs.filter.assert_called_once_with(
                        None__lt=mock_dt2, None__gte=mock_dt1)

    def test_filtering_for_today(self):
        qs = mock.Mock(spec=['filter'])
        with mock.patch('django_filters.filters.now') as mock_now:
            now_dt = mock_now.return_value
            f = DateRangeFilter()
            f.filter(qs, '1')
            qs.filter.assert_called_once_with(
                None__year=now_dt.year,
                None__month=now_dt.month,
                None__day=now_dt.day)

    def test_filtering_for_yesterday(self):
        qs = mock.Mock(spec=['filter'])
        with mock.patch('django_filters.filters.now') as mock_now:
            now_dt = mock_now.return_value
            f = DateRangeFilter()
            f.filter(qs, '5')
            qs.filter.assert_called_once_with(
                None__year=now_dt.year,
                None__month=now_dt.month,
                None__day=(now_dt - timedelta(days=1)).day,
            )
class DateFromToRangeFilterTests(TestCase):
    """Tests for DateFromToRangeFilter over date (start, stop) values."""

    def test_default_field(self):
        field = DateFromToRangeFilter().field
        self.assertIsInstance(field, DateRangeField)

    def test_filtering_range(self):
        qs = mock.Mock(spec=['filter'])
        value = mock.Mock(start=date(2015, 4, 7), stop=date(2015, 9, 6))
        f = DateFromToRangeFilter()
        f.filter(qs, value)
        qs.filter.assert_called_once_with(
            None__range=(date(2015, 4, 7), date(2015, 9, 6)))

    def test_filtering_start(self):
        qs = mock.Mock(spec=['filter'])
        value = mock.Mock(start=date(2015, 4, 7), stop=None)
        f = DateFromToRangeFilter()
        f.filter(qs, value)
        qs.filter.assert_called_once_with(None__gte=date(2015, 4, 7))

    def test_filtering_stop(self):
        qs = mock.Mock(spec=['filter'])
        value = mock.Mock(start=None, stop=date(2015, 9, 6))
        f = DateFromToRangeFilter()
        f.filter(qs, value)
        qs.filter.assert_called_once_with(None__lte=date(2015, 9, 6))

    def test_filtering_skipped_with_none_value(self):
        qs = mock.Mock(spec=['filter'])
        f = DateFromToRangeFilter()
        result = f.filter(qs, None)
        self.assertEqual(qs, result)

    def test_filtering_ignores_lookup_type(self):
        qs = mock.Mock()
        value = mock.Mock(start=date(2015, 4, 7), stop=date(2015, 9, 6))
        f = DateFromToRangeFilter(lookup_type='gte')
        f.filter(qs, value)
        qs.filter.assert_called_once_with(
            None__range=(date(2015, 4, 7), date(2015, 9, 6)))
class TimeRangeFilterTests(TestCase):
    """Tests for TimeRangeFilter over time (start, stop) values."""

    def test_default_field(self):
        field = TimeRangeFilter().field
        self.assertIsInstance(field, TimeRangeField)

    def test_filtering_range(self):
        qs = mock.Mock(spec=['filter'])
        value = mock.Mock(start=time(10, 15), stop=time(12, 30))
        f = TimeRangeFilter()
        f.filter(qs, value)
        qs.filter.assert_called_once_with(
            None__range=(time(10, 15), time(12, 30)))

    def test_filtering_start(self):
        qs = mock.Mock(spec=['filter'])
        value = mock.Mock(start=time(10, 15), stop=None)
        f = TimeRangeFilter()
        f.filter(qs, value)
        qs.filter.assert_called_once_with(None__gte=time(10, 15))

    def test_filtering_stop(self):
        qs = mock.Mock(spec=['filter'])
        value = mock.Mock(start=None, stop=time(12, 30))
        f = TimeRangeFilter()
        f.filter(qs, value)
        qs.filter.assert_called_once_with(None__lte=time(12, 30))

    def test_filtering_skipped_with_none_value(self):
        qs = mock.Mock(spec=['filter'])
        f = TimeRangeFilter()
        result = f.filter(qs, None)
        self.assertEqual(qs, result)

    def test_filtering_ignores_lookup_type(self):
        qs = mock.Mock()
        value = mock.Mock(start=time(10, 15), stop=time(12, 30))
        f = TimeRangeFilter(lookup_type='gte')
        f.filter(qs, value)
        qs.filter.assert_called_once_with(
            None__range=(time(10, 15), time(12, 30)))
class AllValuesFilterTests(TestCase):
    """AllValuesFilter builds its choices from the model's column values."""

    def test_default_field_without_assigning_model(self):
        f = AllValuesFilter()
        with self.assertRaises(AttributeError):
            f.field

    def test_default_field_with_assigning_model(self):
        mocked = mock.Mock()
        # Stub out the manager call chain the filter performs:
        # _default_manager.distinct().order_by(...).values_list(...)
        chained_call = '.'.join(['_default_manager', 'distinct.return_value',
                                 'order_by.return_value',
                                 'values_list.return_value'])
        mocked.configure_mock(**{chained_call: iter([])})
        f = AllValuesFilter()
        f.model = mocked
        field = f.field
        self.assertIsInstance(field, forms.ChoiceField)
class LookupTypesTests(TestCase):
    """Tests that a custom LOOKUP_TYPES list drives the generated choices."""

    def test_custom_lookup_types(self):
        # Fix: the original overwrote the module-level LOOKUP_TYPES and
        # never restored it, leaking the custom list into any test that
        # runs afterwards.  Restore the original value on cleanup.
        original_lookup_types = filters.LOOKUP_TYPES
        self.addCleanup(
            setattr, filters, 'LOOKUP_TYPES', original_lookup_types)

        filters.LOOKUP_TYPES = [
            ('', '---------'),
            ('exact', 'Is equal to'),
            ('not_exact', 'Is not equal to'),
            ('lt', 'Lesser than'),
            ('gt', 'Greater than'),
            ('gte', 'Greater than or equal to'),
            ('lte', 'Lesser than or equal to'),
            ('startswith', 'Starts with'),
            ('endswith', 'Ends with'),
            ('contains', 'Contains'),
            ('not_contains', 'Does not contain'),
        ]

        # lookup_type=None exposes the full custom list as choices.
        f = Filter(lookup_type=None)
        field = f.field
        choice_field = field.fields[1]
        all_choices = choice_field.choices

        self.assertIsInstance(field, LookupTypeField)
        self.assertEqual(all_choices, filters.LOOKUP_TYPES)
        self.assertEqual(all_choices[1][0], 'exact')
        self.assertEqual(all_choices[1][1], 'Is equal to')

        # A tuple of lookup types limits the choices to that subset,
        # with display names taken from the custom list.
        custom_f = Filter(lookup_type=('endswith', 'not_contains'))
        custom_field = custom_f.field
        custom_choice_field = custom_field.fields[1]
        my_custom_choices = custom_choice_field.choices

        available_lookup_types = [
            ('endswith', 'Ends with'),
            ('not_contains', 'Does not contain'),
        ]

        self.assertIsInstance(custom_field, LookupTypeField)
        self.assertEqual(my_custom_choices, available_lookup_types)
        self.assertEqual(my_custom_choices[0][0], 'endswith')
        self.assertEqual(my_custom_choices[0][1], 'Ends with')
        self.assertEqual(my_custom_choices[1][0], 'not_contains')
        self.assertEqual(my_custom_choices[1][1], 'Does not contain')
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import synapse
from synapse.server import HomeServer
from synapse.config._base import ConfigError
from synapse.config.logger import setup_logging
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.http.site import SynapseSite
from synapse.federation import send_queue
from synapse.federation.units import Edu
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.storage.engines import create_engine
from synapse.storage.presence import UserPresenceState
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse import events
from twisted.internet import reactor, defer
from twisted.web.resource import Resource
from daemonize import Daemonize
import sys
import logging
import gc
import ujson as json
logger = logging.getLogger("synapse.app.appservice")
class FederationSenderSlaveStore(
    SlavedDeviceInboxStore, TransactionStore, SlavedReceiptsStore, SlavedEventStore,
    SlavedRegistrationStore, SlavedDeviceStore,
):
    """Composite replication-slave datastore exposing the storage the
    federation sender worker needs (device inbox, transactions, receipts,
    events, registration and devices), combined purely via inheritance.
    """
    pass
class FederationSenderServer(HomeServer):
    """HomeServer variant that runs only the federation sender worker."""

    def get_db_conn(self, run_new_connection=True):
        """Open a raw connection to the configured database.

        Any param beginning with cp_ is a parameter for adbapi, and should
        not be passed to the database engine.
        """
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)
        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        """Create the slaved datastore this worker uses."""
        logger.info("Setting up.")
        self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        """Bind an HTTP listener; only the metrics resource is served."""
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
        root_resource = create_resource_tree(resources, Resource())
        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )
        logger.info("Synapse federation_sender now listening on port %d", port)

    def start_listening(self, listeners):
        """Start HTTP and manhole listeners from the worker config."""
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]
                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

    @defer.inlineCallbacks
    def replicate(self):
        """Long-poll the master's replication endpoint forever, feeding
        each batch to the datastore and the federation send handler.
        """
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        send_handler = FederationSenderHandler(self)
        send_handler.on_start()
        while True:
            try:
                args = store.stream_positions()
                args.update((yield send_handler.stream_positions()))
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
                yield send_handler.process_replication(result)
            except Exception:
                # BUG FIX: the original bare ``except:`` also swallowed
                # SystemExit/KeyboardInterrupt and GeneratorExit (fatal in
                # an inlineCallbacks generator); catch Exception only.
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(30)
def start(config_options):
    """Parse config, construct the FederationSenderServer, and run it
    (optionally daemonized).

    Args:
        config_options: Command-line argument list (typically sys.argv[1:]).
    """
    try:
        config = HomeServerConfig.load_config(
            "Synapse federation sender", config_options
        )
    except ConfigError as e:
        # NOTE(review): ``e.message`` is Python 2 only — under Python 3 this
        # would raise AttributeError; confirm intended interpreter.
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    # This entry point must only be used for the federation_sender worker.
    assert config.worker_app == "synapse.app.federation_sender"

    setup_logging(config.worker_log_config, config.worker_log_file)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    # Refuse to start if the main process would also be sending federation:
    # two senders would duplicate traffic.
    if config.send_federation:
        sys.stderr.write(
            "\nThe send_federation must be disabled in the main synapse process"
            "\nbefore they can be run in a separate worker."
            "\nPlease add ``send_federation: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the federation sender to start, since it is disabled in the main
    # config (checked above).
    config.send_federation = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ps = FederationSenderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def run():
        # Runs the twisted reactor inside a logging context, applying
        # resource limits and GC thresholds first.
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        # Kick off replication and background profiling/caching once the
        # reactor is running.
        ps.replicate()
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-federation-sender",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()
class FederationSenderHandler(object):
    """Processes the replication stream and forwards the appropriate entries
    to the federation sender.
    """
    def __init__(self, hs):
        self.store = hs.get_datastore()
        self.federation_sender = hs.get_federation_sender()
        # Per-room typing/serial bookkeeping (currently unused here).
        self._room_serials = {}
        self._room_typing = {}

    def on_start(self):
        # There may be some events that are persisted but haven't been sent,
        # so send them now.
        self.federation_sender.notify_new_events(
            self.store.get_room_max_stream_ordering()
        )

    @defer.inlineCallbacks
    def stream_positions(self):
        """Return the replication stream positions this handler consumes."""
        stream_id = yield self.store.get_federation_out_pos("federation")
        defer.returnValue({
            "federation": stream_id,

            # Ack stuff we've "processed", this should only be called from
            # one process.
            "federation_ack": stream_id,
        })

    @defer.inlineCallbacks
    def process_replication(self, result):
        """Handle one replication batch: forward federation rows and poke
        the sender about new events.
        """
        # The federation stream contains things that we want to send out, e.g.
        # presence, typing, etc.
        fed_stream = result.get("federation")
        if fed_stream:
            latest_id = int(fed_stream["position"])

            # The federation stream contains a bunch of different types of
            # rows that need to be handled differently. We parse the rows, put
            # them into the appropriate collection and then send them off.
            presence_to_send = {}
            keyed_edus = {}
            edus = {}
            failures = {}
            device_destinations = set()

            # Parse the rows in the stream
            for row in fed_stream["rows"]:
                position, typ, content_js = row
                content = json.loads(content_js)

                if typ == send_queue.PRESENCE_TYPE:
                    destination = content["destination"]
                    state = UserPresenceState.from_dict(content["state"])

                    presence_to_send.setdefault(destination, []).append(state)
                elif typ == send_queue.KEYED_EDU_TYPE:
                    key = content["key"]
                    edu = Edu(**content["edu"])

                    keyed_edus.setdefault(
                        edu.destination, {}
                    )[(edu.destination, tuple(key))] = edu
                elif typ == send_queue.EDU_TYPE:
                    edu = Edu(**content)

                    edus.setdefault(edu.destination, []).append(edu)
                elif typ == send_queue.FAILURE_TYPE:
                    destination = content["destination"]
                    failure = content["failure"]

                    failures.setdefault(destination, []).append(failure)
                elif typ == send_queue.DEVICE_MESSAGE_TYPE:
                    device_destinations.add(content["destination"])
                else:
                    # BUG FIX: the original passed the format string and the
                    # value as two separate Exception args, so %r was never
                    # interpolated into the message.
                    raise Exception(
                        "Unrecognised federation type: %r" % (typ,)
                    )

            # We've finished collecting, send everything off
            for destination, states in presence_to_send.items():
                self.federation_sender.send_presence(destination, states)

            for destination, edu_map in keyed_edus.items():
                for key, edu in edu_map.items():
                    self.federation_sender.send_edu(
                        edu.destination, edu.edu_type, edu.content, key=key,
                    )

            for destination, edu_list in edus.items():
                for edu in edu_list:
                    self.federation_sender.send_edu(
                        edu.destination, edu.edu_type, edu.content, key=None,
                    )

            for destination, failure_list in failures.items():
                for failure in failure_list:
                    self.federation_sender.send_failure(destination, failure)

            for destination in device_destinations:
                self.federation_sender.send_device_messages(destination)

            # Record where we are in the stream.
            yield self.store.update_federation_out_pos(
                "federation", latest_id
            )

        # We also need to poke the federation sender when new events happen
        event_stream = result.get("events")
        if event_stream:
            latest_pos = event_stream["position"]
            self.federation_sender.notify_new_events(latest_pos)
# Script entry point: run the federation sender under a root logging context.
if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
| |
#!/usr/bin/env python
"""
Extract alignment statistics from a SAM/BAM file.
Adapted from the Celloline stats script
available at: https://github.com/Teichlab/celloline/blob/master/lib/stats.py
"""
import os
import sys
import re
import argparse
import pysam
import logging
import cPickle as pickle
from collections import Counter, defaultdict, OrderedDict
from intervaltree import IntervalTree
from joblib import Parallel, delayed
#LOAD GTF FILE
def load_gtf(gtf_path):
    """
    Load a GTF annotation and create an index using IntervalTrees.

    Args:
        gtf_path: Path to the GTF file to load.

    Returns:
        Dictionary containing IntervalTree indexes of the annotation,
        keyed by chromosome name (GTF column 1).
    """
    gtf_index = defaultdict()
    with open(gtf_path) as gtf_file:
        for line in gtf_file:
            # Skip header/comment lines.
            if line.startswith("#"):
                continue
            fields = line.split("\t")
            # First attribute of column 9 is expected to be the gene id.
            first_attribute = fields[8].split(";")[0].split(" ")
            gene_id = first_attribute[1]
            feature = fields[2]
            # TYPE(Gene, exon etc.), START, END, STRAND, gene_ID
            info = [feature, fields[3], fields[4], fields[6], gene_id]
            # Zero-length entries cannot be placed in an interval tree.
            if feature != "" and fields[3] != fields[4]:
                chrom = fields[0]
                tree = gtf_index[chrom] if chrom in gtf_index else IntervalTree()
                tree.addi(int(info[1]), int(info[2]), info)
                gtf_index[chrom] = tree
    return gtf_index
def gen_stats(input_file, input_type, sample_name, gtf_dict):
    """
    Generate alignment stats from a SAM/BAM file.

    Loop over alignments in a SAM/BAM file and extract statistics such as the
    numer of reads aligned to introns, exons, intergenic regions etc.

    Args:
        input_file: An open BAM or SAM file.
        input_type: Whether the file is 'bam' or 'sam'.
        sample_name: A name relating to this file.
        gtf_dict: Dictionary containing GTF index.

    Returns:
        Dictionary containing alignment statistics.
    """
    logger = logging.getLogger("stats." + sample_name[0:10])
    #OUTPUT TABLE CONTAING STATS
    output_table = OrderedDict()
    #Dict indicating to which genes a specific read maps to
    #It is a temporary dict
    exonic_mappings_temp = defaultdict(str)
    #Dict indicating which read is multi-mapped
    #It is a temporary dict
    exonic_multi_table = defaultdict(str)
    # Sample
    output_table["sample"] = sample_name
    #MAPPABILITY
    output_table["total"] = 0
    output_table["mapped"] = 0
    output_table["unmapped"] = 0
    output_table["unique"] = 0
    output_table["multi"] = 0
    #CODING VERSUS NON-CODING REGIONS
    output_table["intergenic"] = 0
    output_table["intragenic"] = 0
    output_table["exonic"] = 0
    output_table["intronic"] = 0
    output_table["ambigious"] = 0
    #CODING REGIONS MAPPABILITY
    output_table["exonicU"] = 0
    output_table["exonicM"] = 0
    #ALIGNMENT CODING VS NONCODING
    output_table["alignments"] = 0
    output_table["multi-intergenic"] = 0
    output_table["multi-intragenic"] = 0
    output_table["multi-exonic"] = 0
    output_table["multi-intronic"] = 0
    output_table["multi-ambigious"] = 0
    #ERROR
    output_table["perfect"] = 0
    output_table["partly_perfect"] = 0
    output_table["mapped_no_correct"] = 0
    # Soft-clip histogram buckets S_0..S_9 plus an overflow bucket.
    for i in range(0, 10):
        output_table["S_" + str(i)] = 0
    output_table["S_10+"] = 0
    output_table["I"] = 0
    output_table["D"] = 0
    output_table["INDEL"] = 0
    # Counter of how many times each read name (+mate tag) has been seen;
    # used to distinguish unique from multi-mapped reads.
    reads = Counter()
    if input_type == "bam":
        # pysam stores reference ids; keep the name lookup table and switch
        # to a raw record iterator.
        ref_map = input_file.references
        input_file = input_file.fetch(until_eof=True)
    line_count = 0
    for line in input_file:
        line_count += 1
        if input_type == "bam":  # BAM input line
            split = str(line).split("\t")
            # Translate numeric reference id to chromosome name and convert
            # 0-based position to 1-based (SAM convention).
            split[2] = ref_map[int(split[2])]
            split[3] = int(split[3]) + 1
        elif not line.startswith("@"):  # SAM input line
            split = line.split("\t")
        else:
            # SAM header line.
            continue
        read_name = split[0]
        flag_code = int(split[1])
        chrom = split[2]
        pos = split[3]
        # CIGAR string, parsed character by character further below.
        errors = split[5]
        errors_a = list(errors)
        number = ""
        num = 0
        error_table = defaultdict(int)
        name_and_flag = read_name
        #CHECK IF READ MAPPED OR UNMAPPED
        #IT US UNMAPPED
        if flag_code & 0x0004 != 0:
            output_table["unmapped"] += 1
            output_table["total"] += 1
            error_table["*"] += 1
        #IT IS MAPPED
        else:
            if flag_code & 0x0001 != 0:  #This is paired end
                if flag_code & 0x0040 != 0:  #1st read
                    name_and_flag += ";first"
                if flag_code & 0x0080 != 0:  #2nd read
                    name_and_flag += ";second"
            # CHECK TO WHICH GENE(S) IT MAPPED TO
            genes_info, num_genes, num_exons = get_gene(gtf_dict, [chrom, pos])
            output_table["alignments"] += 1.0
            #STATS
            # First time this read is seen -> counted as unique (may be
            # demoted to multi-mapped if seen again later).
            if name_and_flag not in reads:
                reads[name_and_flag] += 1
                output_table["unique"] += 1
                output_table["total"] += 1
                output_table["mapped"] += 1
                if num_genes == 0:
                    output_table["intergenic"] += 1
                elif num_genes == 1:
                    output_table["intragenic"] += 1
                    if num_exons == 0:
                        output_table["intronic"] += 1
                    else:
                        output_table["exonic"] += 1
                        output_table["exonicU"] += 1
                        exons = []
                        if name_and_flag in exonic_mappings_temp:
                            exons = exonic_mappings_temp[name_and_flag]
                        exons.append([genes_info[0], chrom, pos])
                        exonic_mappings_temp[name_and_flag] = exons
                elif num_genes > 1:
                    output_table["ambigious"] += 1
            #READ IS MULTI-MAPPED
            else:
                # Second sighting: undo the unique/exonicU counts recorded
                # on the first sighting and count the read as multi-mapped.
                if reads[name_and_flag] == 1:
                    output_table["unique"] -= 1
                    output_table["exonicU"] -= 1
                    output_table["multi"] += 1
                reads[name_and_flag] += 1
                exons = []
                #GET KNOWLEDGE IF FIRST MAPPING EXONIC OR INTRONIC
                if name_and_flag in exonic_mappings_temp:
                    exons = exonic_mappings_temp[name_and_flag]
                if num_genes == 0:
                    output_table["multi-intergenic"] += (1)
                elif num_genes == 1:
                    output_table["multi-intragenic"] += (1)
                    if num_exons == 0:
                        output_table["multi-intronic"] += (1)
                    else:
                        output_table["multi-exonic"] += (1)
                        exons.append([genes_info[0], chrom, pos])
                elif num_genes > 1:
                    output_table["multi-ambigious"] += (1)
                #IF AT LEAST ONE EXONIC ALIGNMENT
                if len(exons) > 0:
                    exonic_multi_table[name_and_flag] = exons
        #PARSE MAPPING ERRORS
        # Accumulate CIGAR op lengths into error_table, e.g. "10M2S" ->
        # {"M": 10, "S": 2}.
        for i in errors_a:
            if re.match("[0-9]", i):
                number += (i)
            elif re.match("[A-Z]", i):
                num = int(number)
                error_table[i] += num
                number = ""
        #TABLE OF HOW MANY READS MAP PERFECT, PARTLY PERFECT ETC
        # NOTE: unmapped reads (CIGAR "*") fall through all three branches
        # and then increment S_0 below.
        if "M" in error_table and len(error_table) == 1:
            output_table["perfect"] += 1
        elif "M" in error_table and len(error_table) > 1:
            output_table["partly_perfect"] += 1
        elif "M" not in error_table and "*" not in error_table:
            output_table["mapped_no_correct"] += 1
        if "S" in error_table:
            if int(error_table["S"]) < 10:
                output_table["S_" + str(error_table["S"])] += 1
            else:
                output_table["S_10+"] += 1
        elif "S" not in error_table:
            output_table["S_0"] += 1
        if "I" in error_table:
            output_table["I"] += 1
        if "D" in error_table:
            output_table["D"] += 1
        if "I" in error_table or "D" in error_table:
            output_table["INDEL"] += 1
        # Progress logging every million alignments.
        if (line_count % 1000000) == 0:
            logger.debug(sample_name + " line " + str(line_count) + "...")
    output_table["exonicM"] = len(exonic_multi_table.keys())
    return output_table
def get_stats_line(stats_table):
    """
    Get an output line from a stats table.

    Read-level stats are converted to percentages of mapped reads and
    alignment-level stats to percentages of total alignments.

    Args:
        stats_table: Dictionary of alignment statistics.

    Returns:
        String representing the results for one file (comma separated,
        newline terminated).
    """
    logger = logging.getLogger("stats.extract")
    per_mapped = ["unique", "multi", "intragenic", "intergenic",
                  "exonic", "intronic", "ambigious", "exonicM", "exonicU"]
    per_alignment = ["multi-intragenic", "multi-intergenic", "multi-exonic",
                     "multi-intronic", "multi-ambigious"]
    out_line = ""
    for stat, value in stats_table.iteritems():
        if stat in per_mapped:
            fraction = (value + 0.0) / (stats_table["mapped"] + 0.0)
            value = "%.2f" % (100.0 * fraction)
        elif stat in per_alignment:
            fraction = value + 0.0
            if stats_table["alignments"] != 0:
                fraction = fraction / (stats_table["alignments"] + 0.0)
            value = "%.2f" % (100.0 * fraction)
        value = str(value)
        # "sample" is the first column and gets no separator in front.
        if stat == "sample":
            out_line += value
        else:
            out_line += "," + value
        logger.debug(stat + " : " + value)
    return out_line + "\n"
def write_stats(output_path, stats_list):
    """
    Write a series of results to a file.

    The header row is taken from the keys of the first stats table.

    Args:
        output_path: Path to write results to.
        stats_list: List of dictionaries containing results from input files.
    """
    header = ",".join(stats_list[0].keys())
    with open(output_path, "w") as out_file:
        out_file.write(header + "\n")
        for table in stats_list:
            out_file.write(get_stats_line(table))
def get_gene(gtf_dict, pos_pair):
    """
    Identify which genes overlap a given position.

    Args:
        gtf_dict: Dictionary containing GTF index.
        pos_pair: Tuple containing genomic position (chrom, pos).

    Returns:
        Tuple containing the list of overlapping genes, the number of
        overlapping genes and the number of overlapping exons.
    """
    num_genes = 0
    num_exons = 0
    chrom = pos_pair[0]
    # Unknown chromosome: nothing overlaps.
    if chrom not in gtf_dict:
        #print ("Ignored pos: " + pos_pair[0])
        return ([], num_genes, num_exons)
    hits = gtf_dict[chrom].search(int(pos_pair[1]))
    gene_list = []
    for hit in hits:
        # Interval payload: [feature, start, end, strand, gene_id].
        meta = hit[2]
        if meta[0] == "gene":
            gene_list.append(meta)
            num_genes += 1
        elif meta[0] == "exon":
            num_exons += 1
    return (gene_list, num_genes, num_exons)
def process_file(input_file, input_type, index, is_parallel):
    """
    Process an individual SAM/BAM file.

    How we want to process the file depends on the input type and whether we
    are operating in parallel. If in parallel the index must be loaded for each
    input file. If the input is a BAM file it needs to be read using Pysam, if
    SAM it can be read directly as a text file.

    Args:
        input_file: Path to the input file.
        input_type: Whether the file is 'bam' or 'sam'.
        index: If operating in parallel a string to the index file, if not the
               loaded GTF index dictionary.
        is_parallel: Whether to operate in parallel.

    Returns:
        Dictionary containing alignment statistics for the input file.
    """
    sample_name = input_file.split("/")[-1]
    logger = logging.getLogger("stats." + sample_name[0:10])
    logger.info("Processing " + sample_name + "...")

    # In parallel mode each worker unpickles its own copy of the index.
    if is_parallel:
        logger.info("Loading index...")
        with open(index, "rb") as index_file:
            annotation = pickle.load(index_file)
        logger.info("Loaded.")
    else:
        annotation = index

    if input_type == "sam":
        logger.info("Parsing SAM file...")
        with open(input_file) as sam:
            stats_table = gen_stats(sam, input_type, sample_name, annotation)
    elif input_type == "bam":
        logger.info("Parsing BAM file...")
        bam = pysam.AlignmentFile(input_file, "rb")
        stats_table = gen_stats(bam, input_type, sample_name, annotation)
    logger.info("Finished " + sample_name)
    return stats_table
def get_index(args):
    """
    Load a GTF index if available or create from GTF file if not found.

    If a valid path to an index file is given that file will be loaded. If no
    index file was specified or the file does not exist the annotation will be
    read from a GTF file. It will then be pickled if an index file is specified.
    When running in parallel the path to the index file is returned rather than
    the index dictionary itself.

    Args:
        args: Options from the command line.

    Returns:
        Dictionary containing GTF index or path to index file if in parallel.
    """
    logger = logging.getLogger("stats.index")
    have_index_file = args.index and os.path.isfile(args.index)
    if have_index_file:
        logger.info("Index found at " + args.index)
        if not args.is_parallel:
            logger.info("Loading index...")
            with open(args.index, "rb") as index_file:
                index = pickle.load(index_file)
            logger.info("Loaded.")
        else:
            # Workers load it themselves; hand back the path.
            index = args.index
    elif args.gtf and os.path.isfile(args.gtf):
        logger.info("No index file found.")
        logger.info("Loading GTF file...")
        gtf_dict = load_gtf(args.gtf)
        logger.info("Loaded.")
        if args.index:
            # Cache the parsed annotation for future runs.
            logger.info("Saving index to " + args.index + "...")
            with open(args.index, "wb") as index_file:
                pickle.dump(gtf_dict, index_file, -1)
            logger.info("Saved.")
        if not args.is_parallel:
            index = gtf_dict
        else:
            index = args.index
    return index
def get_args():
    """
    Read arguments from the command line and check they are valid.
    """
    logger = logging.getLogger("stats.args")
    arg_parser = argparse.ArgumentParser(
        description="Extract alignment statistics from a SAM/BAM file")
    arg_parser.add_argument("inputs",
                            metavar="SAM/BAM",
                            nargs="+",
                            help="Input SAM or BAM files")
    arg_parser.add_argument("-o", "--out",
                            help="Output file",
                            required=True)
    arg_parser.add_argument("-g", "--gtf",
                            help="GTF annotation file")
    arg_parser.add_argument("-i", "--index",
                            help="""Annotation index file. Required when
                                    operating in parallel.""")
    arg_parser.add_argument("-t", "--type",
                            choices=["sam", "bam"],
                            help="Type of input file",
                            required=True)
    arg_parser.add_argument("-p", "--parallel",
                            type=int,
                            default=1,
                            help="""Number of files to process in parallel.
                                    Requires N + 1 threads if greater than 1.""")
    args = arg_parser.parse_args()
    args.is_parallel = False
    # Validate job count.
    if args.parallel < 1:
        logger.error("Number of parallel files must be positive")
        sys.exit()
    elif args.parallel > 1:
        args.is_parallel = True
        logger.info("Running with " + str(args.parallel) + " jobs")
    # Parallel mode requires an index path each worker can load.
    if args.is_parallel and not args.index:
        logger.error("Index file is required when running in parallel.")
        sys.exit()
    # At least one annotation source must exist on disk.
    have_index = args.index and os.path.isfile(args.index)
    have_gtf = args.gtf and os.path.isfile(args.gtf)
    if not have_index and not have_gtf:
        logger.error("No GTF or index file found.")
        sys.exit()
    return args
def setup_logging():
    """
    Setup logging system.

    Log is written to 'alignmentStats.log'.
    """
    logger = logging.getLogger("stats")
    logger.setLevel(logging.DEBUG)

    # File handler: INFO and above, with full timestamp and logger name.
    file_handler = logging.FileHandler('alignmentStats.log')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(logging.Formatter(
        "[%(asctime)s] %(levelname)s %(name)s: %(message)s",
        "%Y-%m-%d %H:%M:%S"))

    # Console handler: everything, with a terse time-only prefix.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(logging.Formatter(
        "[%(asctime)s] %(message)s", "%H:%M:%S"))

    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
def main():
    """
    Main function.

    1. Setup logging
    2. Get arguments
    3. Get index
    4. Process files
    5. Write output
    """
    setup_logging()
    logger = logging.getLogger("stats." + __name__)
    args = get_args()
    index = get_index(args)
    logger.warning("Positions not in annotation will be ignored.")
    logger.info("Found " + str(len(args.inputs)) + " input file(s):")
    for path in sorted(args.inputs):
        logger.debug(path)
    if args.is_parallel:
        # Fan out one job per input file; each worker loads its own index.
        runner = Parallel(n_jobs=args.parallel, verbose=100, batch_size=1)
        stats = runner(delayed(process_file)(path,
                                             args.type,
                                             index,
                                             args.is_parallel)
                       for path in args.inputs)
    else:
        stats = [process_file(path, args.type, index, args.is_parallel)
                 for path in args.inputs]
    write_stats(args.out, stats)
# Script entry point.
if __name__ == "__main__":
    main()
| |
from sqlalchemy import Column
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import util
from sqlalchemy.ext.hybrid import hybrid_method
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import aliased
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import Session
from sqlalchemy.orm import synonym
from sqlalchemy.orm import util as orm_util
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.orm.path_registry import PathRegistry
from sqlalchemy.orm.path_registry import RootRegistry
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.util import compat
from test.orm import _fixtures
from .inheritance import _poly_fixtures
class AliasedClassTest(fixtures.MappedTest, AssertsCompiledSQL):
__dialect__ = "default"
def _fixture(self, cls, properties={}):
table = Table(
"point",
MetaData(),
Column("id", Integer(), primary_key=True),
Column("x", Integer),
Column("y", Integer),
)
clear_mappers()
self.mapper_registry.map_imperatively(
cls, table, properties=properties
)
return table
def test_simple(self):
class Point(object):
pass
table = self._fixture(Point)
alias = aliased(Point)
assert alias.id
assert alias.x
assert alias.y
assert Point.id.__clause_element__().table is table
assert alias.id.__clause_element__().table is not table
def test_named_entity(self):
class Point(object):
pass
self._fixture(Point)
alias = aliased(Point, name="pp")
self.assert_compile(
select(alias), "SELECT pp.id, pp.x, pp.y FROM point AS pp"
)
def test_named_selectable(self):
class Point(object):
pass
table = self._fixture(Point)
alias = aliased(table, name="pp")
self.assert_compile(
select(alias), "SELECT pp.id, pp.x, pp.y FROM point AS pp"
)
def test_not_instantiatable(self):
class Point(object):
pass
self._fixture(Point)
alias = aliased(Point)
assert_raises(TypeError, alias)
def test_instancemethod(self):
class Point(object):
def zero(self):
self.x, self.y = 0, 0
self._fixture(Point)
alias = aliased(Point)
assert Point.zero
assert getattr(alias, "zero")
def test_classmethod(self):
class Point(object):
@classmethod
def max_x(cls):
return 100
self._fixture(Point)
alias = aliased(Point)
assert Point.max_x
assert alias.max_x
assert Point.max_x() == alias.max_x() == 100
def test_simple_property(self):
class Point(object):
@property
def max_x(self):
return 100
self._fixture(Point)
alias = aliased(Point)
assert Point.max_x
assert Point.max_x != 100
assert alias.max_x
assert Point.max_x is alias.max_x
def test_descriptors(self):
class descriptor(object):
def __init__(self, fn):
self.fn = fn
def __get__(self, obj, owner):
if obj is not None:
return self.fn(obj, obj)
else:
return self
def method(self):
return "method"
class Point(object):
center = (0, 0)
@descriptor
def thing(self, arg):
return arg.center
self._fixture(Point)
alias = aliased(Point)
assert Point.thing != (0, 0)
assert Point().thing == (0, 0)
assert Point.thing.method() == "method"
assert alias.thing != (0, 0)
assert alias.thing.method() == "method"
def _assert_has_table(self, expr, table):
from sqlalchemy import Column # override testlib's override
for child in expr.get_children():
if isinstance(child, Column):
assert child.table is table
def test_hybrid_descriptor_one(self):
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
@hybrid_method
def left_of(self, other):
return self.x < other.x
self._fixture(Point)
alias = aliased(Point)
sess = fixture_session()
self.assert_compile(
sess.query(alias).filter(alias.left_of(Point)),
"SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
"point_1.y AS point_1_y FROM point AS point_1, point "
"WHERE point_1.x < point.x",
)
def test_hybrid_descriptor_two(self):
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
@hybrid_property
def double_x(self):
return self.x * 2
self._fixture(Point)
alias = aliased(Point)
eq_(str(Point.double_x), "Point.double_x")
eq_(str(alias.double_x), "AliasedClass_Point.double_x")
eq_(str(Point.double_x.__clause_element__()), "point.x * :x_1")
eq_(str(alias.double_x.__clause_element__()), "point_1.x * :x_1")
sess = fixture_session()
self.assert_compile(
sess.query(alias).filter(alias.double_x > Point.x),
"SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
"point_1.y AS point_1_y FROM point AS point_1, point "
"WHERE point_1.x * :x_1 > point.x",
)
    def test_hybrid_descriptor_three(self):
        """A pass-through hybrid_property shares the underlying column
        expression and carries alias-specific annotations.
        """

        class Point(object):
            def __init__(self, x, y):
                self.x, self.y = x, y

            @hybrid_property
            def x_alone(self):
                return self.x

        self._fixture(Point)
        alias = aliased(Point)

        eq_(str(Point.x_alone), "Point.x_alone")
        eq_(str(alias.x_alone), "AliasedClass_Point.x_alone")

        # from __clause_element__() perspective, Point.x_alone
        # and Point.x return the same thing, so that's good
        eq_(str(Point.x.__clause_element__()), "point.x")
        eq_(str(Point.x_alone.__clause_element__()), "point.x")

        # same for the alias
        eq_(str(alias.x + 1), "point_1.x + :x_1")
        eq_(str(alias.x_alone + 1), "point_1.x + :x_1")

        point_mapper = inspect(Point)

        # the hybrid is annotated exactly like the plain column, apart from
        # its own proxy_key
        eq_(
            Point.x_alone._annotations,
            {
                "entity_namespace": point_mapper,
                "parententity": point_mapper,
                "parentmapper": point_mapper,
                "proxy_key": "x_alone",
                "proxy_owner": point_mapper,
            },
        )
        eq_(
            Point.x._annotations,
            {
                "entity_namespace": point_mapper,
                "parententity": point_mapper,
                "parentmapper": point_mapper,
                "proxy_key": "x",
                "proxy_owner": point_mapper,
            },
        )

        eq_(str(alias.x_alone == alias.x), "point_1.x = point_1.x")

        a2 = aliased(Point)
        eq_(str(a2.x_alone == alias.x), "point_1.x = point_2.x")

        # on a second alias the parent entity/namespace point at the alias,
        # while parentmapper still points at the base mapper
        eq_(
            a2.x._annotations,
            {
                "entity_namespace": inspect(a2),
                "parententity": inspect(a2),
                "parentmapper": point_mapper,
                "proxy_key": "x",
                "proxy_owner": inspect(a2),
            },
        )

        sess = fixture_session()

        self.assert_compile(
            sess.query(alias).filter(alias.x_alone > Point.x),
            "SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
            "point_1.y AS point_1_y FROM point AS point_1, point "
            "WHERE point_1.x > point.x",
        )
def test_proxy_descriptor_one(self):
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
self._fixture(Point, properties={"x_syn": synonym("x")})
alias = aliased(Point)
eq_(str(Point.x_syn), "Point.x_syn")
eq_(str(alias.x_syn), "AliasedClass_Point.x_syn")
sess = fixture_session()
self.assert_compile(
sess.query(alias.x_syn).filter(alias.x_syn > Point.x_syn),
"SELECT point_1.x AS point_1_x FROM point AS point_1, point "
"WHERE point_1.x > point.x",
)
    def test_meta_getattr_one(self):
        """An attribute served dynamically by a metaclass ``__getattr__``
        behaves like the real attribute on both the class and an
        aliased() form."""

        class MetaPoint(type):
            def __getattr__(cls, key):
                # serve "x_syn" dynamically as an alias for "x"
                if key == "x_syn":
                    return cls.x
                raise AttributeError(key)

        class Point(compat.with_metaclass(MetaPoint)):
            pass

        self._fixture(Point)
        alias = aliased(Point)

        # the dynamic attribute reports the underlying "x" attribute's repr
        eq_(str(Point.x_syn), "Point.x")
        eq_(str(alias.x_syn), "AliasedClass_Point.x")

        # from __clause_element__() perspective, Point.x_syn
        # and Point.x return the same thing, so that's good
        eq_(str(Point.x.__clause_element__()), "point.x")
        eq_(str(Point.x_syn.__clause_element__()), "point.x")

        # same for the alias
        eq_(str(alias.x + 1), "point_1.x + :x_1")
        eq_(str(alias.x_syn + 1), "point_1.x + :x_1")

        # identical clause element, not merely an equal one
        is_(Point.x_syn.__clause_element__(), Point.x.__clause_element__())

        eq_(str(alias.x_syn == alias.x), "point_1.x = point_1.x")

        a2 = aliased(Point)
        eq_(str(a2.x_syn == alias.x), "point_1.x = point_2.x")

        sess = fixture_session()
        self.assert_compile(
            sess.query(alias).filter(alias.x_syn > Point.x),
            "SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
            "point_1.y AS point_1_y FROM point AS point_1, point "
            "WHERE point_1.x > point.x",
        )
    def test_meta_getattr_two(self):
        """A metaclass ``__getattr__`` that forwards to a hybrid_property
        defined on the class compiles to the hybrid's expression."""

        class MetaPoint(type):
            def __getattr__(cls, key):
                # "double_x" is redirected to the underscored hybrid
                if key == "double_x":
                    return cls._impl_double_x
                raise AttributeError(key)

        class Point(compat.with_metaclass(MetaPoint)):
            @hybrid_property
            def _impl_double_x(self):
                return self.x * 2

        self._fixture(Point)
        alias = aliased(Point)

        # repr shows the real (underscored) hybrid attribute name
        eq_(str(Point.double_x), "Point._impl_double_x")
        eq_(str(alias.double_x), "AliasedClass_Point._impl_double_x")
        eq_(str(Point.double_x.__clause_element__()), "point.x * :x_1")
        eq_(str(alias.double_x.__clause_element__()), "point_1.x * :x_1")

        sess = fixture_session()
        self.assert_compile(
            sess.query(alias).filter(alias.double_x > Point.x),
            "SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
            "point_1.y AS point_1_y FROM point AS point_1, point "
            "WHERE point_1.x * :x_1 > point.x",
        )
    def test_meta_getattr_three(self):
        """A hybrid_property constructed on the fly inside a metaclass
        ``__getattr__`` (and bound via ``__get__``) still compiles
        correctly against both class and alias."""

        class MetaPoint(type):
            def __getattr__(cls, key):
                # a brand-new hybrid is created per attribute access
                @hybrid_property
                def double_x(me):
                    return me.x * 2

                if key == "double_x":
                    return double_x.__get__(None, cls)
                raise AttributeError(key)

        class Point(compat.with_metaclass(MetaPoint)):
            pass

        self._fixture(Point)
        alias = aliased(Point)

        eq_(str(Point.double_x.__clause_element__()), "point.x * :x_1")
        eq_(str(alias.double_x.__clause_element__()), "point_1.x * :x_1")

        sess = fixture_session()
        self.assert_compile(
            sess.query(alias).filter(alias.double_x > Point.x),
            "SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
            "point_1.y AS point_1_y FROM point AS point_1, point "
            "WHERE point_1.x * :x_1 > point.x",
        )
def test_parententity_vs_parentmapper(self):
class Point(object):
pass
self._fixture(Point, properties={"x_syn": synonym("x")})
pa = aliased(Point)
is_(Point.x_syn._parententity, inspect(Point))
is_(Point.x._parententity, inspect(Point))
is_(Point.x_syn._parentmapper, inspect(Point))
is_(Point.x._parentmapper, inspect(Point))
is_(
Point.x_syn.__clause_element__()._annotations["parententity"],
inspect(Point),
)
is_(
Point.x.__clause_element__()._annotations["parententity"],
inspect(Point),
)
is_(
Point.x_syn.__clause_element__()._annotations["parentmapper"],
inspect(Point),
)
is_(
Point.x.__clause_element__()._annotations["parentmapper"],
inspect(Point),
)
pa = aliased(Point)
is_(pa.x_syn._parententity, inspect(pa))
is_(pa.x._parententity, inspect(pa))
is_(pa.x_syn._parentmapper, inspect(Point))
is_(pa.x._parentmapper, inspect(Point))
is_(
pa.x_syn.__clause_element__()._annotations["parententity"],
inspect(pa),
)
is_(
pa.x.__clause_element__()._annotations["parententity"], inspect(pa)
)
is_(
pa.x_syn.__clause_element__()._annotations["parentmapper"],
inspect(Point),
)
is_(
pa.x.__clause_element__()._annotations["parentmapper"],
inspect(Point),
)
class IdentityKeyTest(_fixtures.FixtureTest):
    """Tests for identity_key(), exercised via both the orm_util module
    function and the Session method."""

    run_inserts = None

    def _cases():
        # decorator factory evaluated at class-definition time (hence no
        # self): runs the decorated test once per identity_key provider
        return testing.combinations(
            (orm_util,),
            (Session,),
        )

    @_cases()
    def test_identity_key_1(self, ormutil):
        # class + primary-key list, positionally and by keyword
        User, users = self.classes.User, self.tables.users
        self.mapper_registry.map_imperatively(User, users)
        key = ormutil.identity_key(User, [1])
        eq_(key, (User, (1,), None))
        key = ormutil.identity_key(User, ident=[1])
        eq_(key, (User, (1,), None))

    @_cases()
    def test_identity_key_scalar(self, ormutil):
        # a scalar ident is normalized to a one-element tuple
        User, users = self.classes.User, self.tables.users
        self.mapper_registry.map_imperatively(User, users)
        key = ormutil.identity_key(User, 1)
        eq_(key, (User, (1,), None))
        key = ormutil.identity_key(User, ident=1)
        eq_(key, (User, (1,), None))

    @_cases()
    def test_identity_key_2(self, ormutil):
        # identity key derived from a flushed (persistent) instance
        users, User = self.tables.users, self.classes.User
        self.mapper_registry.map_imperatively(User, users)
        s = fixture_session()
        u = User(name="u1")
        s.add(u)
        s.flush()
        key = ormutil.identity_key(instance=u)
        eq_(key, (User, (u.id,), None))

    @_cases()
    def test_identity_key_3(self, ormutil):
        # identity key derived from a column->value row mapping
        User, users = self.classes.User, self.tables.users
        self.mapper_registry.map_imperatively(User, users)
        row = {users.c.id: 1, users.c.name: "Frank"}
        key = ormutil.identity_key(User, row=row)
        eq_(key, (User, (1,), None))

    def test_identity_key_token(self):
        # the optional identity_token lands in the third tuple slot
        User, users = self.classes.User, self.tables.users
        self.mapper_registry.map_imperatively(User, users)
        key = orm_util.identity_key(User, [1], identity_token="token")
        eq_(key, (User, (1,), "token"))
        key = orm_util.identity_key(User, ident=[1], identity_token="token")
        eq_(key, (User, (1,), "token"))
class PathRegistryTest(_fixtures.FixtureTest):
    """Tests for PathRegistry: construction via coerce(), indexing,
    comparison, containment, registry get/set, and (de)serialization.

    Paths alternate (entity, attribute, entity, attribute, ...) tuples.
    """

    run_setup_mappers = "once"
    run_inserts = None
    run_deletes = None

    @classmethod
    def setup_mappers(cls):
        cls._setup_stock_mapping()

    def test_root_registry(self):
        # indexing RootRegistry with a mapper yields that mapper's
        # canonical one-element path
        umapper = inspect(self.classes.User)
        is_(RootRegistry()[umapper], umapper._path_registry)
        eq_(RootRegistry()[umapper], PathRegistry.coerce((umapper,)))

    def test_expand(self):
        # chained indexing extends the path one token at a time
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        path = PathRegistry.coerce((umapper,))
        eq_(
            path[umapper.attrs.addresses][amapper][
                amapper.attrs.email_address
            ],
            PathRegistry.coerce(
                (
                    umapper,
                    umapper.attrs.addresses,
                    amapper,
                    amapper.attrs.email_address,
                )
            ),
        )

    def test_entity_boolean(self):
        # a non-empty path is truthy even when it ends on an entity
        umapper = inspect(self.classes.User)
        path = PathRegistry.coerce((umapper,))
        is_(bool(path), True)

    def test_key_boolean(self):
        # ... and also when it ends on an attribute key
        umapper = inspect(self.classes.User)
        path = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        is_(bool(path), True)

    def test_aliased_class(self):
        User = self.classes.User
        ua = aliased(User)
        ua_insp = inspect(ua)
        path = PathRegistry.coerce((ua_insp, ua_insp.mapper.attrs.addresses))
        assert path.parent.is_aliased_class

    def test_indexed_entity(self):
        # even indexes address the entity tokens
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        path = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        is_(path[0], umapper)
        is_(path[2], amapper)

    def test_indexed_key(self):
        # odd indexes address the attribute tokens
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        path = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        eq_(path[1], umapper.attrs.addresses)
        eq_(path[3], amapper.attrs.email_address)

    def test_slice(self):
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        path = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        eq_(path[1:3], (umapper.attrs.addresses, amapper))

    def test_addition(self):
        # two paths concatenate into one longer path
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
        eq_(
            p1 + p2,
            PathRegistry.coerce(
                (
                    umapper,
                    umapper.attrs.addresses,
                    amapper,
                    amapper.attrs.email_address,
                )
            ),
        )

    def test_length(self):
        # len() counts tokens; .length is the equivalent attribute form
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        pneg1 = PathRegistry.coerce(())
        p0 = PathRegistry.coerce((umapper,))
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        eq_(len(pneg1), 0)
        eq_(len(p0), 1)
        eq_(len(p1), 2)
        eq_(len(p2), 3)
        eq_(len(p3), 4)
        eq_(pneg1.length, 0)
        eq_(p0.length, 1)
        eq_(p1.length, 2)
        eq_(p2.length, 3)
        eq_(p3.length, 4)

    def test_eq(self):
        # equality is token-by-token; aliases are distinct from mappers
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        u_alias = inspect(aliased(self.classes.User))
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p3 = PathRegistry.coerce((umapper, umapper.attrs.name))
        p4 = PathRegistry.coerce((u_alias, umapper.attrs.addresses))
        p5 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p6 = PathRegistry.coerce(
            (amapper, amapper.attrs.user, umapper, umapper.attrs.addresses)
        )
        p7 = PathRegistry.coerce(
            (
                amapper,
                amapper.attrs.user,
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )

        is_(p1 == p2, True)
        is_(p1 == p3, False)
        is_(p1 == p4, False)
        is_(p1 == p5, False)
        is_(p6 == p7, False)
        # a path equals its longer variant truncated back via .parent
        is_(p6 == p7.parent.parent, True)
        is_(p1 != p2, False)
        is_(p1 != p3, True)
        is_(p1 != p4, True)
        is_(p1 != p5, True)

    def test_eq_non_path(self):
        # comparing a path to a non-path warns and returns unequal
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        u_alias = inspect(aliased(self.classes.User))
        p1 = PathRegistry.coerce((umapper,))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p3 = PathRegistry.coerce((u_alias, umapper.attrs.addresses))
        p4 = PathRegistry.coerce((u_alias, umapper.attrs.addresses, amapper))
        p5 = PathRegistry.coerce((u_alias,)).token(":*")

        non_object = 54.1432

        for obj in [p1, p2, p3, p4, p5]:
            with expect_warnings(
                "Comparison of PathRegistry to "
                "<.* 'float'> is not supported"
            ):
                is_(obj == non_object, False)

            with expect_warnings(
                "Comparison of PathRegistry to "
                "<.* 'float'> is not supported"
            ):
                is_(obj != non_object, True)

    def test_contains_mapper(self):
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        assert p1.contains_mapper(umapper)
        assert not p1.contains_mapper(amapper)

    def test_path(self):
        # .path exposes the raw token tuple
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
        eq_(p1.path, (umapper, umapper.attrs.addresses))
        eq_(p2.path, (umapper, umapper.attrs.addresses, amapper))
        eq_(p3.path, (amapper, amapper.attrs.email_address))

    def test_registry_set(self):
        # set() keys the registry dict on (key, path-tuple)
        reg = {}
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))

        p1.set(reg, "p1key", "p1value")
        p2.set(reg, "p2key", "p2value")
        p3.set(reg, "p3key", "p3value")
        eq_(
            reg,
            {
                ("p1key", p1.path): "p1value",
                ("p2key", p2.path): "p2value",
                ("p3key", p3.path): "p3value",
            },
        )

    def test_registry_get(self):
        # get() returns None for a key stored under a different path
        reg = {}
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
        reg.update(
            {
                ("p1key", p1.path): "p1value",
                ("p2key", p2.path): "p2value",
                ("p3key", p3.path): "p3value",
            }
        )

        eq_(p1.get(reg, "p1key"), "p1value")
        eq_(p2.get(reg, "p2key"), "p2value")
        eq_(p2.get(reg, "p1key"), None)
        eq_(p3.get(reg, "p3key"), "p3value")
        eq_(p3.get(reg, "p1key"), None)

    def test_registry_contains(self):
        reg = {}
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
        reg.update(
            {
                ("p1key", p1.path): "p1value",
                ("p2key", p2.path): "p2value",
                ("p3key", p3.path): "p3value",
            }
        )
        assert p1.contains(reg, "p1key")
        assert not p1.contains(reg, "p2key")
        assert p3.contains(reg, "p3key")
        assert not p2.contains(reg, "fake")

    def test_registry_setdefault(self):
        # setdefault() only writes entries not already present
        reg = {}
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        reg.update({("p1key", p1.path): "p1value"})

        p1.setdefault(reg, "p1key", "p1newvalue_a")
        p1.setdefault(reg, "p1key_new", "p1newvalue_b")
        p2.setdefault(reg, "p2key", "p2newvalue")
        eq_(
            reg,
            {
                ("p1key", p1.path): "p1value",
                ("p1key_new", p1.path): "p1newvalue_b",
                ("p2key", p2.path): "p2newvalue",
            },
        )

    def test_serialize(self):
        # serialize() collapses (entity, attr) pairs to (class, keyname);
        # a trailing lone entity serializes with a None key
        User = self.classes.User
        Address = self.classes.Address
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        eq_(p1.serialize(), [(User, "addresses"), (Address, "email_address")])
        eq_(p2.serialize(), [(User, "addresses"), (Address, None)])
        eq_(p3.serialize(), [(User, "addresses")])

    def test_serialize_context_dict(self):
        reg = util.OrderedDict()
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))

        p1.set(reg, "p1key", "p1value")
        p2.set(reg, "p2key", "p2value")
        p3.set(reg, "p3key", "p3value")
        eq_(
            reg,
            {
                ("p1key", p1.path): "p1value",
                ("p2key", p2.path): "p2value",
                ("p3key", p3.path): "p3value",
            },
        )

        # only entries whose key is in the passed filter are serialized
        serialized = PathRegistry.serialize_context_dict(
            reg, ("p1key", "p2key")
        )
        eq_(
            serialized,
            [
                (("p1key", p1.serialize()), "p1value"),
                (("p2key", p2.serialize()), "p2value"),
            ],
        )

    def test_deseralize_context_dict(self):
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))

        serialized = [
            (("p1key", p1.serialize()), "p1value"),
            (("p2key", p2.serialize()), "p2value"),
            (("p3key", p3.serialize()), "p3value"),
        ]
        deserialized = PathRegistry.deserialize_context_dict(serialized)

        eq_(
            deserialized,
            {
                ("p1key", p1.path): "p1value",
                ("p2key", p2.path): "p2value",
                ("p3key", p3.path): "p3value",
            },
        )

    def test_deseralize(self):
        # deserialize() is the inverse of serialize()
        User = self.classes.User
        Address = self.classes.Address
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((umapper, umapper.attrs.addresses))

        eq_(
            PathRegistry.deserialize(
                [(User, "addresses"), (Address, "email_address")]
            ),
            p1,
        )
        eq_(
            PathRegistry.deserialize([(User, "addresses"), (Address, None)]),
            p2,
        )
        eq_(PathRegistry.deserialize([(User, "addresses")]), p3)
class PathRegistryInhTest(_poly_fixtures._Polymorphic):
    """PathRegistry behavior in the presence of inheritance: plain
    subclass attributes, aliased classes, and with_polymorphic()
    entities, including the ``natural_path`` normalization."""

    run_setup_mappers = "once"
    run_inserts = None
    run_deletes = None

    def test_plain(self):
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        pmapper = inspect(Person)
        emapper = inspect(Engineer)

        p1 = PathRegistry.coerce((pmapper, emapper.attrs.machines))

        # given a mapper and an attribute on a subclass,
        # the path converts what you get to be against that subclass
        eq_(p1.path, (emapper, emapper.attrs.machines))

    def test_plain_compound(self):
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        cmapper = inspect(Company)
        pmapper = inspect(Person)
        emapper = inspect(Engineer)

        p1 = PathRegistry.coerce(
            (cmapper, cmapper.attrs.employees, pmapper, emapper.attrs.machines)
        )

        # given a mapper and an attribute on a subclass,
        # the path converts what you get to be against that subclass
        eq_(
            p1.path,
            (
                cmapper,
                cmapper.attrs.employees,
                emapper,
                emapper.attrs.machines,
            ),
        )

    def test_plain_aliased(self):
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        emapper = inspect(Engineer)

        p_alias = aliased(Person)
        p_alias = inspect(p_alias)

        p1 = PathRegistry.coerce((p_alias, emapper.attrs.machines))
        # plain AliasedClass - the path keeps that AliasedClass directly
        # as is in the path
        eq_(p1.path, (p_alias, emapper.attrs.machines))

    def test_plain_aliased_compound(self):
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        cmapper = inspect(Company)
        emapper = inspect(Engineer)

        c_alias = aliased(Company)
        p_alias = aliased(Person)

        c_alias = inspect(c_alias)
        p_alias = inspect(p_alias)

        p1 = PathRegistry.coerce(
            (c_alias, cmapper.attrs.employees, p_alias, emapper.attrs.machines)
        )
        # plain AliasedClass - the path keeps that AliasedClass directly
        # as is in the path
        eq_(
            p1.path,
            (
                c_alias,
                cmapper.attrs.employees,
                p_alias,
                emapper.attrs.machines,
            ),
        )

    def test_with_poly_sub(self):
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        emapper = inspect(Engineer)
        cmapper = inspect(Company)

        p_poly = with_polymorphic(Person, [Engineer])
        e_poly_insp = inspect(p_poly.Engineer)  # noqa - used by comment below
        p_poly_insp = inspect(p_poly)

        p1 = PathRegistry.coerce((p_poly_insp, emapper.attrs.machines))

        # changes as of #5082: when a with_polymorphic is in the middle
        # of a path, the natural path makes sure it uses the base mappers,
        # however when it's at the root, the with_polymorphic stays in
        # the natural path

        # this behavior is the same as pre #5082, it was temporarily changed
        # but this proved to be incorrect.   The path starts on a
        # with_polymorphic(), so a Query will "naturally" construct a path
        # that comes from that wp.
        eq_(p1.path, (e_poly_insp, emapper.attrs.machines))
        eq_(p1.natural_path, (e_poly_insp, emapper.attrs.machines))

        # this behavior is new as of the final version of #5082.
        # the path starts on a normal entity and has a with_polymorphic
        # in the middle, for this to match what Query will generate it needs
        # to use the non aliased mappers in the natural path.
        p2 = PathRegistry.coerce(
            (
                cmapper,
                cmapper.attrs.employees,
                p_poly_insp,
                emapper.attrs.machines,
            )
        )
        eq_(
            p2.path,
            (
                cmapper,
                cmapper.attrs.employees,
                e_poly_insp,
                emapper.attrs.machines,
            ),
        )
        eq_(
            p2.natural_path,
            (
                cmapper,
                cmapper.attrs.employees,
                emapper,
                emapper.attrs.machines,
            ),
        )

    def test_with_poly_base_two(self):
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        cmapper = inspect(Company)
        pmapper = inspect(Person)

        p_poly = with_polymorphic(Person, [Engineer])
        e_poly_insp = inspect(p_poly.Engineer)  # noqa - used by comment below
        p_poly_insp = inspect(p_poly)

        # a with_polymorphic in the middle of the path: kept verbatim in
        # .path, replaced by the base mapper in .natural_path
        p1 = PathRegistry.coerce(
            (
                cmapper,
                cmapper.attrs.employees,
                p_poly_insp,
                pmapper.attrs.paperwork,
            )
        )
        eq_(
            p1.path,
            (
                cmapper,
                cmapper.attrs.employees,
                p_poly_insp,
                pmapper.attrs.paperwork,
            ),
        )
        eq_(
            p1.natural_path,
            (
                cmapper,
                cmapper.attrs.employees,
                pmapper,
                pmapper.attrs.paperwork,
            ),
        )

    def test_nonpoly_oftype_aliased_subclass_onroot(self):
        Engineer = _poly_fixtures.Engineer
        eng_alias = aliased(Engineer)
        ea_insp = inspect(eng_alias)

        # an aliased subclass at the root stays as-is in both paths
        p1 = PathRegistry.coerce((ea_insp, ea_insp.mapper.attrs.paperwork))
        eq_(p1.path, (ea_insp, ea_insp.mapper.attrs.paperwork))
        eq_(p1.natural_path, (ea_insp, ea_insp.mapper.attrs.paperwork))

    def test_nonpoly_oftype_aliased_subclass(self):
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        cmapper = inspect(Company)
        pmapper = inspect(Person)
        eng_alias = aliased(Engineer)
        ea_insp = inspect(eng_alias)

        # an aliased subclass mid-path: kept in .path, while
        # .natural_path falls back to the base Person mapper
        p1 = PathRegistry.coerce(
            (
                cmapper,
                cmapper.attrs.employees,
                ea_insp,
                ea_insp.mapper.attrs.paperwork,
            )
        )
        eq_(
            p1.path,
            (
                cmapper,
                cmapper.attrs.employees,
                ea_insp,
                ea_insp.mapper.attrs.paperwork,
            ),
        )
        eq_(
            p1.natural_path,
            (
                cmapper,
                cmapper.attrs.employees,
                pmapper,
                pmapper.attrs.paperwork,
            ),
        )

    def test_nonpoly_oftype_subclass(self):
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        emapper = inspect(Engineer)
        cmapper = inspect(Company)
        pmapper = inspect(Person)

        # a plain subclass mapper mid-path normalizes to the base mapper
        # in both .path and .natural_path ("paperwork" lives on Person)
        p1 = PathRegistry.coerce(
            (
                cmapper,
                cmapper.attrs.employees,
                emapper,
                emapper.attrs.paperwork,
            )
        )
        eq_(
            p1.path,
            (
                cmapper,
                cmapper.attrs.employees,
                pmapper,
                pmapper.attrs.paperwork,
            ),
        )
        eq_(
            p1.natural_path,
            (
                cmapper,
                cmapper.attrs.employees,
                pmapper,
                pmapper.attrs.paperwork,
            ),
        )

    def test_with_poly_base_one(self):
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        pmapper = inspect(Person)
        emapper = inspect(Engineer)

        p_poly = with_polymorphic(Person, [Engineer])
        p_poly = inspect(p_poly)

        # "name" is actually on Person, not Engineer
        p1 = PathRegistry.coerce((p_poly, emapper.attrs.name))

        # polymorphic AliasedClass - because "name" is on Person,
        # we get Person, not Engineer
        eq_(p1.path, (p_poly, pmapper.attrs.name))

    def test_with_poly_use_mapper(self):
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        emapper = inspect(Engineer)

        p_poly = with_polymorphic(Person, [Engineer], _use_mapper_path=True)
        p_poly = inspect(p_poly)

        p1 = PathRegistry.coerce((p_poly, emapper.attrs.machines))

        # polymorphic AliasedClass with the "use_mapper_path" flag -
        # the AliasedClass acts just like the base mapper
        eq_(p1.path, (emapper, emapper.attrs.machines))
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter, OrderedDict
import pandas as pd
import numpy as np
import random
import os
import pickle
import time
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from scipy.spatial import distance
# this ensures TensorFlow doesn't use all GPU memory with a
# single graph (thus preventing other TF graphs from utilizing GPU)
# NOTE: tf.ConfigProto is the TensorFlow 1.x session-config API; the
# gpu_options dict initializes the GPUOptions protobuf sub-message.
GPU_MEM_CONFIG = tf.ConfigProto(gpu_options={'allow_growth': True})
class W2VModelDownload:
    """Downloads a pre-trained Word2Vec model from Google Cloud Storage
    into the local ``models/<model_name>/`` directory layout."""

    def __init__(self, bq_project):
        # GCP project used to authorize the storage client
        self.bq_project = bq_project

    def download_w2v_model(self, landscape_bucket, model_name):
        """
        Download a pre-trained Word2Vec model.

        Fixes vs. the original: the docstring was a dead string expression
        placed after the import (and documented a nonexistent ``models_url``
        param); the "checkpoint not found" check ran only after the bad
        value had already been used in a ``list_blobs`` request; an unused
        ``checkpoint_path`` local was removed.

        :param landscape_bucket: currently unused; the bucket name is
            hard-coded to 'patent_landscapes' below -- TODO confirm intent
        :param model_name: the name of the model to download
        :raises ValueError: if the remote checkpoint index does not name
            a checkpoint file
        """
        # local import so google-cloud is only required when downloading
        from google.cloud import storage

        checkpoint_list_file = '/'.join(['models', model_name, 'checkpoints', 'checkpoint'])

        # short-circuit if the model has already been downloaded
        if os.path.exists(checkpoint_list_file):
            print('Model {} already exists. Using local copy.'.format(model_name))
            return

        client = storage.Client(project=self.bq_project)
        bucket = client.bucket('patent_landscapes')
        blob = bucket.blob(checkpoint_list_file)
        checkpoints = blob.download_as_string(client=client).decode()

        # the TF 'checkpoint' index file names the latest checkpoint via a
        # line like: model_checkpoint_path: "model.ckpt"
        checkpoint_file = 'n/a'
        for checkpoint in checkpoints.split('\n'):
            if checkpoint.startswith('model_checkpoint_path'):
                checkpoint_file = checkpoint.split(': ')[1].replace('"', '')
                break

        # fail early, before issuing any further bucket requests
        if checkpoint_file == 'n/a':
            raise ValueError('Unable to find checkpoint for model {}!'.format(model_name))

        # all blobs belonging to the named checkpoint (.index/.meta/.data)
        blobs_list = bucket.list_blobs(
            prefix='/'.join(['models', model_name, 'checkpoints', checkpoint_file]))
        checkpoints_files = [blob_item.name for blob_item in blobs_list]

        model_base_local_path = '/'.join(['models', model_name])
        local_dirs = ['checkpoints', 'vocab']
        files = checkpoints_files + [
            '/'.join(['models', model_name, 'train_words.pkl']),
            '/'.join(['models', model_name, 'checkpoints/checkpoint']),
            '/'.join(['models', model_name, 'vocab/config.csv']),
            '/'.join(['models', model_name, 'vocab/vocab.csv']),
        ]

        # mirror the remote directory layout locally
        for storage_dir in local_dirs:
            local_model_storage_dir = '/'.join([model_base_local_path, storage_dir])
            if not os.path.exists(local_model_storage_dir):
                os.makedirs(local_model_storage_dir)

        # blob paths double as local paths, so download each in place
        for file_path in files:
            if os.path.exists(file_path):
                print('Not downloading {}; already exists'.format(file_path))
                continue
            blob_file = bucket.blob(file_path)
            print('Downloading {}'.format(file_path))
            blob_file.download_to_filename(file_path)

        print('Completed downloading {} files'.format(model_name))
class TrainedW2VRuntime:
    """Bundles a trained Word2Vec graph with its vocab lookup tables and
    embedding matrices, and provides lookup/visualization helpers."""

    # class-level defaults; all populated in __init__
    w2v_graph = None
    index_to_word = None
    word_to_index = None
    embedding_weights = None
    normed_embedding_weights = None

    def __init__(
            self,
            w2v_graph,
            index_to_word,
            word_to_index,
            embedding_weights,
            normed_embedding_weights):
        '''
        Stores the trained graph, the index<->word lookup tables, and
        the raw plus L2-normalized embedding weight matrices.
        '''
        self.w2v_graph = w2v_graph
        self.index_to_word = index_to_word
        self.word_to_index = word_to_index
        self.embedding_weights = embedding_weights
        self.normed_embedding_weights = normed_embedding_weights

    def visualize_embeddings(self, num_words=500):
        '''
        Creates a matplotlib plot of the first 'num_words' words using TSNE to see how 'close'
        each of the words are in the embedding space.

        Note that TSNE uses SGD and therefore this method will not always produce the
        exact same visualization even on identical input.
        '''
        tsne = TSNE()
        # project the (normalized) embeddings for the first num_words down to 2D
        embed_tsne = tsne.fit_transform(self.normed_embedding_weights[:num_words, :])

        fig, ax = plt.subplots(figsize=(14, 14))
        for idx in range(num_words):
            plt.scatter(*embed_tsne[idx, :], color='steelblue')
            plt.annotate(self.index_to_word[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)

    def find_similar(self, word, top_k):
        '''
        Finds the top_k most similar words to the provided word, as determined by calculating
        the cosine distance between the word and the rest of the embedded words, sorting the
        distance, and finally taking only the top_k results.

        Note: this should be used for debugging or illustrative purposes only; it's slow!
        '''
        distances = {}
        word1_index = self.word_to_index[word]
        word1_embed = self.embedding_weights[word1_index]
        # brute-force cosine distance against every other vocabulary word
        for index in range(0, len(self.embedding_weights)):
            if index != word1_index:
                word2_embed = self.embedding_weights[index]
                word_dist = distance.cosine(word1_embed, word2_embed)
                distances[index] = word_dist

        # smallest cosine distance first
        top_k_similar = sorted(distances.items(), key=lambda x:x[1])[:top_k]

        similar_words = []
        for i in range(0, len(top_k_similar)):
            similar_word_index = top_k_similar[i][0]
            similar_word_dist = top_k_similar[i][1]
            similar_word = self.index_to_word[similar_word_index]
            similar_words.append(
                {'word': similar_word,
                 'index': similar_word_index,
                 'distance': similar_word_dist})

        return similar_words

    def load_embedding(self, word):
        '''
        Returns the embedding vector for the given word, falling back to
        the 'UNK' (unknown word) embedding when the word is not in the
        vocabulary.
        '''
        if word in self.word_to_index:
            word_idx = self.word_to_index[word]
        else:
            print("Couldn't find {}. Using UNK instead. If this sounds wrong, consider normalizing text.".format(word))
            word_idx = self.word_to_index['UNK']

        return self.embedding_weights[word_idx]
class W2VGraph:
    """Plain data holder for the TensorFlow tensors/ops that make up a
    Word2Vec training graph, so they can be passed around as one unit."""

    # class-level defaults; all populated in __init__
    train_graph = None
    inputs = None
    labels = None
    embedding = None
    normalized_embedding = None
    loss = None
    cost = None
    optimizer = None
    similarity = None
    valid_size = None
    valid_window = None
    valid_examples = None

    def __init__(
            self,
            train_graph, inputs, labels, embedding, normalized_embedding, loss, cost, optimizer,
            similarity,
            valid_size,
            valid_window,
            valid_examples):
        # graph and its tensors/ops are stored verbatim
        self.train_graph = train_graph
        self.inputs = inputs
        self.labels = labels
        self.embedding = embedding
        self.normalized_embedding = normalized_embedding
        self.loss = loss
        self.cost = cost
        self.optimizer = optimizer
        self.similarity = similarity
        # validation-set configuration used for periodic similarity checks
        self.valid_size = valid_size
        self.valid_window = valid_window
        self.valid_examples = valid_examples
class Word2Vec:
    """Builds, trains and persists a skip-gram Word2Vec model under the
    ``models/<model_name>/`` directory layout."""

    # file-system layout; the path fields are derived in __init__
    models_path = 'models'
    checkpoint_file = None
    model_name = None
    checkpoints_path = None
    vocab_dir = None
    vocab_file = None
    config_file = None
    train_words_path = None

    # training hyperparameter defaults (overridable via __init__)
    vocabulary_size = 150
    subsample_threshold = 1e-5
    negative_samples = 750

    # populated once a trained model has been loaded/restored
    w2v_runtime = None
    def __init__(self, model_name, vocab_size=50000, subsample_threshold=1e-5):
        """Derives all model file paths from ``model_name`` and records
        the vocab size / subsampling threshold hyperparameters.

        :param model_name: subdirectory name under ``models/``
        :param vocab_size: maximum vocabulary size; non-positive values
            fall back to the class default
        :param subsample_threshold: Mikolov-style subsampling threshold;
            non-positive values fall back to the class default
        """
        print('Will use models/{} directory to load/persist model information.'.format(model_name))
        self.model_name = model_name
        self.checkpoints_path = os.path.join(
            self.models_path, self.model_name, 'checkpoints')
        self.checkpoint_file = os.path.join(
            self.checkpoints_path, '{}.ckpt'.format(self.model_name))
        self.vocab_dir = os.path.join(
            self.models_path, self.model_name, 'vocab')
        self.vocab_file = os.path.join(self.vocab_dir, 'vocab.csv')
        self.config_file = os.path.join(self.vocab_dir, 'config.csv')
        self.train_words_path = os.path.join(self.models_path, self.model_name, 'train_words.pkl')

        # only override the class-level defaults with positive values
        if vocab_size > 0:
            self.vocabulary_size = vocab_size
        if subsample_threshold > 0:
            self.subsample_threshold = subsample_threshold
def preprocess_sequential_words(
self,
sequential_words,
min_wordcount=10):
vocab_to_int, int_to_vocab, int_words, int_word_counts = \
self.create_lookup_tables(words, vocabulary_size)
total_wordcount = len(int_words)
print('Most common words: ', [word for word in int_to_vocab.values()][0:5])
train_words = self.subsample_words(
self.subsample_threshold, int_words, int_word_counts, total_wordcount)
print("Total words in corpus: {}, vocab size: {}, num words used for training: {}".format(
total_wordcount, len(int_word_counts), len(train_words)))
return vocab_to_int, int_to_vocab, int_words, int_word_counts, train_words
def create_lookup_tables(self, words, vocab_size, min_wordcount=10):
print('Generating wordcounts')
word_counts = Counter(words)
print('Filtering words with counts < {}'.format(min_wordcount))
words = [word for word in words if word_counts[word] >= min_wordcount]
word_counts = Counter(words)
if len(word_counts) >= vocab_size:
print('reducing word count from {} to top {} words'.format(len(word_counts), vocab_size))
word_counts = OrderedDict(word_counts.most_common(vocab_size - 1))
words = [word for word in words if word in word_counts]
else:
print('keeping word count at {} (max set as {})'.format(len(word_counts), vocab_size))
word_counts['UNK'] = 1
sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}
vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}
int_words = [vocab_to_int[word] for word in words]
int_word_counts = Counter(int_words)
return vocab_to_int, int_to_vocab, int_words, int_word_counts
# this taken from milikov et al paper
def prob_keep(self, threshold, int_word, freqs):
return 1 - np.sqrt(threshold / freqs[int_word])
def subsample_words(self, threshold, int_words, int_word_counts, total_wordcount):
# calculate relative frequencies of each word in the corpus
freqs = {word: count/total_wordcount for word, count in int_word_counts.items()}
# calculate the probability that we should keep a word, based on the threshold
int_word_probs = [self.prob_keep(threshold, int_word, freqs) for int_word in set(int_words)]
# generate the set of words to use for training data, taking into account the
# probabilities generated for each word
train_words = [int_word for int_word in int_words if (int_word_probs[int_word] < random.random())]
return train_words
    def save_vocab_mapping(self, int_to_vocab):
        '''
        Saves the mapping from word index -> word string to disk. The reverse mapping can be
        derived from this data, so no need to persist both.
        '''
        if not os.path.isdir(self.vocab_dir):
            print('Creating directory to store vocab/config files: {}'.format(self.vocab_dir))
            os.makedirs(self.vocab_dir)

        # persisted as a one-column CSV ('word') indexed by word id
        vocab_df = pd.DataFrame.from_dict(int_to_vocab, orient='index')
        vocab_df.columns = ['word']
        vocab_df.to_csv(self.vocab_file)
    def save_model_config(self, config_dict):
        '''
        Persists the model's configuration name/value pairs as a CSV at
        self.config_file, creating the vocab directory if needed.
        '''
        if not os.path.isdir(self.vocab_dir):
            print('Creating directory to store vocab/config files: {}'.format(self.vocab_dir))
            os.makedirs(self.vocab_dir)
        pd.DataFrame.from_dict(config_dict, orient='index').to_csv(self.config_file)
def load_vocab_mappings(self):
    '''
    Loads the vocab CSV written by save_vocab_mapping and returns both
    directions of the mapping as a tuple: (index -> word, word -> index).
    '''
    # Read the file once and derive the reverse mapping in memory; the
    # original parsed the same CSV twice.
    index_to_vocab_df = pd.read_csv(
        self.vocab_file, keep_default_na=False, na_values=[], encoding='latin-1')
    index_to_vocab = index_to_vocab_df.to_dict()['word']
    # vocab ids are written out as 0..n-1 in order, so inverting the forward
    # mapping reproduces what the second read previously returned
    vocab_to_index = {word: index for index, word in index_to_vocab.items()}
    return index_to_vocab, vocab_to_index
def load_model_config(self):
    '''Reads the model config CSV back into a {name: value} dict.'''
    config_df = pd.read_csv(self.config_file)
    config_df.columns = ['name', 'value']
    by_name = config_df.set_index(config_df['name'])['value']
    return by_name.to_dict()
def save_train_words(self, train_words_indexes):
    '''Pickles the subsampled training corpus (word ids) to disk.'''
    with open(self.train_words_path, 'wb') as out_file:
        pickle.dump(train_words_indexes, out_file)
def load_train_words(self, file_path):
    '''Unpickles and returns the training corpus saved by save_train_words.'''
    with open(file_path, 'rb') as in_file:
        return pickle.load(in_file)
def get_target(self, words, idx, window_size=5):
    '''
    Returns the context words around words[idx], using a window radius drawn
    uniformly from [1, window_size] as in Mikolov et al.

    BUG FIXES vs the original:
    - random.randint is inclusive on both ends, so randint(1, window_size+1)
      could draw a radius of window_size + 1 (an off-by-one introduced when
      porting from numpy's exclusive randint).
    - the slice words[idx-r:idx+r+1] included the center word itself, which
      produced useless (word, word) skip-gram training pairs.
    '''
    r = random.randint(1, window_size)
    start = max(idx - r, 0)
    # context = r words before (clamped at 0) + r words after, center excluded
    return words[start:idx] + words[idx + 1:idx + r + 1]
def get_batches(self, words, batch_size, window_size=5):
    ''' Create a generator of word batches as a tuple (inputs, targets) '''
    n_batches = len(words) // batch_size
    # drop the ragged tail so every batch is full
    usable = words[:n_batches * batch_size]
    for start in range(0, len(usable), batch_size):
        batch = usable[start:start + batch_size]
        inputs, targets = [], []
        for pos, center in enumerate(batch):
            context = self.get_target(batch, pos, window_size)
            targets.extend(context)
            # repeat the center word once per context word it pairs with
            inputs.extend([center] * len(context))
        yield inputs, targets
################ TensorFlow-related Code ############################
def create_graph(self, vocab_size, embedding_size, negative_samples_for_loss):
    '''
    Creates the Word2Vec graph for use in training and restoring checkpoint files to
    load embeddings. The method returns the graph, the embedding variable and the normalized
    embedding variables that can be used to restore the embedding weights from the TF graph.
    You should call this function like this:
    graph, embedding, normalized_embedding = word2vec.create_graph(...params...)
    '''
    train_graph = tf.Graph()
    n_vocab = vocab_size
    n_embedding = embedding_size
    n_sampled = negative_samples_for_loss
    with train_graph.as_default():
        # word-id inputs (flat batch) and their context-word labels (batch x 1)
        inputs = tf.placeholder(tf.int32, [None])
        labels = tf.placeholder(tf.int32, [None, None])
        # create embedding weight matrix, uniformly initialized in [-1, 1)
        embedding = tf.Variable(tf.random_uniform([n_vocab, n_embedding], minval=-1, maxval=1))
        # gets the hidden layer output (i.e. the embedding)
        embed = tf.nn.embedding_lookup(embedding, inputs)
        # output-layer weights/bias, consumed only by the sampled-softmax loss
        softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1))
        softmax_b = tf.Variable(tf.zeros(n_vocab))
        # For each word, we need to sample for negative training data
        # (i.e., words not in window) for calculating loss and backprop.
        # This calculates the loss using negative sampling.
        loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, labels, embed, n_sampled, n_vocab)
        cost = tf.reduce_mean(loss)
        optimizer = tf.train.AdamOptimizer().minimize(cost)
        # Validation dataset used only for the periodic nearest-neighbor report
        # TODO: parameterize this
        valid_size = 16  # Random set of words to evaluate similarity on.
        valid_window = 100
        # pick 8 samples from (0,100) and (1000,1100) ranges; lower id implies more frequent
        valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
        valid_examples = np.append(valid_examples,
                                   random.sample(range(1000, 1000+valid_window), valid_size//2))
        valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
        # Uses cosine distance to find similarity of matrix elements
        # (rows are L2-normalized, so the matmul below is cosine similarity)
        norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
        normalized_embedding = embedding / norm
        valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
        similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
    # bundle every tensor/op callers need into the W2VGraph named container
    w2v_graph = W2VGraph(
        train_graph,
        inputs,
        labels,
        embedding,
        normalized_embedding,
        loss,
        cost,
        optimizer,
        similarity,
        valid_size,
        valid_window,
        valid_examples)
    return w2v_graph
def restore_runtime(self):
    '''
    Loads the latest checkpoint file for this model into the provided graph,
    returning the embedding weights and normalized embedding weights.
    You should use the normalized embedding weights for your embeddings.
    '''
    # rebuild the graph exactly as trained, from the persisted vocab + config
    index_to_word, word_to_index = self.load_vocab_mappings()
    model_config = self.load_model_config()
    embedding_size = int(model_config['embedding_size'])
    loss_sampling_size = int(model_config['loss_sampling_size'])
    w2v_graph = \
        self.create_graph(len(index_to_word), embedding_size, loss_sampling_size)
    with tf.Session(graph=w2v_graph.train_graph, config=GPU_MEM_CONFIG) as sess:
        saver = tf.train.Saver()
        # restore the newest checkpoint in the checkpoints directory
        saver.restore(sess, tf.train.latest_checkpoint(self.checkpoints_path))
        # evaluate the variables to pull the trained weights out of the session
        embedding_weights, normed_embedding_weights = \
            sess.run([w2v_graph.embedding, w2v_graph.normalized_embedding])
    return TrainedW2VRuntime(w2v_graph, index_to_word, word_to_index, embedding_weights, normed_embedding_weights)
def train(self, w2v_graph, int_to_vocab, train_words, epochs, batch_size, window_size):
    '''
    Trains the skip-gram model, printing average loss every 500 iterations,
    a nearest-neighbor report for the validation words every 2500, and
    checkpointing every 25000 iterations plus once at the end.
    '''
    if not os.path.isdir(self.checkpoints_path):
        print('Creating checkpoints directory to store model ckpt files.')
        os.makedirs(self.checkpoints_path)
    with w2v_graph.train_graph.as_default():
        saver = tf.train.Saver()
    # iteration counts batches across all epochs (drives the % triggers below)
    iteration = 1
    with tf.Session(graph=w2v_graph.train_graph, config=GPU_MEM_CONFIG) as sess:
        loss = 0
        sess.run(tf.global_variables_initializer())
        for e in range(1, epochs+1):
            start_epoch = time.time()
            batches = self.get_batches(train_words, batch_size, window_size)
            start = time.time()
            for x, y in batches:
                # labels must be rank-2 for sampled_softmax_loss, hence [:, None]
                feed = {w2v_graph.inputs: x,
                        w2v_graph.labels: np.array(y)[:, None]}
                train_loss, _ = sess.run([w2v_graph.cost, w2v_graph.optimizer], feed_dict=feed)
                loss += train_loss
                if iteration % 500 == 0:
                    end = time.time()
                    print("Epoch {}/{}".format(e, epochs),
                          "Iteration: {}".format(iteration),
                          "Avg. Training loss: {:.4f}".format(loss/500),
                          "{:.4f} sec/batch".format((end-start)/500))
                    loss = 0
                    start = time.time()
                if iteration % 2500 == 0:
                    ## From Thushan Ganegedara's implementation
                    # note that this is expensive (~20% slowdown if computed every 500 steps)
                    sim = w2v_graph.similarity.eval()
                    for i in range(w2v_graph.valid_size):
                        valid_word = int_to_vocab[w2v_graph.valid_examples[i]]
                        top_k = 8  # number of nearest neighbors
                        # skip element 0: the word is always most similar to itself
                        nearest = (-sim[i, :]).argsort()[1:top_k+1]
                        log = 'Nearest to %s:' % valid_word
                        for k in range(top_k):
                            close_word = int_to_vocab[nearest[k]]
                            log = '%s %s,' % (log, close_word)
                        print(log)
                if iteration % 25000 == 0:
                    save_path = saver.save(sess, self.checkpoint_file, global_step=iteration)
                iteration += 1
            epoch_time = time.time() - start_epoch
            print('{:.4f} seconds ({:.4f} minutes) for full epoch'.format(epoch_time, epoch_time/60))
        # final checkpoint and a last pull of the normalized embeddings
        save_path = saver.save(sess, self.checkpoint_file, global_step=iteration)
        embed_mat = sess.run(w2v_graph.normalized_embedding)
def prep_train_and_save_model(
        self,
        sequential_words_corpus,
        vocabulary_size,
        embedding_size,
        num_epochs,
        batch_size,
        window_size):
    '''
    End-to-end pipeline: builds the vocab lookup tables, subsamples the
    corpus, persists the vocab/config/corpus artifacts to disk, then builds
    the TF graph and trains it.
    Returns (train_graph, embedding, normalized_embedding).
    '''
    corpus = sequential_words_corpus
    vocab_to_int, int_to_vocab, int_words, int_word_counts = \
        self.create_lookup_tables(corpus, vocabulary_size)
    total_wordcount = len(int_words)
    vocab_words = list(int_to_vocab.values())
    print('Most common words: ', vocab_words[0:10])
    print('Least common words: ', vocab_words[-10:])
    train_words = self.subsample_words(
        self.subsample_threshold, int_words, int_word_counts, total_wordcount)
    print("Total words in corpus: {}, vocab size: {}, num words used for training: {}".format(
        total_wordcount, len(int_word_counts), len(train_words)))
    # after preprocessing, persist everything needed to restore the model later
    print('Saving model config, vocab word-to-index mapping, and word corpus to models/{}.'.format(self.model_name))
    self.save_vocab_mapping(int_to_vocab)
    self.save_model_config({'embedding_size': embedding_size,
                            'loss_sampling_size': self.negative_samples})
    self.save_train_words(train_words)
    print('Creating TF graph.')
    w2v_graph = self.create_graph(vocabulary_size, embedding_size, self.negative_samples)
    print('Training model for {} epochs.'.format(num_epochs))
    self.train(w2v_graph, int_to_vocab, train_words, num_epochs, batch_size, window_size)
    return w2v_graph.train_graph, w2v_graph.embedding, w2v_graph.normalized_embedding
| |
# Copyright 2014 Arista Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from neutron_lib.agent import topics
from neutron_lib import constants as n_const
from neutron_lib import context as nctx
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib import rpc as n_rpc
from neutron_lib.services import base as service_base
from neutron_lib import worker
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import l3_rpc
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.plugins.ml2.driver_context import NetworkContext # noqa
from networking_arista._i18n import _LE, _LI
from networking_arista.l3Plugin import arista_l3_driver
LOG = logging.getLogger(__name__)
class AristaL3SyncWorker(worker.BaseWorker):
    """Periodic worker that reconciles Neutron's router DB with Arista EOS.

    Runs a fixed-interval loop (cfg.CONF.l3_arista.l3_sync_interval) that
    re-creates routers and router interfaces on EOS, and optionally cleans
    up stale SVIs/VLANs/VRFs.
    """

    def __init__(self, driver):
        # the Arista L3 driver holds connection details and cleanup policy
        self.driver = driver
        self._enable_cleanup = driver._enable_cleanup
        self._protected_vlans = driver._protected_vlans
        self._servers = driver._servers
        self._use_vrf = driver._use_vrf
        self._loop = None
        # worker_process_count=0: run in the parent process, not forked workers
        super(AristaL3SyncWorker, self).__init__(worker_process_count=0)

    def start(self):
        """Starts the periodic sync loop (no-op if already running)."""
        super(AristaL3SyncWorker, self).start()
        if self._loop is None:
            self._loop = loopingcall.FixedIntervalLoopingCall(
                self.synchronize
            )
            self._loop.start(interval=cfg.CONF.l3_arista.l3_sync_interval)

    def stop(self):
        """Stops the sync loop if one is running."""
        if self._loop is not None:
            self._loop.stop()

    def wait(self):
        """Waits for the loop to finish; clears it so start() can recreate it."""
        if self._loop is not None:
            self._loop.wait()
        self._loop = None

    def reset(self):
        """Full restart of the sync loop: stop, wait, start."""
        self.stop()
        self.wait()
        self.start()

    def get_subnet_info(self, subnet_id):
        # NOTE(review): worker.BaseWorker does not define get_subnet, and no
        # mixin is visible here — verify this method is actually reachable.
        return self.get_subnet(subnet_id)

    def get_routers_and_interfaces(self):
        """Returns (routers, router_interfaces) read from the Neutron DB.

        Each router_interface entry is a copy of its router dict augmented
        with seg_id/cidr/gip/ip_version/subnet_id of the attached subnet.
        """
        core = directory.get_plugin()
        ctx = nctx.get_admin_context()
        routers = directory.get_plugin(plugin_constants.L3).get_routers(ctx)
        router_interfaces = list()
        for r in routers:
            # all router-interface ports owned by this router
            ports = core.get_ports(
                ctx,
                filters={
                    'device_id': [r['id']],
                    'device_owner': [n_const.DEVICE_OWNER_ROUTER_INTF]}) or []
            for p in ports:
                router_interface = r.copy()
                net_id = p['network_id']
                # only the port's first fixed IP is considered
                subnet_id = p['fixed_ips'][0]['subnet_id']
                subnet = core.get_subnet(ctx, subnet_id)
                ml2_db = NetworkContext(self, ctx, {'id': net_id})
                seg_id = ml2_db.network_segments[0]['segmentation_id']
                router_interface['seg_id'] = seg_id
                router_interface['cidr'] = subnet['cidr']
                router_interface['gip'] = subnet['gateway_ip']
                router_interface['ip_version'] = subnet['ip_version']
                router_interface['subnet_id'] = subnet_id
                router_interfaces.append(router_interface)
        return routers, router_interfaces

    def synchronize(self):
        """Synchronizes Router DB from Neutron DB with EOS.

        Walks through the Neutron DB and ensures that all the routers
        created in Neutron DB match with EOS. After creating appropriate
        routers, it ensures to add interfaces as well.
        Uses idempotent properties of EOS configuration, which means
        same commands can be repeated.
        """
        LOG.info(_LI('Syncing Neutron Router DB <-> EOS'))
        routers, router_interfaces = self.get_routers_and_interfaces()
        expected_vrfs = set()
        if self._use_vrf:
            expected_vrfs.update(self.driver._arista_router_name(
                r['id'], r['name']) for r in routers)
        expected_vlans = set(r['seg_id'] for r in router_interfaces)
        if self._enable_cleanup:
            self.do_cleanup(expected_vrfs, expected_vlans)
        self.create_routers(routers)
        self.create_router_interfaces(router_interfaces)

    def get_vrfs(self, server):
        """Returns the set of OpenStack-created VRF names present on server."""
        ret = self.driver._run_eos_cmds(['show vrf'], server)
        # defensive: unexpected command output is treated as "no VRFs"
        if len(ret or []) != 1 or 'vrfs' not in ret[0].keys():
            return set()
        eos_vrfs = set(vrf for vrf in ret[0]['vrfs'].keys()
                       if vrf.startswith('__OpenStack__'))
        return eos_vrfs

    def get_svis(self, server):
        """Returns the set of VLAN ids that have an SVI on server."""
        ret = self.driver._run_eos_cmds(['show ip interface'], server)
        if len(ret or []) != 1 or 'interfaces' not in ret[0].keys():
            return set()
        eos_svis = set(
            int(vlan.strip('Vlan'))
            for vlan in ret[0]['interfaces'].keys() if 'Vlan' in vlan)
        return eos_svis

    def get_vlans(self, server):
        """Returns the set of statically configured VLAN ids on server."""
        ret = self.driver._run_eos_cmds(['show vlan'], server)
        if len(ret or []) != 1 or 'vlans' not in ret[0].keys():
            return set()
        eos_vlans = set(int(vlan) for vlan, info in ret[0]['vlans'].items()
                        if not info['dynamic'])
        return eos_vlans

    def do_cleanup(self, expected_vrfs, expected_vlans):
        """Deletes SVIs/VLANs (and VRFs when in VRF mode) that Neutron does
        not expect, skipping operator-protected VLANs."""
        for server in self._servers:
            eos_svis = self.get_svis(server)
            eos_vlans = self.get_vlans(server)
            svis_to_delete = (eos_svis - self._protected_vlans
                              - expected_vlans)
            vlans_to_delete = (eos_vlans - self._protected_vlans
                               - expected_vlans)
            delete_cmds = []
            delete_cmds.extend('no interface vlan %s' % svi
                               for svi in svis_to_delete)
            delete_cmds.extend('no vlan %s' % vlan
                               for vlan in vlans_to_delete)
            if self._use_vrf:
                eos_vrfs = self.get_vrfs(server)
                vrfs_to_delete = eos_vrfs - expected_vrfs
                delete_cmds.extend(['no vrf definition %s' % vrf
                                    for vrf in vrfs_to_delete])
            if delete_cmds:
                self.driver._run_config_cmds(delete_cmds, server)

    def create_routers(self, routers):
        """Creates every router on EOS; failures are logged, not raised,
        so one bad router does not abort the sync."""
        for r in routers:
            try:
                self.driver.create_router(self, r)
            except Exception:
                LOG.error(_LE("Error Adding router %(router_id)s "
                              "on Arista HW"), {'router_id': r})

    def create_router_interfaces(self, router_interfaces):
        """Creates every router interface on EOS; failures are logged,
        not raised."""
        for r in router_interfaces:
            try:
                self.driver.add_router_interface(self, r)
            except Exception:
                LOG.error(_LE("Error Adding interface %(subnet_id)s "
                              "to router %(router_id)s on Arista HW"),
                          {'subnet_id': r['subnet_id'], 'router_id': r['id']})
class AristaL3ServicePlugin(service_base.ServicePluginBase,
                            extraroute_db.ExtraRoute_db_mixin,
                            l3_gwmode_db.L3_NAT_db_mixin,
                            l3_agentschedulers_db.L3AgentSchedulerDbMixin):

    """Implements L3 Router service plugin for Arista hardware.

    Creates routers in Arista hardware, manages them, adds/deletes interfaces
    to the routes.
    """

    supported_extension_aliases = ["router", "ext-gw-mode",
                                   "extraroute"]

    def __init__(self, driver=None):
        super(AristaL3ServicePlugin, self).__init__()
        # allow a driver to be injected for testing; default to the real one
        self.driver = driver or arista_l3_driver.AristaL3Driver()
        self.setup_rpc()
        self.add_worker(AristaL3SyncWorker(self.driver))

    def setup_rpc(self):
        """Wires up the L3 RPC topic, notifier and callback endpoints."""
        # RPC support
        self.topic = topics.L3PLUGIN
        self.conn = n_rpc.Connection()
        self.agent_notifiers.update(
            {n_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
        self.endpoints = [l3_rpc.L3RpcCallback()]
        self.conn.create_consumer(self.topic, self.endpoints,
                                  fanout=False)
        self.conn.consume_in_threads()

    def get_plugin_type(self):
        return plugin_constants.L3

    def get_plugin_description(self):
        """Returns string description of the plugin."""
        return ("Arista L3 Router Service Plugin for Arista Hardware "
                "based routing")

    @log_helpers.log_method_call
    def create_router(self, context, router):
        """Create a new router entry in DB, and create it Arista HW."""
        # Add router to the DB
        new_router = super(AristaL3ServicePlugin, self).create_router(
            context,
            router)
        # create router on the Arista Hw
        try:
            self.driver.create_router(context, new_router)
            return new_router
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error creating router on Arista HW router=%s "),
                          new_router)
                # roll back the DB record so Neutron and EOS stay consistent
                super(AristaL3ServicePlugin, self).delete_router(
                    context,
                    new_router['id']
                )

    @log_helpers.log_method_call
    def update_router(self, context, router_id, router):
        """Update an existing router in DB, and update it in Arista HW."""
        # Read existing router record from DB
        original_router = self.get_router(context, router_id)
        # Update router DB
        new_router = super(AristaL3ServicePlugin, self).update_router(
            context, router_id, router)
        # Modify router on the Arista Hw; HW errors are logged but the DB
        # update is kept (best-effort, periodic sync reconciles later)
        try:
            self.driver.update_router(context, router_id,
                                      original_router, new_router)
            return new_router
        except Exception:
            LOG.error(_LE("Error updating router on Arista HW router=%s "),
                      new_router)

    @log_helpers.log_method_call
    def delete_router(self, context, router_id):
        """Delete an existing router from Arista HW as well as from the DB."""
        router = self.get_router(context, router_id)
        # Delete router on the Arista Hw; proceed with DB deletion even if
        # the HW call fails so the router cannot get stuck in Neutron
        try:
            self.driver.delete_router(context, router_id, router)
        except Exception as e:
            LOG.error(_LE("Error deleting router on Arista HW "
                          "router %(r)s exception=%(e)s"),
                      {'r': router, 'e': e})
        super(AristaL3ServicePlugin, self).delete_router(context, router_id)

    @log_helpers.log_method_call
    def add_router_interface(self, context, router_id, interface_info):
        """Add a subnet of a network to an existing router."""
        new_router = super(AristaL3ServicePlugin, self).add_router_interface(
            context, router_id, interface_info)
        core = directory.get_plugin()
        # Get network info for the subnet that is being added to the router.
        # Check if the interface information is by port-id or subnet-id
        add_by_port, add_by_sub = self._validate_interface_info(interface_info)
        if add_by_sub:
            subnet = core.get_subnet(context, interface_info['subnet_id'])
        elif add_by_port:
            port = core.get_port(context, interface_info['port_id'])
            subnet_id = port['fixed_ips'][0]['subnet_id']
            subnet = core.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        # To create SVI's in Arista HW, the segmentation Id is required
        # for this network.
        ml2_db = NetworkContext(self, context, {'id': network_id})
        seg_id = ml2_db.network_segments[0]['segmentation_id']
        # Package all the info needed for Hw programming
        router = self.get_router(context, router_id)
        router_info = copy.deepcopy(new_router)
        router_info['seg_id'] = seg_id
        router_info['name'] = router['name']
        router_info['cidr'] = subnet['cidr']
        router_info['gip'] = subnet['gateway_ip']
        router_info['ip_version'] = subnet['ip_version']
        try:
            self.driver.add_router_interface(context, router_info)
            return new_router
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error Adding subnet %(subnet)s to "
                              "router %(router_id)s on Arista HW"),
                          {'subnet': subnet, 'router_id': router_id})
                # roll back the DB attachment on HW failure
                super(AristaL3ServicePlugin, self).remove_router_interface(
                    context,
                    router_id,
                    interface_info)

    @log_helpers.log_method_call
    def remove_router_interface(self, context, router_id, interface_info):
        """Remove a subnet of a network from an existing router."""
        router_to_del = (
            super(AristaL3ServicePlugin, self).remove_router_interface(
                context,
                router_id,
                interface_info)
        )
        # Get network information of the subnet that is being removed
        core = directory.get_plugin()
        subnet = core.get_subnet(context, router_to_del['subnet_id'])
        network_id = subnet['network_id']
        # For SVI removal from Arista HW, segmentation ID is needed
        ml2_db = NetworkContext(self, context, {'id': network_id})
        seg_id = ml2_db.network_segments[0]['segmentation_id']
        router = self.get_router(context, router_id)
        router_info = copy.deepcopy(router_to_del)
        router_info['seg_id'] = seg_id
        router_info['name'] = router['name']
        try:
            self.driver.remove_router_interface(context, router_info)
            return router_to_del
        except Exception as exc:
            # BUG FIX: the message previously read "...Arista HW""Exception
            # =(exc)s" — the missing '%' meant the exception was never
            # interpolated into the log line.
            LOG.error(_LE("Error removing interface %(interface)s from "
                          "router %(router_id)s on Arista HW. "
                          "Exception=%(exc)s"),
                      {'interface': interface_info, 'router_id': router_id,
                       'exc': exc})
| |
#!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import sys
from operator import lt, gt, eq, le, ge
from os.path import (
abspath,
dirname,
join,
)
from distutils.version import StrictVersion
from setuptools import (
Extension,
find_packages,
setup,
)
import versioneer
class LazyBuildExtCommandClass(dict):
    """
    Lazy command class that defers operations requiring Cython and numpy until
    they've actually been downloaded and installed by setup_requires.
    """
    def __contains__(self, key):
        # report that we always provide build_ext, plus any explicit entries
        return (
            key == 'build_ext'
            or super(LazyBuildExtCommandClass, self).__contains__(key)
        )

    def __setitem__(self, key, value):
        # build_ext must stay lazy; refuse any attempt to override it eagerly
        if key == 'build_ext':
            raise AssertionError("build_ext overridden!")
        super(LazyBuildExtCommandClass, self).__setitem__(key, value)

    def __getitem__(self, key):
        if key != 'build_ext':
            return super(LazyBuildExtCommandClass, self).__getitem__(key)
        # Deferred imports: only safe once setup_requires has installed them.
        from Cython.Distutils import build_ext as cython_build_ext
        import numpy

        # Cython_build_ext isn't a new-style class in Py2.
        class build_ext(cython_build_ext, object):
            """
            Custom build_ext command that lazily adds numpy's include_dir to
            extensions.
            """
            def build_extensions(self):
                """
                Lazily append numpy's include directory to Extension includes.
                This is done here rather than at module scope because setup.py
                may be run before numpy has been installed, in which case
                importing numpy and calling `numpy.get_include()` will fail.
                """
                numpy_incl = numpy.get_include()
                for ext in self.extensions:
                    ext.include_dirs.append(numpy_incl)
                super(build_ext, self).build_extensions()
        return build_ext
def window_specialization(typename):
    """Make an extension for an AdjustedArrayWindow specialization."""
    module_name = 'zipline.lib._{name}window'.format(name=typename)
    pyx_source = 'zipline/lib/_{name}window.pyx'.format(name=typename)
    return Extension(
        module_name,
        [pyx_source],
        depends=['zipline/lib/_windowtemplate.pxi'],
    )
# Cython extension modules to compile as part of the build.
ext_modules = [
    Extension('zipline.assets._assets', ['zipline/assets/_assets.pyx']),
    Extension('zipline.assets.continuous_futures',
              ['zipline/assets/continuous_futures.pyx']),
    Extension('zipline.lib.adjustment', ['zipline/lib/adjustment.pyx']),
    Extension('zipline.lib._factorize', ['zipline/lib/_factorize.pyx']),
    window_specialization('float64'),
    window_specialization('int64'),
    # NOTE(review): 'int64' is listed twice — possibly one of these was meant
    # to be a different dtype (e.g. 'datetime64'); confirm against zipline/lib.
    window_specialization('int64'),
    window_specialization('uint8'),
    window_specialization('label'),
    Extension('zipline.lib.rank', ['zipline/lib/rank.pyx']),
    Extension('zipline.data._equities', ['zipline/data/_equities.pyx']),
    Extension('zipline.data._adjustments', ['zipline/data/_adjustments.pyx']),
    Extension('zipline._protocol', ['zipline/_protocol.pyx']),
    Extension('zipline.gens.sim_engine', ['zipline/gens/sim_engine.pyx']),
    Extension(
        'zipline.data._minute_bar_internal',
        ['zipline/data/_minute_bar_internal.pyx']
    ),
    Extension(
        'zipline.utils.calendars._calendar_helpers',
        ['zipline/utils/calendars/_calendar_helpers.pyx']
    ),
    Extension(
        'zipline.data._resample',
        ['zipline/data/_resample.pyx']
    ),
    Extension(
        'zipline.pipeline.loaders.blaze._core',
        ['zipline/pipeline/loaders/blaze/_core.pyx'],
        depends=['zipline/lib/adjustment.pxd'],
    ),
]

# Maps requirement-file comparison operators to their Python operator functions.
STR_TO_CMP = {
    '<': lt,
    '<=': le,
    '=': eq,
    '==': eq,
    '>': gt,
    '>=': ge,
}

# Running interpreter version as a dotted string, e.g. '3.5.2'.
SYS_VERSION = '.'.join(list(map(str, sys.version_info[:3])))
def _filter_requirements(lines_iter, filter_names=None,
                         filter_sys_version=False):
    """Yield requirement lines, optionally filtered by package name and by
    the running interpreter's version (via ;python_version env markers)."""
    for raw in lines_iter:
        req = raw.strip()
        # skip blanks and comment lines
        if not req or req.startswith('#'):
            continue
        match = REQ_PATTERN.match(req)
        if match is None:
            raise AssertionError(
                "Could not parse requirement: '%s'" % req)
        if filter_names is not None and match.group('name') not in filter_names:
            continue
        if filter_sys_version and match.group('pyspec'):
            pycomp, pyspec = match.group('pycomp', 'pyspec')
            if STR_TO_CMP[pycomp](SYS_VERSION, StrictVersion(pyspec)):
                # pip install -r understands lines with ;python_version<'3.0',
                # but pip install -e does not. Filter here, removing the
                # env marker.
                yield req.split(';')[0]
            continue
        yield req
REQ_UPPER_BOUNDS = {
'bcolz': '<1',
'pandas': '<0.19',
}
def _with_bounds(req):
try:
req, lower = req.split('==')
except ValueError:
return req
else:
with_bounds = [req, '>=', lower]
upper = REQ_UPPER_BOUNDS.get(req)
if upper:
with_bounds.extend([',', upper])
return ''.join(with_bounds)
REQ_PATTERN = re.compile("(?P<name>[^=<>]+)(?P<comp>[<=>]{1,2})(?P<spec>[^;]+)"
"(?:(;\W*python_version\W*(?P<pycomp>[<=>]{1,2})\W*"
"(?P<pyspec>[0-9\.]+)))?")
def _conda_format(req):
def _sub(m):
name = m.group('name').lower()
if name == 'numpy':
return 'numpy x.x'
if name == 'tables':
name = 'pytables'
formatted = '%s %s%s' % ((name,) + m.group('comp', 'spec'))
pycomp, pyspec = m.group('pycomp', 'pyspec')
if pyspec:
# Compare the two-digit string versions as ints.
selector = ' # [int(py) %s int(%s)]' % (
pycomp, ''.join(pyspec.split('.')[:2]).ljust(2, '0')
)
return formatted + selector
return formatted
return REQ_PATTERN.sub(_sub, req, 1)
def read_requirements(path,
strict_bounds,
conda_format=False,
filter_names=None):
"""
Read a requirements.txt file, expressed as a path relative to Zipline root.
Returns requirements with the pinned versions as lower bounds
if `strict_bounds` is falsey.
"""
real_path = join(dirname(abspath(__file__)), path)
with open(real_path) as f:
reqs = _filter_requirements(f.readlines(), filter_names=filter_names,
filter_sys_version=not conda_format)
if not strict_bounds:
reqs = map(_with_bounds, reqs)
if conda_format:
reqs = map(_conda_format, reqs)
return list(reqs)
def install_requires(strict_bounds=False, conda_format=False):
    """Returns the core requirements read from etc/requirements.txt."""
    return read_requirements('etc/requirements.txt',
                             strict_bounds=strict_bounds,
                             conda_format=conda_format)
def extras_requires(conda_format=False):
    """Returns the extra requirement groups ('dev', 'talib') plus an 'all'
    group aggregating every extra requirement."""
    extras = {}
    for extra in ('dev', 'talib'):
        extras[extra] = read_requirements(
            'etc/requirements_{0}.txt'.format(extra),
            strict_bounds=True,
            conda_format=conda_format)
    extras['all'] = [req for reqs in extras.values() for req in reqs]
    return extras
def setup_requirements(requirements_path, module_names, strict_bounds,
                       conda_format=False):
    """Returns the requirement lines for the given modules, asserting that
    every requested module was found in the requirements file."""
    module_names = set(module_names)
    module_lines = read_requirements(requirements_path,
                                     strict_bounds=strict_bounds,
                                     conda_format=conda_format,
                                     filter_names=module_names)
    # every requested name must have produced exactly one distinct line
    if len(set(module_lines)) != len(module_names):
        raise AssertionError(
            "Missing requirements. Looking for %s, but found %s."
            % (module_names, module_lines))
    return module_lines
# Detect whether this setup.py is being driven by conda-build, which changes
# both the requirement formatting and the keyword setup() expects.
conda_build = os.path.basename(sys.argv[0]) in ('conda-build',  # unix
                                                'conda-build-script.py')  # win

setup_requires = setup_requirements(
    'etc/requirements.txt',
    ('Cython', 'numpy'),
    strict_bounds=conda_build,
    conda_format=conda_build,
)

# conda-build expects 'build_requires' where setuptools expects 'setup_requires'.
conditional_arguments = {
    'setup_requires' if not conda_build else 'build_requires': setup_requires,
}

setup(
    name='zipline',
    url="http://zipline.io",
    version=versioneer.get_version(),
    # LazyBuildExtCommandClass defers Cython/numpy imports until build time
    cmdclass=LazyBuildExtCommandClass(versioneer.get_cmdclass()),
    description='A backtester for financial algorithms.',
    entry_points={
        'console_scripts': [
            'zipline = zipline.__main__:main',
        ],
    },
    author='Quantopian Inc.',
    author_email='opensource@quantopian.com',
    packages=find_packages(include=['zipline', 'zipline.*']),
    ext_modules=ext_modules,
    include_package_data=True,
    # ship Cython sources/headers for every zipline package directory
    package_data={root.replace(os.sep, '.'):
                  ['*.pyi', '*.pyx', '*.pxi', '*.pxd']
                  for root, dirnames, filenames in os.walk('zipline')
                  if '__pycache__' not in root},
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Operating System :: OS Independent',
        'Intended Audience :: Science/Research',
        'Topic :: Office/Business :: Financial',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: System :: Distributed Computing',
    ],
    install_requires=install_requires(conda_format=conda_build),
    extras_require=extras_requires(conda_format=conda_build),
    **conditional_arguments
)
| |
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from debtcollector import moves
from hacking import core
from neutron_lib.hacking import checks
import pep8
import six
def flake8ext(f):
    """Decorator marking *f* as a neutron flake8 extension.

    This is borrowed from hacking.core.flake8ext(); for now it is used only
    by the unit tests to discover which checks are neutron flake8 extensions.
    """
    setattr(f, 'name', __name__)
    return f
# Guidelines for writing new hacking checks
#
#  - Use only for Neutron specific tests. OpenStack general tests
#    should be submitted to the common 'hacking' module.
#  - Pick numbers in the range N3xx. Find the current test with
#    the highest allocated number and then pick the next value.
#  - Keep the test method code in the source file ordered based
#    on the N3xx value.
#  - List the new rule in the top level HACKING.rst file
#  - Add test cases for each new rule to
#    neutron/tests/unit/hacking/test_checks.py

# Map of log level name -> translation-hint function expected at that level.
_all_log_levels = {
    'reserved': '_',  # this should never be used with a log unless
                      # it is a variable used for a log message and
                      # an exception
    'error': '_LE',
    'info': '_LI',
    'warning': '_LW',
    'critical': '_LC',
    'exception': '_LE',
}
# Set of every recognised hint name, e.g. {'_', '_LE', '_LI', ...}.
_all_hints = set(_all_log_levels.values())
def _regex_for_level(level, hint):
    """Builds a regex matching LOG.<level>() calls that use either a bare
    string or any translation hint *other than* the correct one."""
    wrong_hints = '|'.join(_all_hints - set([hint]))
    return r".*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % {
        'level': level,
        'wrong_hints': wrong_hints,
    }
# Matches LOG.<level>() calls whose first argument performs inline '%'
# string interpolation (args should be passed to the logger instead).
log_string_interpolation = re.compile(r".*LOG\.(?:error|warn|warning|info"
                                      r"|critical|exception|debug)"
                                      r"\([^,]*%[^,]*[,)]")
# Matches log calls using the wrong translation hint for their level.
log_translation_hint = re.compile(
    '|'.join('(?:%s)' % _regex_for_level(level, hint)
             for level, hint in six.iteritems(_all_log_levels)))
# Matches deprecated LOG.warn(...) usage.
log_warn = re.compile(
    r"(.)*LOG\.(warn)\(\s*('|\"|_)")
# Matches 'import unittest' / 'from unittest ...' statements.
unittest_imports_dot = re.compile(r"\bimport[\s]+unittest\b")
unittest_imports_from = re.compile(r"\bfrom[\s]+unittest\b")
@flake8ext
def validate_log_translations(logical_line, physical_line, filename):
    """N320 - Log messages require translation."""
    # Translations are not required in the test directory
    if "neutron/tests" in filename:
        return
    # honor per-line noqa suppression
    if pep8.noqa(physical_line):
        return
    if log_translation_hint.match(logical_line):
        yield (0, "N320: Log messages require translation hints!")
@flake8ext
def use_jsonutils(logical_line, filename):
    """N321 - Use jsonutils instead of json."""
    msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s"
    # Some files in the tree are not meant to be run from inside Neutron
    # itself, so we should not complain about them not using jsonutils
    json_check_skipped_patterns = [
        "neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/"
        "plugins/netwrap",
    ]
    if any(pattern in filename for pattern in json_check_skipped_patterns):
        return
    if "json." not in logical_line:
        return
    for func in ('dumps(', 'dump(', 'loads(', 'load('):
        pos = logical_line.find('json.%s' % func)
        if pos != -1:
            yield (pos, msg % {'fun': func[:-1]})
@flake8ext
def no_translate_debug_logs(logical_line, filename):
    """N319 - Check for 'LOG.debug(_(' and 'LOG.debug(_Lx('

    As per our translation policy,
    https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
    we shouldn't translate debug level logs.

    * This check assumes that 'LOG' is a logger.
    """
    # The hint prefixes are mutually exclusive, so at most one can match.
    if any(logical_line.startswith("LOG.debug(%s(" % hint)
           for hint in _all_hints):
        yield (0, "N319 Don't translate debug level logs")
@flake8ext
def check_assert_called_once_with(logical_line, filename):
    """N322 - Try to detect unintended calls of nonexistent mock methods like:
                 assert_called_once
                 assertCalledOnceWith
                 assert_has_called
                 called_once_with
    """
    if 'neutron/tests/' in filename:
        if '.assert_called_once_with(' in logical_line:
            return
        # Lowercase and drop underscores so every spelling variant of the
        # bogus method names collapses to the same token.
        uncased_line = logical_line.lower().replace('_', '')

        check_calls = ['.assertcalledonce', '.calledoncewith']
        # Idiom fix: test membership directly instead of filtering with a
        # generator ("any(x for x in cs if x in line)").
        if any(call in uncased_line for call in check_calls):
            msg = ("N322: Possible use of no-op mock method. "
                   "please use assert_called_once_with.")
            yield (0, msg)

        if '.asserthascalled' in uncased_line:
            msg = ("N322: Possible use of no-op mock method. "
                   "please use assert_has_calls.")
            yield (0, msg)
@flake8ext
def check_no_contextlib_nested(logical_line, filename):
    """N324 - Don't use contextlib.nested."""
    msg = ("N324: contextlib.nested is deprecated. With Python 2.7 and later "
           "the with-statement supports multiple nested objects. See https://"
           "docs.python.org/2/library/contextlib.html#contextlib.nested for "
           "more information.")
    # NOTE(review): 'checks' is imported elsewhere in this file; assumed to
    # provide a pre-compiled contextlib.nested pattern -- confirm.
    if checks.contextlib_nested.match(logical_line):
        yield(0, msg)
@flake8ext
def check_python3_xrange(logical_line):
    """N325 - Do not use xrange."""
    match = re.search(r"\bxrange\s*\(", logical_line)
    if match:
        msg = ("N325: Do not use xrange. Use range, or six.moves.range for "
               "large loops.")
        yield (0, msg)
@flake8ext
def check_no_basestring(logical_line):
    """N326 - Don't use basestring."""
    if re.search(r"\bbasestring\b", logical_line) is not None:
        yield (0, "N326: basestring is not Python3-compatible, use "
                  "six.string_types instead.")
@flake8ext
def check_python3_no_iteritems(logical_line):
    """N327 - Use six.iteritems()"""
    match = re.search(r".*\.iteritems\(\)", logical_line)
    if match:
        yield (0, "N327: Use six.iteritems() instead of dict.iteritems().")
@flake8ext
def check_asserttruefalse(logical_line, filename):
    """N328 - Don't use assertEqual(True/False, observed)."""
    if 'neutron/tests/' in filename:
        # The original spelled out four near-identical regex/message pairs;
        # each boolean literal is checked in both argument positions, in
        # the same order as before (True-left, True-right, False-left,
        # False-right), so emitted warnings are unchanged.
        for literal, assertion in (('True', 'assertTrue'),
                                   ('False', 'assertFalse')):
            msg = ("N328: Use %s(observed) instead of "
                   "assertEqual(%s, observed)" % (assertion, literal))
            if re.search(r"assertEqual\(\s*%s,[^,]*(,[^,]*)?" % literal,
                         logical_line):
                yield (0, msg)
            if re.search(r"assertEqual\([^,]*,\s*%s(,[^,]*)?" % literal,
                         logical_line):
                yield (0, msg)
# Deprecated aliases: the separate assertTrue/assertFalse checks were merged
# into check_asserttruefalse; these shims keep old config names working and
# are scheduled for removal (deprecated in Newton, removal in Ocata).
check_asserttrue = flake8ext(
    moves.moved_function(
        check_asserttruefalse, 'check_asserttrue', __name__,
        version='Newton', removal_version='Ocata'))

check_assertfalse = flake8ext(
    moves.moved_function(
        check_asserttruefalse, 'check_assertfalse', __name__,
        version='Newton', removal_version='Ocata'))


@flake8ext
def no_mutable_default_args(logical_line):
    """N329 - Don't use mutable default arguments."""
    msg = "N329: Method's default argument shouldn't be mutable!"
    # NOTE(review): pattern supplied by the external 'checks' module.
    if checks.mutable_default_args.match(logical_line):
        yield (0, msg)
@flake8ext
def check_assertempty(logical_line, filename):
    """N330 - Enforce using assertEqual parameter ordering in case of empty
    objects.
    """
    if 'neutron/tests/' not in filename:
        return
    # Literal empty containers/strings appearing as the *last* argument.
    empty_literals = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")"
    pattern = r"assertEqual\(([^,]*,\s*)+?%s\)\s*$" % empty_literals
    if re.search(pattern, logical_line):
        yield (0, "N330: Use assertEqual(*empty*, observed) instead of "
                  "assertEqual(observed, *empty*). *empty* contains "
                  "{}, [], (), set(), '', \"\"")
@flake8ext
def check_assertisinstance(logical_line, filename):
    """N331 - Enforce using assertIsInstance."""
    if 'neutron/tests/' not in filename:
        return
    if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)",
                 logical_line):
        yield (0, "N331: Use assertIsInstance(observed, type) instead "
                  "of assertTrue(isinstance(observed, type))")
@flake8ext
def check_assertequal_for_httpcode(logical_line, filename):
    """N332 - Enforce correct ordering for httpcode in assertEqual."""
    msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) "
           "instead of assertEqual(observed_http_code, expected_http_code)")
    if 'neutron/tests/' in filename:
        # Flags an HTTP status constant (e.g. "...HTTPNotFound.code")
        # appearing as the *second* argument of assertEqual.
        if re.search(r"assertEqual\(\s*[^,]*,[^,]*HTTP[^\.]*\.code\s*\)",
                     logical_line):
            yield (0, msg)
@flake8ext
def check_log_warn_deprecated(logical_line, filename):
    """N333 - Use LOG.warning."""
    match = log_warn.match(logical_line)
    if match:
        yield (0, "N333: Use LOG.warning due to compatibility with py3")
@flake8ext
def check_oslo_i18n_wrapper(logical_line, filename, noqa):
    """N340 - Check for neutron.i18n usage.

    Okay(neutron/foo/bar.py): from neutron._i18n import _
    Okay(neutron_lbaas/foo/bar.py): from neutron_lbaas._i18n import _
    N340(neutron/foo/bar.py): from neutron.i18n import _
    N340(neutron_lbaas/foo/bar.py): from neutron_lbaas.i18n import _
    N340(neutron_lbaas/foo/bar.py): from neutron.i18n import _
    N340(neutron_lbaas/foo/bar.py): from neutron._i18n import _
    Okay(neutron/foo/bar.py): from neutron.i18n import _  # noqa
    """
    if noqa:
        return

    split_line = logical_line.split()
    # The first path component is taken as the project/package name.
    # NOTE(review): splits on '/', so this assumes POSIX-style paths.
    modulename = os.path.normpath(filename).split('/')[0]
    bad_i18n_module = '%s.i18n' % modulename

    if (len(split_line) > 1 and split_line[0] in ('import', 'from')):
        # Flag "<project>.i18n", and for sub-projects also any neutron
        # i18n module (public or private).
        if (split_line[1] == bad_i18n_module or
                modulename != 'neutron' and split_line[1] in ('neutron.i18n',
                                                              'neutron._i18n')):
            msg = ("N340: %(found)s is found. Use %(module)s._i18n instead."
                   % {'found': split_line[1], 'module': modulename})
            yield (0, msg)
@flake8ext
def check_builtins_gettext(logical_line, tokens, filename, lines, noqa):
    """N341 - Check usage of builtins gettext _().

    Okay(neutron/foo.py): from neutron._i18n import _\n_('foo')
    N341(neutron/foo.py): _('foo')
    Okay(neutron/_i18n.py): _('foo')
    Okay(neutron/i18n.py): _('foo')
    Okay(neutron/foo.py): _('foo') # noqa
    """
    if noqa:
        return

    modulename = os.path.normpath(filename).split('/')[0]

    # Tests and the i18n modules themselves are exempt from this check.
    if '%s/tests' % modulename in filename:
        return

    if os.path.basename(filename) in ('i18n.py', '_i18n.py'):
        return

    token_values = [t[1] for t in tokens]
    i18n_wrapper = '%s._i18n' % modulename

    if '_' in token_values:
        # Accept any "from <project>._i18n import ..., _, ..." line; the
        # rstrip(',') handles multi-name imports.
        i18n_import_line_found = False
        for line in lines:
            split_line = [elm.rstrip(',') for elm in line.split()]
            if (len(split_line) > 1 and split_line[0] == 'from' and
                    split_line[1] == i18n_wrapper and
                    '_' in split_line):
                i18n_import_line_found = True
                break
        if not i18n_import_line_found:
            msg = ("N341: _ from python builtins module is used. "
                   "Use _ from %s instead." % i18n_wrapper)
            yield (0, msg)
@core.flake8ext
@core.off_by_default
def check_unittest_imports(logical_line):
    """N334 - Use unittest2 instead of unittest"""
    # Either "from unittest ..." or "import unittest ..." triggers exactly
    # one warning, as before.
    for pattern in (unittest_imports_from, unittest_imports_dot):
        if pattern.match(logical_line):
            yield (0, "N334: '%s' must be used instead of '%s'." % (
                logical_line.replace('unittest', 'unittest2'), logical_line))
            return
@flake8ext
def check_delayed_string_interpolation(logical_line, filename, noqa):
    """N342 String interpolation should be delayed at logging calls.

    N342: LOG.debug('Example: %s' % 'bad')
    Okay: LOG.debug('Example: %s', 'good')
    """
    if noqa or 'neutron/tests/' in filename:
        return
    if log_string_interpolation.match(logical_line):
        yield (0, "N342 String interpolation should be delayed to be "
                  "handled by the logging code, rather than being done "
                  "at the point of the logging call. "
                  "Use ',' instead of '%'.")
def factory(register):
    """hacking entry point: register every check defined in this module."""
    register(validate_log_translations)
    register(use_jsonutils)
    register(check_assert_called_once_with)
    register(no_translate_debug_logs)
    register(check_no_contextlib_nested)
    register(check_python3_xrange)
    register(check_no_basestring)
    register(check_python3_no_iteritems)
    register(check_asserttruefalse)
    register(no_mutable_default_args)
    register(check_assertempty)
    register(check_assertisinstance)
    register(check_assertequal_for_httpcode)
    register(check_log_warn_deprecated)
    register(check_oslo_i18n_wrapper)
    register(check_builtins_gettext)
    register(check_unittest_imports)
    register(check_delayed_string_interpolation)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
import datetime
import re
import reversion
from django.db import models
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from snisi_core.models.common import pre_save_report, post_save_report
from snisi_core.models.Reporting import (SNISIReport,
PeriodicAggregatedReportInterface,
PERIODICAL_SOURCE,
PERIODICAL_AGGREGATED)
from snisi_core.models.Periods import (WeekPeriod, ONE_WEEK_DELTA,
Period,
ONE_MICROSECOND_DELTA,
SpecificTypeManager,
normalize_date)
from snisi_nutrition.xls_export import nutrition_weekly_as_xls
logger = logging.getLogger(__name__)
class NutWeekManager(SpecificTypeManager):
    # Manager restricted to periods whose specific type is 'nut_week'.
    SPECIFIC_TYPE = 'nut_week'


class NutWeekPeriod(WeekPeriod):
    """Nutrition reporting week: runs Friday 00:00 to the next Friday."""

    class Meta:
        proxy = True
        verbose_name = _("Week Period")
        verbose_name_plural = _("Week Periods")

    objects = NutWeekManager()

    @classmethod
    def type(cls):
        # Period-type key used by the manager's queryset filtering.
        return 'nut_week'

    @property
    def pid(self):
        # Short identifier, e.g. "nW23-2015" (week number + year).
        return 'nW{}'.format(self.middle().strftime('%W-%Y'))

    def name(self):
        # Human-readable name, e.g. "SN23-2015".
        return "SN{}".format(self.middle().strftime('%W-%Y'))

    @classmethod
    def boundaries(cls, date_obj):
        """Return (start, end) of the nut week containing date_obj."""
        date_obj = normalize_date(date_obj, as_aware=True)
        # Rewind to Monday 00:00 of the calendar week containing date_obj.
        monday = date_obj - datetime.timedelta(date_obj.weekday())
        monday = monday.replace(hour=0, minute=0, second=0, microsecond=0)
        # Friday 00:00 of that same calendar week.
        friday_morning_dt = datetime.timedelta(days=4, minutes=0)
        friday_morning = monday + friday_morning_dt
        # Dates earlier than Friday belong to the week that started on the
        # *previous* Friday.
        is_next_week = not date_obj < friday_morning
        if not is_next_week:
            start = friday_morning - ONE_WEEK_DELTA
        else:
            start = friday_morning
        end = start + datetime.timedelta(cls.delta()) - ONE_MICROSECOND_DELTA
        return (start, end)

    def strid(self):
        # URL-safe identifier, e.g. "nW23-2015".
        return self.middle().strftime('nW%W-%Y')

    @classmethod
    def from_url_str(cls, period_str):
        """Parse "nW<week>-<year>" back into a period (Period fallback)."""
        rgxp = r'nW([0-9]{1,2})\-([0-9]{4})'
        if re.match(rgxp, period_str):
            week_num, year = [
                int(x) for x in re.match(rgxp, period_str).groups()]
            # Start from the period around Jan 1st of that year, then walk
            # forward week by week.
            p = cls.find_create_from(year, 1, 1, dont_create=True)
            if not p.contains(datetime.datetime(year, 1, 1)):
                p = p.previous()
            for idx in range(1, week_num + 1):
                p = p.following()
            return p
        return Period.from_url_str(period_str)
class NutWeekReportingManager(models.Manager):
    # Restrict the queryset to 'nut_week_reporting_period' rows.
    def get_queryset(self):
        return super(NutWeekReportingManager, self) \
            .get_queryset().filter(period_type='nut_week_reporting_period')


class NutWeekReportingPeriod(WeekPeriod):
    """Submission window: starts right after the nut week, lasts one day."""

    class Meta:
        proxy = True
        verbose_name = _("Week Reporting Period")
        verbose_name_plural = _("Week Reporting Periods")

    objects = NutWeekReportingManager()

    @classmethod
    def type(cls):
        return 'nut_week_reporting_period'

    @property
    def pid(self):
        # e.g. "nWRP23-2015"
        return 'nWRP{}'.format(self.middle().strftime('%W-%Y'))

    @classmethod
    def boundaries(cls, date_obj):
        # One day immediately following the corresponding nut week.
        nut_week = NutWeekPeriod.find_create_by_date(
            date_obj, dont_create=True)
        start = nut_week.end_on + ONE_MICROSECOND_DELTA
        end = start + datetime.timedelta(days=1)
        return start, end

    def strid(self):
        return self.middle().strftime('nWRP%W-%Y')
class NutWeekExtendedReportingManager(models.Manager):
    # Restrict the queryset to 'nut_week_extended_reporting_period' rows.
    def get_queryset(self):
        return super(NutWeekExtendedReportingManager, self) \
            .get_queryset().filter(
                period_type='nut_week_extended_reporting_period')


class NutWeekExtendedReportingPeriod(WeekPeriod):
    """Grace window: two days after the regular reporting period ends."""

    class Meta:
        proxy = True
        # NOTE(review): same verbose names as NutWeekReportingPeriod --
        # looks like a copy/paste leftover; confirm before renaming.
        verbose_name = _("Week Reporting Period")
        verbose_name_plural = _("Week Reporting Periods")

    objects = NutWeekExtendedReportingManager()

    @classmethod
    def type(cls):
        return 'nut_week_extended_reporting_period'

    @property
    def pid(self):
        # e.g. "nWERP23-2015"
        return 'nWERP{}'.format(self.middle().strftime('%W-%Y'))

    @classmethod
    def boundaries(cls, date_obj):
        # Two days immediately following the regular reporting period.
        nut_week = NutWeekReportingPeriod.find_create_by_date(
            date_obj, dont_create=True)
        start = nut_week.end_on + ONE_MICROSECOND_DELTA
        end = start + datetime.timedelta(days=2)
        return start, end

    def strid(self):
        return self.middle().strftime('nWERP%W-%Y')
class NutWeekDistrictValidationManager(models.Manager):
    # Restrict the queryset to 'nut_week_district_validation' rows.
    def get_queryset(self):
        return super(NutWeekDistrictValidationManager, self) \
            .get_queryset().filter(period_type='nut_week_district_validation')


class NutWeekDistrictValidationPeriod(WeekPeriod):
    """District validation window: four days after the nut week ends."""

    class Meta:
        proxy = True
        verbose_name = _("Week District Validation Period")
        verbose_name_plural = _("Week District Validation Periods")

    objects = NutWeekDistrictValidationManager()

    @classmethod
    def type(cls):
        return 'nut_week_district_validation'

    @property
    def pid(self):
        # e.g. "nWDVP23-2015"
        return 'nWDVP{}'.format(self.middle().strftime('%W-%Y'))

    @classmethod
    def boundaries(cls, date_obj):
        # Four days immediately following the corresponding nut week.
        nut_week = NutWeekPeriod.find_create_by_date(
            date_obj, dont_create=True)
        start = nut_week.end_on + ONE_MICROSECOND_DELTA
        end = start + datetime.timedelta(days=4)
        return start, end

    def strid(self):
        # NOTE(review): prefix 'nWVP' differs from pid's 'nWDVP' -- confirm
        # the mismatch is intentional before changing it.
        return self.middle().strftime('nWVP%W-%Y')
class NutWeekRegionValidationManager(models.Manager):
    # Restrict the queryset to 'nut_week_region_validation' rows.
    def get_queryset(self):
        return super(NutWeekRegionValidationManager, self) \
            .get_queryset().filter(period_type='nut_week_region_validation')


class NutWeekRegionValidationPeriod(WeekPeriod):
    """Region validation window: one day after district validation ends."""

    class Meta:
        proxy = True
        verbose_name = _("Week Region Validation Period")
        verbose_name_plural = _("Week Region Validation Periods")

    objects = NutWeekRegionValidationManager()

    @classmethod
    def type(cls):
        return 'nut_week_region_validation'

    @property
    def pid(self):
        # e.g. "nWRVP23-2015"
        return 'nWRVP{}'.format(self.middle().strftime('%W-%Y'))

    @classmethod
    def boundaries(cls, date_obj):
        # One day immediately following the district validation period.
        district_val_period = NutWeekDistrictValidationPeriod \
            .find_create_by_date(date_obj, dont_create=True)
        start = district_val_period.end_on + ONE_MICROSECOND_DELTA
        end = start + datetime.timedelta(days=1)
        return start, end

    def strid(self):
        return self.middle().strftime('nWRVP%W-%Y')
class AbstractWeeklyNutritionR(SNISIReport):
    """Common fields and helpers for weekly nutrition reports.

    Three severity tiers are tracked (labelled MAM, SAM and SAM+ in the
    field names urenam/urenas/ureni), each with screening, new-cases and
    deaths counters.
    """

    class Meta:
        app_label = 'snisi_nutrition'
        abstract = True

    # MAM (urenam) counters
    urenam_screening = models.PositiveIntegerField(
        _("MAM Screening"), default=0)
    urenam_cases = models.PositiveIntegerField(
        _("MAM Cases"), default=0)
    urenam_deaths = models.PositiveIntegerField(
        _("MAM Deaths"), default=0)

    # SAM (urenas) counters
    urenas_screening = models.PositiveIntegerField(
        _("SAM Screening"), default=0)
    urenas_cases = models.PositiveIntegerField(
        _("SAM Cases"), default=0)
    urenas_deaths = models.PositiveIntegerField(
        _("SAM Deaths"), default=0)

    # SAM+ (ureni) counters
    ureni_screening = models.PositiveIntegerField(
        _("SAM+ Screening"), default=0)
    ureni_cases = models.PositiveIntegerField(
        _("SAM+ Cases"), default=0)
    ureni_deaths = models.PositiveIntegerField(
        _("SAM+ Deaths"), default=0)

    def screening_fields(self):
        # Field names ending in 'screening' (relies on naming convention).
        return [field for field in self.data_fields()
                if field.endswith('screening')]

    def cases_fields(self):
        return [field for field in self.data_fields()
                if field.endswith('cases')]

    def deaths_fields(self):
        return [field for field in self.data_fields()
                if field.endswith('deaths')]

    def total_screening(self):
        # Sum across all tiers; missing attributes count as 0.
        return sum([getattr(self, field, 0)
                    for field in self.screening_fields()])

    def total_cases(self):
        return sum([getattr(self, field, 0)
                    for field in self.cases_fields()])

    def total_deaths(self):
        return sum([getattr(self, field, 0)
                    for field in self.deaths_fields()])

    @property
    def sam_screening(self):
        # Severe total = SAM + SAM+.
        return sum([self.urenas_screening, self.ureni_screening])

    @property
    def sam_cases(self):
        return sum([self.urenas_cases, self.ureni_cases])

    @property
    def sam_deaths(self):
        return sum([self.urenas_deaths, self.ureni_deaths])

    def as_xls(self):
        """Return (file_name, xls_payload) for this report."""
        file_name = "NUThebdo_{entity}.{period}.xls" \
            .format(entity=self.entity.slug,
                    period=self.period.strid())
        return file_name, nutrition_weekly_as_xls(self)
class WeeklyNutritionR(AbstractWeeklyNutritionR):
    """Primary (source) weekly nutrition report."""
    REPORTING_TYPE = PERIODICAL_SOURCE
    RECEIPT_FORMAT = "{period}-WNUT/{entity__slug}-{rand}"
    UNIQUE_TOGETHER = [('period', 'entity')]

    class Meta:
        app_label = 'snisi_nutrition'
        verbose_name = _("Weekly Nutrition Report")
        verbose_name_plural = _("Weekly Nutrition Reports")


# Standard SNISI report lifecycle hooks plus django-reversion history.
receiver(pre_save, sender=WeeklyNutritionR)(pre_save_report)
receiver(post_save, sender=WeeklyNutritionR)(post_save_report)
reversion.register(WeeklyNutritionR)
class AggWeeklyNutritionR(AbstractWeeklyNutritionR,
                          PeriodicAggregatedReportInterface,
                          SNISIReport):
    """Aggregated weekly nutrition report (sums over source reports)."""

    REPORTING_TYPE = PERIODICAL_AGGREGATED
    RECEIPT_FORMAT = "{period}-WNUTa/{entity__slug}-{rand}"
    INDIV_CLS = WeeklyNutritionR  # noqa -- see INDIVIDUAL_CLS below
    INDIVIDUAL_CLS = WeeklyNutritionR
    UNIQUE_TOGETHER = [('period', 'entity')]

    class Meta:
        app_label = 'snisi_nutrition'
        verbose_name = _("Aggregated Weekly Nutrition Report")
        verbose_name_plural = _("Aggregated Weekly Nutrition Reports")

    # Source reports this aggregation was built from.
    indiv_sources = models.ManyToManyField(
        INDIVIDUAL_CLS,
        verbose_name=_(u"Primary. Sources"),
        blank=True,
        related_name='source_agg_%(class)s_reports')

    # Source reports attached directly (not via a lower-level aggregate).
    direct_indiv_sources = models.ManyToManyField(
        INDIVIDUAL_CLS,
        verbose_name=_("Primary. Sources (direct)"),
        blank=True,
        related_name='direct_source_agg_%(class)s_reports')

    @classmethod
    def update_instance_with_indiv(cls, report, instance):
        # Accumulate every data field of a source report into the aggregate.
        for field in cls.data_fields():
            setattr(report, field,
                    getattr(report, field, 0) + getattr(instance, field, 0))

    @classmethod
    def update_instance_with_agg(cls, report, instance):
        # Same accumulation when folding in a lower-level aggregate.
        for field in cls.data_fields():
            setattr(report, field,
                    getattr(report, field, 0) + getattr(instance, field, 0))

    @classmethod
    def create_from(cls, period, entity, created_by,
                    indiv_sources=None, agg_sources=None):
        """Build the aggregate, auto-discovering sources when not given."""
        if indiv_sources is None:
            if entity.type.slug in ('health_center', 'health_district'):
                # Collect source reports from the entity's health centers
                # whose period falls inside this aggregation period.
                indiv_sources = cls.INDIVIDUAL_CLS.objects.filter(
                    period__start_on__gte=period.start_on,
                    period__end_on__lte=period.end_on) \
                    .filter(entity__in=entity.get_health_centers())
            else:
                indiv_sources = []

        if agg_sources is None and not len(indiv_sources):
            # No direct sources: aggregate from children aggregates instead.
            agg_sources = cls.objects.filter(
                period__start_on__gte=period.start_on,
                period__end_on__lte=period.end_on) \
                .filter(entity__in=entity.get_natural_children(
                    skip_slugs=['health_area']))

        return super(cls, cls).create_from(
            period=period,
            entity=entity,
            created_by=created_by,
            indiv_sources=indiv_sources,
            agg_sources=agg_sources)


receiver(pre_save, sender=AggWeeklyNutritionR)(pre_save_report)
receiver(post_save, sender=AggWeeklyNutritionR)(post_save_report)
reversion.register(AggWeeklyNutritionR)
| |
# -*- coding: utf-8 -*-
'''
Module for interacting with the GitHub v3 API.
.. versionadded:: 2016.3.0
:depends: PyGithub python module
Configuration
-------------
Configure this module by specifying the name of a configuration
profile in the minion config, minion pillar, or master config. The module
will use the 'github' key by default, if defined.
For example:
.. code-block:: yaml
github:
token: abc1234
org_name: my_organization
# optional: only some functions, such as 'add_user',
# require a dev_team_id
dev_team_id: 1234
'''
# Import python libs
from __future__ import absolute_import
import logging
# Import Salt Libs
from salt.exceptions import CommandExecutionError
# Import third party libs
# PyGithub is an optional dependency; the module only loads when it is
# importable (see __virtual__ below).
HAS_LIBS = False
try:
    import github
    import github.PaginatedList
    import github.NamedUser
    from github.GithubException import UnknownObjectException

    HAS_LIBS = True
except ImportError:
    pass

# Module-level logger; all functions in this module should log through it.
log = logging.getLogger(__name__)

__virtualname__ = 'github'
def __virtual__():
    '''
    Only load this module if PyGithub is installed on this minion.
    '''
    if not HAS_LIBS:
        return (False, 'The github execution module cannot be loaded: '
                'PyGithub library is not installed.')
    return __virtualname__
def _get_profile(profile):
    # Pull the named profile from minion/pillar/master configuration.
    config = __salt__['config.option'](profile)
    if config:
        return config
    raise CommandExecutionError(
        'Authentication information could not be found for the '
        '\'{0}\' profile.'.format(profile)
    )
def _get_secret_key(profile):
    # The API token is mandatory for every call in this module.
    token = _get_profile(profile).get('token')
    if token:
        return token
    raise CommandExecutionError(
        'The required \'token\' parameter was not found in the '
        '\'{0}\' profile.'.format(profile)
    )
def _get_org_name(profile):
    # The organization name is mandatory for every call in this module.
    org_name = _get_profile(profile).get('org_name')
    if org_name:
        return org_name
    raise CommandExecutionError(
        'The required \'org_name\' parameter was not found in the '
        '\'{0}\' profile.'.format(profile)
    )
def _get_dev_team_id(profile):
    # Only required by team-scoped functions such as add_user.
    dev_team_id = _get_profile(profile).get('dev_team_id')
    if dev_team_id:
        return dev_team_id
    raise CommandExecutionError(
        'The \'dev_team_id\' option was not found in the \'{0}\' '
        'profile.'.format(profile)
    )
def _get_client(profile):
    '''
    Return the GitHub client, cached into __context__ for performance
    '''
    # The cache key includes both token and org so different profiles in
    # the same run do not collide.
    key = 'github.{0}:{1}'.format(
        _get_secret_key(profile),
        _get_org_name(profile)
    )

    if key not in __context__:
        __context__[key] = github.Github(
            _get_secret_key(profile),
        )
    return __context__[key]
def _get_members(organization, params=None):
    # Build the paginated member listing by hand.
    # NOTE(review): presumably done instead of organization.get_members()
    # so arbitrary query params can be passed -- confirm.
    return github.PaginatedList.PaginatedList(
        github.NamedUser.NamedUser,
        organization._requester,
        organization.url + "/members",
        params
    )
def list_users(profile="github"):
    '''
    List all users within the organization.

    profile
        The name of the profile configuration to use. Defaults to ``github``.

    CLI Example:

    .. code-block:: bash

        salt myminion github.list_users
        salt myminion github.list_users profile='my-github-profile'
    '''
    # Memoized per-org in __context__ so repeated calls within one run do
    # not re-walk the paginated member listing.
    key = "github.{0}:users".format(
        _get_org_name(profile)
    )

    if key not in __context__:
        client = _get_client(profile)
        organization = client.get_organization(_get_org_name(profile))
        users = [member.login for member in _get_members(organization, None)]
        __context__[key] = users
    return __context__[key]
def get_user(name, profile='github', user_details=False):
    '''
    Get a GitHub user by name.

    name
        The user for which to obtain information.

    profile
        The name of the profile configuration to use. Defaults to ``github``.

    user_details
        Prints user information details. Defaults to ``False``. If the user is
        already in the organization and user_details is set to False, the
        get_user function returns ``True``. If the user is not already present
        in the organization, user details will be printed by default.

    CLI Example:

    .. code-block:: bash

        salt myminion github.get_user github-handle
        salt myminion github.get_user github-handle user_details=true

    '''
    if not user_details and name in list_users(profile):
        # User is in the org, no need for additional Data
        return True

    response = {}
    client = _get_client(profile)
    organization = client.get_organization(_get_org_name(profile))

    try:
        user = client.get_user(name)
    except UnknownObjectException as e:
        # Fix: use the module-level logger instead of the root logger so
        # records carry this module's name and honor salt's log config.
        log.exception('Resource not found: {0}'.format(str(e)))
        return False

    response['company'] = user.company
    response['created_at'] = user.created_at
    response['email'] = user.email
    response['html_url'] = user.html_url
    response['id'] = user.id
    response['login'] = user.login
    response['name'] = user.name
    response['type'] = user.type
    response['url'] = user.url

    try:
        # Membership state ('active'/'pending') is only available through
        # the raw memberships endpoint, hence the low-level request.
        headers, data = organization._requester.requestJsonAndCheck(
            "GET",
            organization.url + "/memberships/" + user._identity
        )
    except UnknownObjectException:
        response['membership_state'] = 'nonexistent'
        response['in_org'] = False
        return response

    response['in_org'] = organization.has_in_members(user)
    response['membership_state'] = data.get('state')

    return response
def add_user(name, profile='github'):
    '''
    Add a GitHub user.

    name
        The user for which to obtain information.

    profile
        The name of the profile configuration to use. Defaults to ``github``.

    CLI Example:

    .. code-block:: bash

        salt myminion github.add_user github-handle
    '''
    client = _get_client(profile)
    organization = client.get_organization(_get_org_name(profile))

    try:
        github_named_user = client.get_user(name)
    except UnknownObjectException as e:
        # Fix: use the module-level logger instead of the root logger.
        log.exception('Resource not found: {0}'.format(str(e)))
        return False

    org_team = organization.get_team(_get_dev_team_id(profile))

    try:
        # Invite the user by creating a team membership with role 'member'.
        headers, data = org_team._requester.requestJsonAndCheck(
            "PUT",
            org_team.url + "/memberships/" + github_named_user._identity,
            input={'role': 'member'},
            parameters={'role': 'member'}
        )
    except github.GithubException as e:
        # Best-effort, as before: the membership may already exist, so log
        # the error and report success.
        log.error(str(e))
        return True

    headers, data = organization._requester.requestJsonAndCheck(
        "GET",
        organization.url + "/memberships/" + github_named_user._identity
    )
    # A freshly-invited user shows up as 'pending' until they accept.
    return data.get('state') == 'pending'
def remove_user(name, profile='github'):
    '''
    Remove a Github user by name.

    name
        The user for which to obtain information.

    profile
        The name of the profile configuration to use. Defaults to ``github``.

    CLI Example:

    .. code-block:: bash

        salt myminion github.remove_user github-handle
    '''
    client = _get_client(profile)
    organization = client.get_organization(_get_org_name(profile))

    try:
        git_user = client.get_user(name)
    except UnknownObjectException as e:
        # Fix: use the module-level logger instead of the root logger.
        log.exception("Resource not found: {0}".format(str(e)))
        return False

    if organization.has_in_members(git_user):
        organization.remove_from_members(git_user)

    # True when the user is no longer a member.
    return not organization.has_in_members(git_user)
| |
# base class and imports for the sqlalchemy models in this module
from sqlalchemy.ext.declarative import declarative_base
import json
from sqlalchemy import create_engine
import modelconfig
# import ModelBase
from sqlalchemy import BigInteger, Text, DateTime, Boolean, Column, Float
from sqlalchemy import Integer, ForeignKey, ForeignKeyConstraint
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.dialects.postgresql import JSON

# Declarative base shared by every model below.
Base = declarative_base()
class HashTag(Base):
    """Association of a hashtag text with the status it appeared in."""
    __tablename__ = 'hashtags'
    # Composite primary key: a status carries many hashtags and a hashtag
    # appears in many statuses.
    status = Column(BigInteger, primary_key = True)
    hashtag = Column(Text, primary_key = True)


# relationship of user A follows user B
class Follows(Base):
    """Directed follow edge: usera follows userb."""
    __tablename__ = 'follows'
    usera = Column(BigInteger, primary_key=True)
    userb = Column(BigInteger, primary_key=True)
class Twitter_User(Base):
    """A twitter account as captured from the API."""
    __tablename__ = 'twitter_user'
    uid = Column(BigInteger, primary_key=True)
    user_name = Column(Text)
    profile = Column(Text)
    created_at = Column(DateTime)
    rawjson = Column(JSON)  # full API payload for the user
    user_id = Column(BigInteger)  # NOTE(review): relation to uid unclear -- confirm
    num_followers = Column(Integer)
    istarget = Column(Boolean)
    statuses_count = Column(Integer)
    # print 'test'
    # statuses = relationship("Status",backref='twitter_user')

    def __repr__(self):
        return """<twitter_user(id='%s',user_name='%s',profile='%s',
        created_at='%s',rawjson='%s',user_id='%s',num_followers='%s',
        istarget='%s',statuses_count='%s')>""" % (self.uid,
                                                  self.user_name,
                                                  self.profile,
                                                  self.created_at,
                                                  self.rawjson,
                                                  self.user_id,
                                                  self.num_followers,
                                                  self.istarget,
                                                  self.statuses_count)
class Status(Base):
    """A tweet ('status') row, with self-references for retweets/replies."""
    __tablename__ = 'status'
    sid = Column(BigInteger, primary_key=True)
    txt = Column(Text)
    status_reply_id = Column(BigInteger, ForeignKey('status.sid'))
    # *_holding columns stage foreign ids until the referenced row exists.
    status_reply_id_holding = Column(BigInteger)
    author_id = Column(BigInteger, ForeignKey('twitter_user.uid'))
    retweet_id = Column(BigInteger, ForeignKey('status.sid'))
    retweet_id_holding = Column(BigInteger)
    user_reply_id = Column(BigInteger)  # ,ForeignKey('twitter_user.uid'))
    retweet_count = Column(BigInteger)
    rawjson = Column(JSON)
    created_at = Column(DateTime)
    trend_name = Column(Text)
    trend_query = Column(Text)
    url = Column(Text)
    is_truncated = Column(Boolean)
    hashtagisset = Column(Boolean)
    ForeignKeyConstraint([trend_name, trend_query],
                         ['trend.trend_name', 'trend.trend_query'])
    retweets = relationship("Status", foreign_keys=[retweet_id])
    replies = relationship("Status", foreign_keys=[status_reply_id])

    def __repr__(self):
        return """<Status(sid='%s',txt='%s',status_reply_id='%s',
        status_reply_id_holding='%s',
        author_id='%s',retweet_id='%s',retweet_id_holding='%s',user_reply_id='%s',
        retweet_count='%s',rawjson='%s',created_at='%s',trend_name='%s',
        trend_query='%s',retweets='%s',replies='%s,url='%s',is_truncated='%s'""" % (
            self.sid,
            self.txt,
            self.status_reply_id,
            self.status_reply_id_holding,
            self.author_id,
            self.retweet_id,
            self.retweet_id_holding,
            self.user_reply_id,
            self.retweet_count,
            self.rawjson,
            self.created_at,
            self.trend_name,
            self.trend_query,
            self.retweets,
            self.replies,
            self.url,
            self.is_truncated)

    @staticmethod
    def AddManyStatusFromTweepy(statuses):
        """Insert any statuses not already present, then commit.

        BUG FIX: the original tested ``.scalar is None`` -- comparing the
        bound *method object* (never None) -- so the existence check always
        failed and no row was ever added. It now calls ``.scalar()``.
        """
        sesh = SessionFactory()
        for s in statuses:
            already_there = sesh.query(Status).filter(
                Status.sid == s.id).scalar() is not None
            if not already_there:
                sesh.add(Status.StatusFromTweepy(s))
        sesh.commit()
        sesh.close()

    def hashtags(self):
        """Return HashTag rows parsed from rawjson entities (None if absent)."""
        # rawjson may already be a parsed dict (JSON column) or a raw JSON
        # string; decode only in the latter case.
        # NOTE(review): the original tested for str/unicode (Python 2 only
        # and a NameError on Python 3); the dict test below preserves that
        # behavior for dict/str payloads -- confirm no other types occur.
        if isinstance(self.rawjson, dict):
            js = self.rawjson
        else:
            js = json.loads(self.rawjson)
        if 'entities' in js:
            entities = js['entities']
            if 'hashtags' in entities:
                hts = entities['hashtags']
                return [HashTag(hashtag=ht['text'], status=self.sid)
                        for ht in hts]

    @staticmethod
    def StatusFromTweepy(tweepyStatus, verbose=False, trend=None, sesh=None):
        """Build a Status row from a tweepy status object."""
        if trend is not None:
            trend_name = trend.trend_name
            query = trend.query
        else:
            trend_name = None
            query = None
        # Retweets carry the original status id; staged in the holding
        # column until the referenced row exists.
        if hasattr(tweepyStatus, 'retweeted_status'):
            retweeted_id = tweepyStatus.retweeted_status.id
        else:
            retweeted_id = None
        # NOTE(review): in_reply_to_user_id feeding status_reply_id_holding
        # while in_reply_to_status_id feeds user_reply_id looks swapped;
        # kept as-is since stored data may rely on it -- confirm.
        return Status(sid=tweepyStatus.id,
                      txt=tweepyStatus.text,
                      status_reply_id_holding=tweepyStatus.in_reply_to_user_id,
                      status_reply_id=None,
                      author_id=tweepyStatus.author.id,
                      retweet_id=None,
                      retweet_id_holding=retweeted_id,
                      retweet_count=tweepyStatus.retweet_count,
                      user_reply_id=tweepyStatus.in_reply_to_status_id,
                      rawjson=tweepyStatus._json,
                      created_at=tweepyStatus.created_at,
                      trend_name=trend_name,
                      trend_query=query,
                      url=None,
                      is_truncated=None
                      )

    @staticmethod
    def StatusFromOldRecord(record):
        """Build a Status from a legacy record (schema-migration helper)."""
        print("making status")
        return Status(sid=record.id,
                      txt=record.txt,
                      status_reply_id_holding=record.status_reply_id,
                      status_reply_id=None,
                      author_id=record.author_id,
                      retweet_id=None,
                      retweet_id_holding=record.retweet_id,
                      user_reply_id=record.user_reply_id,
                      retweet_count=record.retweet_count,
                      rawjson=record.rawjson,
                      created_at=record.created_at,
                      trend_name=record.trend_name,
                      trend_query=record.trend_query,
                      url=None,
                      is_truncated=None)
class Bot(Base):
    """Credentials and scheduling state for an automated account."""
    __tablename__ = 'bot'
    uid = Column(BigInteger, ForeignKey('twitter_user.uid'),
                 primary_key=True)
    access_key = Column(Text)
    access_secret = Column(Text)
    email = Column(Text)
    password = Column(Text)
    alias = Column(Text)
    botrole = Column(Text)  # scoreing, streaming, support
    activity_level = Column(Float)
    lastmention = Column(BigInteger)
    lastmessage = Column(BigInteger)
    lastawake = Column(DateTime)
    lastspam = Column(DateTime)

    @staticmethod
    def fromRecordArray(rg):
        # Build a Bot from a raw record array; strips quotes/whitespace
        # from each field first. Expected order: uid, access_key,
        # access_secret, email, password, alias.
        rg = [e.strip(" '\"\r\n") for e in rg]
        return Bot(uid=rg[0], access_key=rg[1], access_secret=rg[2],
                   email=rg[3], password=rg[4], alias=rg[5])

    def __repr__(self):
        return "Bot<uid=%s,access_key=%s,access_secret=%s,email=%s,password=%s,alias=%s>"%(self.uid,self.access_key,self.access_secret,self.email,self.password, self.alias)
class Trend(Base):
    """A trending-topic snapshot; Status rows reference it by (name, query)."""
    __tablename__ = 'trend'
    trend_name = Column(Text, primary_key=True)
    query = Column(Text, primary_key=True)
    created_at = Column(DateTime)
    as_of = Column(DateTime)

    def __repr__(self):
        return """<Trend(trend_name='%s',query='%s',created_at='%s'
        ,as_of ='%s'""" % (self.trend_name,
                           self.query,
                           self.created_at,
                           self.as_of)


# Engine/session bootstrap.
# NOTE(review): pool_size=400 with max_overflow=100 is very large for a
# single process -- confirm this is intentional.
config = modelconfig.modelconfig()
engine = create_engine(config.connectionString, echo=config.echo, pool_size=400,max_overflow=100)
# NOTE(review): despite the earlier "uncomment to create db" note, this
# call is active: importing this module creates all tables. Confirm.
Base.metadata.create_all(engine)
SessionFactory = sessionmaker(bind=engine)
| |
# stdlib
import os
# 3p
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
from util import get_hostname
@attr(requires='haproxy')
class HaproxyTest(AgentCheckTest):
    """Integration tests for the haproxy agent check.

    Runs the check against a live haproxy instance (hence the nose `requires`
    attribute) and asserts the expected gauges, rates and service checks.
    The haproxy version under test is read from $FLAVOR_VERSION.
    """
    CHECK_NAME = 'haproxy'

    # Backends / servers configured in the test haproxy instance.
    BACKEND_SERVICES = ['anotherbackend', 'datadog']
    BACKEND_LIST = ['singleton:8080', 'singleton:8081', 'otherserver']

    FRONTEND_CHECK_GAUGES = [
        'haproxy.frontend.session.current',
        'haproxy.frontend.session.limit',
        'haproxy.frontend.session.pct',
    ]

    # Only reported by haproxy >= 1.4.
    FRONTEND_CHECK_GAUGES_POST_1_4 = [
        'haproxy.frontend.requests.rate',
    ]

    BACKEND_CHECK_GAUGES = [
        'haproxy.backend.queue.current',
        'haproxy.backend.session.current',
    ]

    # Only reported by haproxy >= 1.5.
    BACKEND_CHECK_GAUGES_POST_1_5 = [
        'haproxy.backend.queue.time',
        'haproxy.backend.connect.time',
        'haproxy.backend.response.time',
        'haproxy.backend.session.time',
    ]

    FRONTEND_CHECK_RATES = [
        'haproxy.frontend.bytes.in_rate',
        'haproxy.frontend.bytes.out_rate',
        'haproxy.frontend.denied.req_rate',
        'haproxy.frontend.denied.resp_rate',
        'haproxy.frontend.errors.req_rate',
        'haproxy.frontend.session.rate',
    ]

    # Only reported by haproxy >= 1.4.
    FRONTEND_CHECK_RATES_POST_1_4 = [
        'haproxy.frontend.response.1xx',
        'haproxy.frontend.response.2xx',
        'haproxy.frontend.response.3xx',
        'haproxy.frontend.response.4xx',
        'haproxy.frontend.response.5xx',
        'haproxy.frontend.response.other',
    ]

    BACKEND_CHECK_RATES = [
        'haproxy.backend.bytes.in_rate',
        'haproxy.backend.bytes.out_rate',
        'haproxy.backend.denied.resp_rate',
        'haproxy.backend.errors.con_rate',
        'haproxy.backend.errors.resp_rate',
        'haproxy.backend.session.rate',
        'haproxy.backend.warnings.redis_rate',
        'haproxy.backend.warnings.retr_rate',
    ]

    # Only reported by haproxy >= 1.4.
    BACKEND_CHECK_RATES_POST_1_4 = [
        'haproxy.backend.response.1xx',
        'haproxy.backend.response.2xx',
        'haproxy.backend.response.3xx',
        'haproxy.backend.response.4xx',
        'haproxy.backend.response.5xx',
        'haproxy.backend.response.other',
    ]

    @staticmethod
    def _flavor_at_least(major, minor):
        """Return True if $FLAVOR_VERSION is at least `major.minor`.

        Fixed: the previous inline checks compared version components as
        *strings* (e.g. ['1', '10'] >= ['1', '4'] is False because
        '10' < '4' lexicographically), which mis-detected haproxy 1.10+
        as older than 1.4/1.5. Comparing integer tuples is correct for
        any version. An unset or unparsable FLAVOR_VERSION compares as
        "too old" (False), matching the previous behaviour for an empty
        value.
        """
        parts = os.environ.get('FLAVOR_VERSION', '').split('.')[:2]
        try:
            version = tuple(int(part) for part in parts)
        except ValueError:
            return False
        return version >= (major, minor)

    def __init__(self, *args, **kwargs):
        AgentCheckTest.__init__(self, *args, **kwargs)
        # Default instance config: authenticated stats endpoint.
        self.config = {
            "instances": [{
                'url': 'http://localhost:3835/stats',
                'username': 'datadog',
                'password': 'isdevops',
                'status_check': True,
                'collect_aggregates_only': False,
                'tag_service_check_by_host': True,
            }]
        }
        # Alternate config: unauthenticated ("open") stats endpoint.
        self.config_open = {
            'instances': [{
                'url': 'http://localhost:3836/stats',
                'collect_aggregates_only': False,
            }]
        }

    def _test_frontend_metrics(self, shared_tag):
        """Assert every expected frontend gauge/rate was emitted once."""
        frontend_tags = shared_tag + ['type:FRONTEND', 'service:public']
        for gauge in self.FRONTEND_CHECK_GAUGES:
            self.assertMetric(gauge, tags=frontend_tags, count=1)

        if self._flavor_at_least(1, 4):
            for gauge in self.FRONTEND_CHECK_GAUGES_POST_1_4:
                self.assertMetric(gauge, tags=frontend_tags, count=1)

        for rate in self.FRONTEND_CHECK_RATES:
            self.assertMetric(rate, tags=frontend_tags, count=1)

        if self._flavor_at_least(1, 4):
            for rate in self.FRONTEND_CHECK_RATES_POST_1_4:
                self.assertMetric(rate, tags=frontend_tags, count=1)

    def _test_backend_metrics(self, shared_tag, services=None):
        """Assert every expected backend gauge/rate was emitted once.

        Args:
            shared_tag: list of tags common to all metrics of the run.
            services: optional subset of BACKEND_SERVICES to check.
        """
        backend_tags = shared_tag + ['type:BACKEND']
        if not services:
            services = self.BACKEND_SERVICES

        for service in services:
            for backend in self.BACKEND_LIST:
                tags = backend_tags + ['service:' + service, 'backend:' + backend]

                for gauge in self.BACKEND_CHECK_GAUGES:
                    self.assertMetric(gauge, tags=tags, count=1)

                if self._flavor_at_least(1, 5):
                    for gauge in self.BACKEND_CHECK_GAUGES_POST_1_5:
                        self.assertMetric(gauge, tags=tags, count=1)

                for rate in self.BACKEND_CHECK_RATES:
                    self.assertMetric(rate, tags=tags, count=1)

                if self._flavor_at_least(1, 4):
                    for rate in self.BACKEND_CHECK_RATES_POST_1_4:
                        self.assertMetric(rate, tags=tags, count=1)

    def _test_service_checks(self, services=None):
        """Assert the expected service-check statuses per service/backend."""
        if not services:
            services = self.BACKEND_SERVICES

        for service in services:
            # Individual servers report no check -> UNKNOWN.
            for backend in self.BACKEND_LIST:
                tags = ['service:' + service, 'backend:' + backend]
                self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
                                        status=AgentCheck.UNKNOWN,
                                        count=1,
                                        tags=tags)
            # The BACKEND aggregate reports UP -> OK.
            tags = ['service:' + service, 'backend:BACKEND']
            self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
                                    status=AgentCheck.OK,
                                    count=1,
                                    tags=tags)

    def test_check(self):
        """Happy path against the authenticated endpoint."""
        self.run_check_twice(self.config)

        shared_tag = ['instance_url:http://localhost:3835/stats']

        self._test_frontend_metrics(shared_tag)
        self._test_backend_metrics(shared_tag)

        # check was run 2 times
        #       - FRONTEND is reporting OPEN that we ignore
        #       - only the BACKEND aggregate is reporting UP -> OK
        #       - The 3 individual servers are returning no check -> UNKNOWN
        self._test_service_checks()

        # Make sure the service checks aren't tagged with an empty hostname.
        self.assertEquals(self.service_checks[0]['host_name'], get_hostname())

        self.coverage_report()

    def test_check_service_filter(self):
        """Only services matching services_include should be collected."""
        config = self.config
        config['instances'][0]['services_include'] = ['datadog']
        config['instances'][0]['services_exclude'] = ['.*']
        self.run_check_twice(config)

        shared_tag = ['instance_url:http://localhost:3835/stats']

        self._test_backend_metrics(shared_tag, ['datadog'])
        self._test_service_checks(['datadog'])

        self.coverage_report()

    def test_wrong_config(self):
        """Bad credentials should raise and emit nothing."""
        config = self.config
        config['instances'][0]['username'] = 'fake_username'

        self.assertRaises(Exception, lambda: self.run_check(config))

        # Test that nothing has been emitted
        self.coverage_report()

    def test_open_config(self):
        """Happy path against the unauthenticated endpoint."""
        self.run_check_twice(self.config_open)

        shared_tag = ['instance_url:http://localhost:3836/stats']

        self._test_frontend_metrics(shared_tag)
        self._test_backend_metrics(shared_tag)
        self._test_service_checks()

        # This time, make sure the hostname is empty
        self.assertEquals(self.service_checks[0]['host_name'], '')

        self.coverage_report()

    # Keeping a mocked test since it tests the internal
    # process of service checks
    def test_count_per_statuses(self):
        """Verify hosts_statuses aggregation per service and per host."""
        from collections import defaultdict
        self.run_check(self.config)

        data = """# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,
a,FRONTEND,,,1,2,12,1,11,11,0,0,0,,,,,OPEN,,,,,,,,,1,1,0,,,,0,1,0,2,,,,0,1,0,0,0,0,,1,1,1,,,
a,BACKEND,0,0,0,0,12,0,11,11,0,0,,0,0,0,0,UP,0,0,0,,0,1221810,0,,1,1,0,,0,,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,
b,FRONTEND,,,1,2,12,11,11,0,0,0,0,,,,,OPEN,,,,,,,,,1,2,0,,,,0,0,0,1,,,,,,,,,,,0,0,0,,,
b,i-1,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP,1,1,0,0,1,1,30,,1,3,1,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-2,0,0,1,1,,1,1,0,,0,,0,0,0,0,UP,1,1,0,0,0,1,0,,1,3,2,,71,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-3,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,BACKEND,0,0,1,2,0,421,1,0,0,0,,0,0,0,0,UP,6,6,0,,0,1,0,,1,3,0,,421,,1,0,,1,,,,,,,,,,,,,,0,0,
""".split('\n')

        # per service
        self.check._process_data(data, True, False, collect_status_metrics=True,
                                 collect_status_metrics_by_host=False)

        expected_hosts_statuses = defaultdict(int)
        expected_hosts_statuses[('b', 'OPEN')] = 1
        expected_hosts_statuses[('b', 'UP')] = 3
        expected_hosts_statuses[('a', 'OPEN')] = 1
        self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)

        # with collect_aggregates_only set to True
        self.check._process_data(data, True, True, collect_status_metrics=True,
                                 collect_status_metrics_by_host=False)
        self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)

        # per host
        self.check._process_data(data, True, False, collect_status_metrics=True,
                                 collect_status_metrics_by_host=True)
        expected_hosts_statuses = defaultdict(int)
        expected_hosts_statuses[('b', 'FRONTEND', 'OPEN')] = 1
        expected_hosts_statuses[('a', 'FRONTEND', 'OPEN')] = 1
        expected_hosts_statuses[('b', 'i-1', 'UP')] = 1
        expected_hosts_statuses[('b', 'i-2', 'UP')] = 1
        expected_hosts_statuses[('b', 'i-3', 'UP')] = 1
        self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)

        self.check._process_data(data, True, True, collect_status_metrics=True,
                                 collect_status_metrics_by_host=True)
        self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for working with lead sheets."""
import abc
import copy
import itertools
from magenta.music import chords_lib
from magenta.music import constants
from magenta.music import events_lib
from magenta.music import melodies_lib
from magenta.pipelines import statistics
from magenta.protobuf import music_pb2
# Constants.
# Fixed: these two assignments were duplicated verbatim; keep one copy each.
DEFAULT_STEPS_PER_BAR = constants.DEFAULT_STEPS_PER_BAR
DEFAULT_STEPS_PER_QUARTER = constants.DEFAULT_STEPS_PER_QUARTER

# Shortcut to CHORD_SYMBOL annotation type.
CHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL
class MelodyChordsMismatchException(Exception):
  """Raised when a melody and chord progression cannot form a lead sheet."""
class LeadSheet(events_lib.EventSequence):
  """A wrapper around Melody and ChordProgression.

  A lead sheet pairs a melody with its underlying chord progression. The two
  event sequences are kept in lockstep: same length, same temporal
  resolution, and same position in the source sequence.

  Attributes:
    melody: A Melody object, the lead sheet melody.
    chords: A ChordProgression object, the underlying chords.
  """

  def __init__(self, melody=None, chords=None):
    """Construct a LeadSheet.

    If `melody` and `chords` are specified, instantiate with the provided
    melody and chords. Otherwise, create an empty LeadSheet.

    Args:
      melody: A Melody object.
      chords: A ChordProgression object.

    Raises:
      MelodyChordsMismatchException: If the melody and chord progression
        differ in temporal resolution or position in the source sequence,
        or if only one of melody or chords is specified.
    """
    # XOR check: melody and chords must be both given or both omitted.
    if (melody is None) != (chords is None):
      raise MelodyChordsMismatchException(
          'melody and chords must be both specified or both unspecified')
    if melody is not None:
      self._from_melody_and_chords(melody, chords)
    else:
      self._reset()

  def _reset(self):
    """Clear events and reset object state."""
    self._melody = melodies_lib.Melody()
    self._chords = chords_lib.ChordProgression()

  def _from_melody_and_chords(self, melody, chords):
    """Initializes a LeadSheet with a given melody and chords.

    Args:
      melody: A Melody object.
      chords: A ChordProgression object.

    Raises:
      MelodyChordsMismatchException: If the melody and chord progression
        differ in temporal resolution or position in the source sequence.
    """
    # The two sequences must line up event-for-event.
    if (len(melody) != len(chords) or
        melody.steps_per_bar != chords.steps_per_bar or
        melody.steps_per_quarter != chords.steps_per_quarter or
        melody.start_step != chords.start_step or
        melody.end_step != chords.end_step):
      raise MelodyChordsMismatchException()
    self._melody = melody
    self._chords = chords

  def __iter__(self):
    """Return an iterator over (melody, chord) tuples in this LeadSheet.

    Returns:
      Python iterator over (melody, chord) event tuples.
    """
    # Python 2 izip pairs the sequences lazily.
    return itertools.izip(self._melody, self._chords)

  def __getitem__(self, i):
    """Returns the melody-chord tuple at the given index."""
    return self._melody[i], self._chords[i]

  def __getslice__(self, i, j):
    """Returns the melody-chord tuples in the given slice range."""
    # __getslice__ is a Python 2 protocol method, used by ls[i:j].
    return zip(self._melody[i:j], self._chords[i:j])

  def __len__(self):
    """How many events (melody-chord tuples) are in this LeadSheet.

    Returns:
      Number of events as an integer.
    """
    return len(self._melody)

  def __deepcopy__(self, unused_memo=None):
    # Deep-copy both underlying sequences into a new instance.
    return type(self)(copy.deepcopy(self._melody),
                      copy.deepcopy(self._chords))

  def __eq__(self, other):
    if not isinstance(other, LeadSheet):
      return False
    return (self._melody == other.melody and
            self._chords == other.chords)

  def __ne__(self, other):
    # Fixed: Python 2 does not derive `!=` from __eq__, so without this
    # method two equal LeadSheets would still compare unequal with `!=`.
    return not self == other

  @property
  def start_step(self):
    # Melody and chords share positioning, so delegate to the melody.
    return self._melody.start_step

  @property
  def end_step(self):
    return self._melody.end_step

  @property
  def steps_per_bar(self):
    return self._melody.steps_per_bar

  @property
  def steps_per_quarter(self):
    return self._melody.steps_per_quarter

  @property
  def melody(self):
    """Return the melody of the lead sheet.

    Returns:
      The lead sheet melody, a Melody object.
    """
    return self._melody

  @property
  def chords(self):
    """Return the chord progression of the lead sheet.

    Returns:
      The lead sheet chords, a ChordProgression object.
    """
    return self._chords

  def append_event(self, event):
    """Appends event to the end of the sequence and increments the end step.

    Args:
      event: The event (a melody-chord tuple) to append to the end.
    """
    melody_event, chord_event = event
    self._melody.append_event(melody_event)
    self._chords.append_event(chord_event)

  def to_sequence(self,
                  velocity=100,
                  instrument=0,
                  sequence_start_time=0.0,
                  qpm=120.0):
    """Converts the LeadSheet to NoteSequence proto.

    Args:
      velocity: Midi velocity to give each melody note. Between 1 and 127
          (inclusive).
      instrument: Midi instrument to give each melody note.
      sequence_start_time: A time in seconds (float) that the first note (and
          chord) in the sequence will land on.
      qpm: Quarter notes per minute (float).

    Returns:
      A NoteSequence proto encoding the melody and chords from the lead sheet.
    """
    sequence = self._melody.to_sequence(
        velocity=velocity, instrument=instrument,
        sequence_start_time=sequence_start_time, qpm=qpm)
    chord_sequence = self._chords.to_sequence(
        sequence_start_time=sequence_start_time, qpm=qpm)
    # A little ugly, but just add the chord annotations to the melody sequence.
    for text_annotation in chord_sequence.text_annotations:
      if text_annotation.annotation_type == CHORD_SYMBOL:
        chord = sequence.text_annotations.add()
        chord.CopyFrom(text_annotation)
    return sequence

  def transpose(self, transpose_amount, min_note=0, max_note=128):
    """Transpose notes and chords in this LeadSheet.

    All notes and chords are transposed the specified amount. Additionally,
    all notes are octave shifted to lie within the [min_note, max_note) range.

    Args:
      transpose_amount: The number of half steps to transpose this
          LeadSheet. Positive values transpose up. Negative values
          transpose down.
      min_note: Minimum pitch (inclusive) that the resulting notes will take on.
      max_note: Maximum pitch (exclusive) that the resulting notes will take on.
    """
    self._melody.transpose(transpose_amount, min_note, max_note)
    self._chords.transpose(transpose_amount)

  def squash(self, min_note, max_note, transpose_to_key):
    """Transpose and octave shift the notes and chords in this LeadSheet.

    Args:
      min_note: Minimum pitch (inclusive) that the resulting notes will take on.
      max_note: Maximum pitch (exclusive) that the resulting notes will take on.
      transpose_to_key: The lead sheet is transposed to be in this key.

    Returns:
      The transpose amount, in half steps.
    """
    # Squash the melody first, then apply the same transposition to chords
    # so both stay in the same key.
    transpose_amount = self._melody.squash(min_note, max_note,
                                           transpose_to_key)
    self._chords.transpose(transpose_amount)
    return transpose_amount

  def set_length(self, steps):
    """Sets the length of the lead sheet to the specified number of steps.

    Args:
      steps: How many steps long the lead sheet should be.
    """
    self._melody.set_length(steps)
    self._chords.set_length(steps)

  def increase_resolution(self, k):
    """Increase the resolution of a LeadSheet.

    Increases the resolution of a LeadSheet object by a factor of `k`. This
    increases the resolution of the melody and chords separately, which uses
    MELODY_NO_EVENT to extend each event in the melody, and simply repeats each
    chord event `k` times.

    Args:
      k: An integer, the factor by which to increase the resolution of the lead
          sheet.
    """
    self._melody.increase_resolution(k)
    self._chords.increase_resolution(k)
def extract_lead_sheet_fragments(quantized_sequence,
                                 min_bars=7,
                                 gap_bars=1.0,
                                 min_unique_pitches=5,
                                 ignore_polyphonic_notes=True,
                                 require_chords=False):
  """Extracts a list of lead sheet fragments from a QuantizedSequence.

  Melodies are pulled out first with melodies_lib.extract_melodies; the chords
  underlying each melody are then looked up with
  chords_lib.extract_chords_for_melodies, and each (melody, chords) pair is
  wrapped in a LeadSheet.

  Args:
    quantized_sequence: A sequences_lib.QuantizedSequence object.
    min_bars: Minimum melody length in bars; shorter melodies are discarded.
    gap_bars: Number of bars (measures) of silence that ends a melody.
    min_unique_pitches: Minimum number of unique notes with octave
        equivalence; melodies with fewer are discarded.
    ignore_polyphonic_notes: If True, melodies are extracted from polyphonic
        tracks (notes starting at the same time); if False, such tracks are
        ignored.
    require_chords: If True, only keep lead sheets containing at least one
        chord other than NO_CHORD; if False, melody-only lead sheets are kept
        too.

  Returns:
    A python list of LeadSheet instances (plus the collected statistics).

  Raises:
    NonIntegerStepsPerBarException: If the bar length of `quantized_sequence`
        (derived from its time signature) is not an integer number of steps.
  """
  empty_progression_counter = statistics.Counter('empty_chord_progressions')
  stats = {'empty_chord_progressions': empty_progression_counter}

  melodies, melody_stats = melodies_lib.extract_melodies(
      quantized_sequence, min_bars=min_bars, gap_bars=gap_bars,
      min_unique_pitches=min_unique_pitches,
      ignore_polyphonic_notes=ignore_polyphonic_notes)
  chord_progressions, chord_stats = chords_lib.extract_chords_for_melodies(
      quantized_sequence, melodies)

  lead_sheets = []
  for melody, progression in zip(melodies, chord_progressions):
    # Chord extraction may fail for a melody; skip those entirely.
    if progression is None:
      continue
    if require_chords and all(chord == chords_lib.NO_CHORD
                              for chord in progression):
      empty_progression_counter.increment()
      continue
    lead_sheets.append(LeadSheet(melody, progression))

  return lead_sheets, stats.values() + melody_stats + chord_stats
class LeadSheetEncoderDecoder(events_lib.EventsEncoderDecoder):
  """Abstract base class for translating lead sheets to and from model data.

  Dataset construction uses `encode` to turn a lead sheet into a
  SequenceExample of inputs and labels, which the model consumes during
  training and evaluation.

  During generation, `get_inputs_batch` builds an inputs batch from the
  current list of lead sheets; the model produces a softmax from that batch;
  and `extend_event_sequences` samples the softmax to grow each lead sheet by
  one step. That cycle repeats until the lead sheets reach the desired
  length.
  """

  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def min_note(self):
    """The min pitch value to allow for melodies.

    Returns:
      An integer, the min pitch value to allow for melodies.
    """
    pass

  @abc.abstractproperty
  def max_note(self):
    """The max pitch value to allow for melodies.

    Returns:
      An integer, the max pitch value to allow for melodies.
    """
    pass

  @abc.abstractproperty
  def transpose_to_key(self):
    """The key, an integer from 0 to 11 inclusive, into which to transpose.

    Returns:
      An integer, the key into which to transpose.
    """
    pass

  def squash_and_encode(self, lead_sheet):
    """Squash a lead sheet into range and key, then encode it.

    Args:
      lead_sheet: A LeadSheet object.

    Returns:
      A tf.train.SequenceExample containing inputs and labels.
    """
    # Normalize pitch range and key in place before encoding.
    lead_sheet.squash(self.min_note, self.max_note, self.transpose_to_key)
    return self._encode(lead_sheet)
class LeadSheetProductEncoderDecoder(LeadSheetEncoderDecoder):
  """A LeadSheetEncoderDecoder that trivially encodes/decodes melody & chords.

  The encoder/decoder uses a MelodyEncoderDecoder and a ChordsEncoderDecoder
  and trivially combines them. The input is a concatenation of the melody and
  chords inputs, and the output label is a product of the melody and chords
  labels.
  """

  def __init__(self, melody_encoder_decoder, chords_encoder_decoder):
    """Store the two underlying encoder/decoders.

    Args:
      melody_encoder_decoder: An encoder/decoder for the melody events.
      chords_encoder_decoder: An encoder/decoder for the chord events.
    """
    self._melody_encoder_decoder = melody_encoder_decoder
    self._chords_encoder_decoder = chords_encoder_decoder

  @property
  def min_note(self):
    # Pitch-range properties come from the melody encoder only.
    return self._melody_encoder_decoder.min_note

  @property
  def max_note(self):
    return self._melody_encoder_decoder.max_note

  @property
  def transpose_to_key(self):
    return self._melody_encoder_decoder.transpose_to_key

  @property
  def input_size(self):
    # Inputs are concatenated, so sizes add.
    return (self._melody_encoder_decoder.input_size +
            self._chords_encoder_decoder.input_size)

  @property
  def num_classes(self):
    # Labels form a cartesian product, so class counts multiply.
    return (self._melody_encoder_decoder.num_classes *
            self._chords_encoder_decoder.num_classes)

  def events_to_input(self, events, position):
    """Returns the input vector for the lead sheet event at the given position.

    The input vector is the concatenation of the input vectors for the melody
    and chords, using their respective encoder-decoders.

    Args:
      events: A LeadSheet object.
      position: An integer event position in the lead sheet.

    Returns:
      An input vector, a self.input_size length list of floats.
    """
    melody_input = self._melody_encoder_decoder.events_to_input(
        events.melody, position)
    # Fixed: this previously read `self.chords_encoder_decoder` (missing
    # the leading underscore), which raised AttributeError since __init__
    # stores the attribute as `_chords_encoder_decoder`.
    chords_input = self._chords_encoder_decoder.events_to_input(
        events.chords, position)
    return melody_input + chords_input

  def events_to_label(self, events, position):
    """Returns the label for the lead sheet event at the given position.

    The label is a cartesian product of the melody label and chord label at the
    given position, mapped to a single integer.

    Args:
      events: A LeadSheet object.
      position: An integer event position in the lead sheet.

    Returns:
      A label, an integer in the range [0, self.num_classes).
    """
    melody_label = self._melody_encoder_decoder.events_to_label(
        events.melody, position)
    chords_label = self._chords_encoder_decoder.events_to_label(
        events.chords, position)
    # Encode the (melody, chord) pair as chord * melody_classes + melody.
    return melody_label + (self._melody_encoder_decoder.num_classes *
                           chords_label)

  def class_index_to_event(self, class_index, events):
    """Returns the lead sheet event for the given class index.

    This is the reverse process of the self.events_to_label method. The lead
    sheet event will be a tuple, the first element of which is the melody event
    and the second element of which is the chord event.

    Args:
      class_index: An integer in the range [0, self.num_classes).
      events: A LeadSheet object.

    Returns:
      A lead sheet event value, a tuple containing the melody event and chord
      event.
    """
    melody_index = class_index % self._melody_encoder_decoder.num_classes
    # Floor division (`//`) makes the integer intent explicit and behaves
    # identically under both Python 2 and Python 3 for integer inputs.
    chord_index = class_index // self._melody_encoder_decoder.num_classes
    return (
        self._melody_encoder_decoder.class_index_to_event(melody_index,
                                                          events.melody),
        self._chords_encoder_decoder.class_index_to_event(chord_index,
                                                          events.chords))
| |
'''
Created on Sep 15, 2013
@author: miguel
'''
from lxml import etree
from scrapy import log
from scrapy.conf import settings
from scrapy.contrib.exporter import XmlItemExporter, CsvItemExporter
from urlparse import urlparse
class RdfExporter(XmlItemExporter):
''' This exporter requieres that each item has for each field an
additional attribute with the same name ending by "_label" that
representes the tag label used. Samely, if the field is to be
a reference instead of a value use an additionsl field ending
by "_asAttr" whose value is True is needed
This exporter is deprecated in favour of jinja exporter
'''
NS = {'owl' : 'http://www.w3.org/2002/07/owl#',
'rdf' : 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
'xsd' : 'http://www.w3.org/2001/XMLSchema#',
'dcterms' : 'http://purl.org/dc/terms/',
'foaf' : 'http://xmlns.com/foaf/0.1/',
'tags' : 'http://www.holygoat.co.uk/owl/redwood/0.1/tags/',
'ewe' : 'http://gsi.dit.upm.es/ontologies/ewe/ns/',
None : 'http://gsi.dit.upm.es/ontologies/ewe/ns/'
}
def __init__(self, file, **kwargs):
self.file = file
def start_exporting(self):
self.root_element = etree.Element(self._parse_tagname('rdf:RDF'), nsmap=self.NS)
def export_item(self, item, append_to=None):
''' '''
log.msg("Export item with 'item':" + str(item) + " and 'append_to':" + str(append_to))
# Extract class string
if not item.ewe_class:
log.msg("Cannot find ewe_class", level=log.DEBUG)
elem_name = None
else:
elem_name = item.ewe_class
# Item root element and subClass entry
item_root = etree.Element(self._parse_tagname('owl:Class'), nsmap=self.NS)
subClass = etree.Element(self._parse_tagname('rdfs:SubClassOf'), nsmap=self.NS)
subClass.set(self._parse_tagname('rdf:resource'),
self._expand_tagname(elem_name))
item_root.append(subClass)
# # iterate over the fields
for field in item.keys():
# Continue for fields not populated
if not field in item:
log.msg('[RdfExporter] Found EMPTY field:\"' + field + '\".', level=log.DEBUG)
continue
#
value = item[field]
value_type = type(value)
log.msg('[RdfExporter] Found field:\"' + field + '\" of type:' + str(value_type), level=log.DEBUG)
# Give 'id' field special treatment
if field == 'id':
if value_type not in [str, unicode]:
log.msg("Id value cannot be of type " + str(value_type), level=log.ERROR)
continue
if not is_uri(value):
log.msg("Id is not a valud uri:" + str(value) , level=log.ERROR)
continue
item_root.set(self._parse_tagname('rdf:about'), value)
continue
# Fields whose content is unicode or str is use as...
if value_type in [str, unicode]:
elem = etree.SubElement(item_root, self._parse_tagname(getattr(item, field + '_label')), nsmap=self.NS)
if getattr(item, field + 'asAttr', False):
elem.set(self._parse_tagname('rdfs:resource'), value) # ...either resource...
else:
elem.text = value # ...or content
# Iterate over the lists
elif value_type in [list]:
log.msg("Exporting list field:" + field, level=log.WARNING)
for ith_value in value:
log.msg(">>" + str(type(ith_value)) + "-" + str(ith_value), level=log.WARNING)
elem = etree.SubElement(item_root, self._parse_tagname(getattr(item, field + '_label')), nsmap=self.NS)
if type(ith_value) in [str, unicode]:
if getattr(item, field + 'asAttr', False):
elem.set(self._parse_tagname('rdfs:resource'), ith_value) # ...either resource...
else:
elem.text = ith_value # ...or content
else:
pass
self.export_item(ith_value, elem)
else:
log.msg("By now " + str(value_type) + " are not exported:" + field, level=log.WARNING)
# Append generated element to either the parent element (if given) or the root_elem
if append_to is None:
log.msg("Used root element since append_to is " + str(append_to), level=log.DEBUG)
self.root_element.append(item_root)
else:
log.msg("Used append to " + str(append_to), level=log.DEBUG)
append_to.append(item_root)
print etree.tostring(item_root, pretty_print=True)
# tree = xml.ElementTree(item_root)
# self.file.write(etree.toprettyxml())
def finish_exporting(self):
self.file.write(etree.tostring(self.root_element, pretty_print=True))
self.file.close
def _export_xml_field(self, name, serialized_value):
pass
def _expand_tagname(self, tagname):
''' This expands the tagname, in case it shows namespace abbreviation,
to the full quialified form
Args:
tagname (str|unicode): The abbreviated tagname to use
Returns:
string|unicode The expanded tagname according to the NS dict
Raises:
KeyError If the namespage abbreviation is not defined in NS
>>> print _expand_tagname('owl:Class')
http://www.w3.org/2002/07/owl#Class
>>> print _expand_tagname('http://gsi.dit.upm.es/ontologies/ewe/ns/Channel')
http://gsi.dit.upm.es/ontologies/ewe/ns/Channel
'''
split = str.split(tagname, ':')
if len(split) == 1:
return tagname
if len(split) == 2:
return ''.join([self.NS[split[0]], split[1]])
# TODO
log.msg("The tag provided is not valid:" + str(tagname), level=log.ERROR)
def _parse_tagname(self, tagname):
''' This expands the tagname, in case it shows namespace abbreviation,
to the lxml namespace and element form
Args:
tagname (str|unicode): The abbreviated tagname to use
Returns:
string|unicode. The expanded string according to the NS dict
Raises:
KeyError If the namespage abbreviation is not defined in NS
>>> print _expand_tagname('owl:Class')
{http://www.w3.org/2002/07/owl#}Class
>>> print _expand_tagname('http://gsi.dit.upm.es/ontologies/ewe/ns/Channel')
http://gsi.dit.upm.es/ontologies/ewe/ns/Channel
'''
split = str.split(tagname, ':')
if len(split) == 1:
return tagname
if len(split) == 2:
return '{%s}%s' % (self.NS[split[0]], split[1])
# TODO
log.msg("The tag provided is not valid:" + str(tagname), level=log.ERROR)
def is_uri(string):
    ''' This checks whether a string is a valid uri.

    A string is considered a valid uri if, after parsing it with the
    urlparse function, both a scheme and a netloc are extracted.

    Args:
        string (str|unicode): The string to check

    Returns:
        bool. True if the argument is a valid uri. False in other case.

    >>> is_uri("http://www.w3.org/2002/07/owl")
    True
    >>> is_uri("http://www.w3.org/2002/07/owl#Class")
    True
    >>> is_uri("www.w3.org")
    False
    '''
    # Fixed: the doctest examples called a nonexistent "id_uri" function.
    url = urlparse(string)
    # Both a scheme ("http") and a network location must be present.
    return bool(url.scheme and url.netloc)
| |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import optparse
import textwrap
from blinkpy.common.checkout.git_mock import MockGit
from blinkpy.common.net.git_cl import TryJobStatus
from blinkpy.common.net.git_cl_mock import MockGitCL
from blinkpy.common.net.results_fetcher import Build
from blinkpy.common.net.web_test_results import WebTestResults
from blinkpy.common.path_finder import RELATIVE_WEB_TESTS
from blinkpy.common.system.log_testing import LoggingTestCase
from blinkpy.tool.commands.rebaseline import TestBaselineSet
from blinkpy.tool.commands.rebaseline_cl import RebaselineCL
from blinkpy.tool.commands.rebaseline_unittest import BaseTestCase
from blinkpy.web_tests.builder_list import BuilderList
class RebaselineCLTest(BaseTestCase, LoggingTestCase):
    """Tests for the rebaseline-cl command.

    setUp() wires up mock try builds, mock web test results (both the
    legacy results-fetcher payload and a ResultDB-style payload) and a
    mock checkout, so RebaselineCL.execute() can run end to end against
    purely in-memory fakes.
    """
    command_constructor = RebaselineCL
    # Monkey-patch the command class so that 'MOCK Try Highdpi' is always
    # reported as the flag-specific builder.
    command_constructor.flag_specific_builder = (
        lambda self, flag_specific: frozenset(["MOCK Try Highdpi"]))

    def setUp(self):
        BaseTestCase.setUp(self)
        LoggingTestCase.setUp(self)
        self.maxDiff = None
        self.builds = {
            Build('MOCK Try Win', 5000, 'Build-1'):
            TryJobStatus('COMPLETED', 'FAILURE'),
            Build('MOCK Try Mac', 4000, 'Build-2'):
            TryJobStatus('COMPLETED', 'FAILURE'),
            Build('MOCK Try Linux', 6000, 'Build-3'):
            TryJobStatus('COMPLETED', 'FAILURE'),
            # Test the special case for experimental builders
            # Highdpi is an experimental builder whose status
            # is returned as ('COMPLETED', 'SUCCESS') even with failures.
            Build('MOCK Try Highdpi', 8000, 'Build-4'):
            TryJobStatus('COMPLETED', 'SUCCESS'),
        }
        self.command.git_cl = MockGitCL(self.tool, self.builds)
        git = MockGit(
            filesystem=self.tool.filesystem, executive=self.tool.executive)
        git.changed_files = lambda **_: [
            RELATIVE_WEB_TESTS + 'one/text-fail.html',
            RELATIVE_WEB_TESTS + 'one/flaky-fail.html', ]
        self.tool.git = lambda: git
        self.tool.builders = BuilderList({
            'MOCK Try Win': {
                'port_name': 'test-win-win7',
                'specifiers': ['Win7', 'Release'],
                'is_try_builder': True,
            },
            'MOCK Try Linux': {
                'port_name': 'test-linux-trusty',
                'specifiers': ['Trusty', 'Release'],
                'is_try_builder': True,
            },
            'MOCK Try Mac': {
                'port_name': 'test-mac-mac10.11',
                'specifiers': ['Mac10.11', 'Release'],
                'is_try_builder': True,
            },
            'MOCK Try Highdpi': {
                'port_name': 'test-linux-trusty',
                'specifiers': ['trusty', 'Release'],
                "flag_specific": "highdpi",
            },
        })
        # Legacy (non-ResultDB) results payload shared by all mock builds.
        web_test_results = WebTestResults({
            'tests': {
                'one': {
                    'crash.html': {
                        'expected': 'PASS',
                        'actual': 'CRASH',
                        'is_unexpected': True,
                        'artifacts': {
                            'crash_log': ['crash.log']
                        }
                    },
                    'expected-fail.html': {
                        'expected': 'FAIL',
                        'actual': 'FAIL',
                        'artifacts': {
                            'expected_text': ['expected-fail-expected.txt'],
                            'actual_text': ['expected-fail-actual.txt']
                        }
                    },
                    'flaky-fail.html': {
                        'expected': 'PASS',
                        'actual': 'PASS FAIL',
                        'is_unexpected': True,
                        'artifacts': {
                            'expected_audio': ['flaky-fail-expected.wav'],
                            'actual_audio': ['flaky-fail-actual.wav']
                        }
                    },
                    'missing.html': {
                        'expected': 'PASS',
                        'actual': 'FAIL',
                        'is_unexpected': True,
                        'artifacts': {
                            'actual_image': ['missing-actual.png']
                        },
                        'is_missing_image': True
                    },
                    'slow-fail.html': {
                        'expected': 'SLOW',
                        'actual': 'FAIL',
                        'is_unexpected': True,
                        'artifacts': {
                            'actual_text': ['slow-fail-actual.txt'],
                            'expected_text': ['slow-fail-expected.txt']
                        }
                    },
                    'text-fail.html': {
                        'expected': 'PASS',
                        'actual': 'FAIL',
                        'is_unexpected': True,
                        'artifacts': {
                            'actual_text': ['text-fail-actual.txt'],
                            'expected_text': ['text-fail-expected.txt']
                        }
                    },
                    'unexpected-pass.html': {
                        'expected': 'FAIL',
                        'actual': 'PASS',
                        'is_unexpected': True
                    },
                },
                'two': {
                    'image-fail.html': {
                        'expected': 'PASS',
                        'actual': 'FAIL',
                        'is_unexpected': True,
                        'artifacts': {
                            'actual_image': ['image-fail-actual.png'],
                            'expected_image': ['image-fail-expected.png']
                        }
                    }
                },
            },
        })
        # ResultDB-style results payload used by the *_resultDB tests.
        self.web_test_resultsdb = WebTestResults([{
            "name": "tests/two/image-fail.html/results/2",
            "testId": "ninja://:blink_web_tests/two/image-fail.html",
            "resultId": "2",
            "variant": {
                "def": {
                    "builder": "",
                    "os": "",
                    "test_suite": "blink_web_tests"
                }
            },
            "status": "FAIL"
        }, {
            "name": "tests/one/missing.html/results/1",
            "testId": "ninja://:blink_web_tests/one/missing.html",
            "resultId": "1",
            "variant": {
                "def": {
                    "builder": "",
                    "os": "",
                    "test_suite": "blink_web_tests"
                }
            },
            "status": "FAIL"
        }, {
            "name": "tests/one/crash.html/results/3",
            "testId": "ninja://:blink_web_tests/one/crash.html",
            "resultId": "3",
            "variant": {
                "def": {
                    "builder": "",
                    "os": "",
                    "test_suite": "blink_web_tests"
                }
            },
            "status": "CRASH"
        }])
        # ResultDB artifact listings keyed by test-result name.
        self.test_artifacts_list = {
            "tests/one/missing.html/results/1": [{
                "name":
                "invocations/task-chromium-swarm.appspot.com-1/tests/ninja:%2F%2F:blink_web_tests%2Fone%2Fmissing.html/results/1",
                "artifactId": "actual_image",
                "fetchUrl":
                "https://results.usercontent.cr.dev/invocations/task-chromium-swarm.appspot.com-1/tests/ninja:%2F%2F:blink_web_tests%2Fone%2Fmissing.html/results/artifacts/actual_image?token=1",
                "contentType": "image/png",
            }],
            "tests/two/image-fail.html/results/2": [{
                "name":
                "invocations/task-chromium-swarm.appspot.com-2/tests/ninja:%2F%2F:blink_web_tests%2Ftwo%2Fimage-fail.html/results/2",
                "artifactId": "actual_image",
                "fetchUrl":
                "https://results.usercontent.cr.dev/invocations/task-chromium-swarm.appspot.com-2/tests/ninja:%2F%2F:blink_web_tests%2Ftwo%2Fimage-fail.html/results/artifacts/actual_image?token=2",
                "contentType": "image/png",
            }],
            "tests/one/crash.html/results/3": [{
                "name":
                "invocations/task-chromium-swarm.appspot.com-2/tests/ninja:%2F%2F:blink_web_tests%2Ftwo%2Fcrash.html/results/3",
                "artifactId": "actual_text",
                "fetchUrl":
                "https://results.usercontent.cr.dev/invocations/task-chromium-swarm.appspot.com-2/tests/ninja:%2F%2F:blink_web_tests%2Fone%2Fcrash.html/results/artifacts/actual_text?token=3",
                "contentType": "text",
            }]
        }
        for build in self.builds:
            self.tool.results_fetcher.set_results(build, web_test_results)
            self.tool.results_fetcher.set_retry_sumary_json(
                build,
                json.dumps({
                    'failures': [
                        'one/flaky-fail.html',
                        'one/missing.html',
                        'one/slow-fail.html',
                        'one/text-fail.html',
                        'two/image-fail.html',
                    ],
                    'ignored': [],
                }))
        # Write to the mock filesystem so that these tests are considered to exist.
        tests = [
            'one/flaky-fail.html',
            'one/missing.html',
            'one/slow-fail.html',
            'one/text-fail.html',
            'two/image-fail.html',
        ]
        for test in tests:
            path = self.mac_port.host.filesystem.join(
                self.mac_port.web_tests_dir(), test)
            self._write(path, 'contents')
        self.mac_port.host.filesystem.write_text_file(
            '/test.checkout/web_tests/external/wpt/MANIFEST.json', '{}')

    def tearDown(self):
        BaseTestCase.tearDown(self)
        LoggingTestCase.tearDown(self)

    @staticmethod
    def command_options(**kwargs):
        """Returns default command options, overridden by kwargs."""
        options = {
            'dry_run': False,
            'only_changed_tests': False,
            'trigger_jobs': True,
            'fill_missing': None,
            'optimize': True,
            'results_directory': None,
            'test_name_file': None,
            'verbose': False,
            'builders': [],
            'patchset': None,
            'use_blink_try_bots_only': False,
            'flag_specific': None,
            'resultDB': None
        }
        options.update(kwargs)
        return optparse.Values(dict(**options))

    @staticmethod
    def command_options_resultDB(**kwargs):
        """Same defaults as command_options(), but with ResultDB enabled.

        Delegating (rather than duplicating the defaults dict) keeps the
        two option sets from drifting out of sync.
        """
        kwargs.setdefault('resultDB', True)
        return RebaselineCLTest.command_options(**kwargs)

    def test_execute_basic(self):
        # By default, with no arguments or options, rebaseline-cl rebaselines
        # all of the tests that unexpectedly failed.
        exit_code = self.command.execute(self.command_options(), [], self.tool)
        self.assertEqual(exit_code, 0)
        self.assertLog([
            'INFO: Finished try jobs found for all try bots.\n',
            'INFO: Rebaselining one/flaky-fail.html\n',
            'INFO: Rebaselining one/missing.html\n',
            'INFO: Rebaselining one/slow-fail.html\n',
            'INFO: Rebaselining one/text-fail.html\n',
            'INFO: Rebaselining two/image-fail.html\n',
        ])

    def test_execute_basic_resultDB(self):
        # By default, with no arguments or options, rebaseline-cl rebaselines
        # all of the tests that unexpectedly failed.
        for build in self.builds:
            self.tool.results_fetcher.set_results_to_resultdb(
                build, self.web_test_resultsdb)
            self.tool.results_fetcher.set_artifact_list_for_test(
                build, self.test_artifacts_list)
        exit_code = self.command.execute(self.command_options_resultDB(), [],
                                         self.tool)
        self.assertEqual(exit_code, 0)
        self.assertLog([
            'INFO: Finished try jobs found for all try bots.\n',
            'INFO: Rebaselining one/missing.html\n',
            'INFO: Rebaselining two/image-fail.html\n',
        ])

    def test_execute_with_test_name_file(self):
        fs = self.mac_port.host.filesystem
        test_name_file = fs.mktemp()
        file = fs.open_text_file_for_writing(test_name_file)
        file.write(
            textwrap.dedent('''
                one/flaky-fail.html
                one/missing.html
                # one/slow-fail.html
                #
                one/text-fail.html
                two/image-fail.html '''))
        exit_code = self.command.execute(
            self.command_options(test_name_file=test_name_file), [], self.tool)
        self.assertEqual(exit_code, 0)
        self.assertLog([
            'INFO: Finished try jobs found for all try bots.\n',
            'INFO: Reading list of tests to rebaseline from %s\n' %
            test_name_file,
            'INFO: Rebaselining one/flaky-fail.html\n',
            'INFO: Rebaselining one/missing.html\n',
            'INFO: Rebaselining one/text-fail.html\n',
            'INFO: Rebaselining two/image-fail.html\n',
        ])

    def test_execute_with_test_name_file_resultDB(self):
        fs = self.mac_port.host.filesystem
        test_name_file = fs.mktemp()
        file = fs.open_text_file_for_writing(test_name_file)
        file.write(
            textwrap.dedent('''
                one/missing.html
                two/missing.html
                # one/slow-fail.html
                #
                two/image-fail.html '''))
        for build in self.builds:
            self.tool.results_fetcher.set_results_to_resultdb(
                build, self.web_test_resultsdb)
            self.tool.results_fetcher.set_artifact_list_for_test(
                build, self.test_artifacts_list)
        exit_code = self.command.execute(
            self.command_options(test_name_file=test_name_file, resultDB=True),
            [], self.tool)
        self.assertEqual(exit_code, 0)
        self.assertLog([
            'INFO: Finished try jobs found for all try bots.\n',
            'INFO: Reading list of tests to rebaseline from %s\n' %
            test_name_file,
            'INFO: Rebaselining one/missing.html\n',
            'INFO: Rebaselining two/image-fail.html\n',
        ])

    def test_execute_with_no_issue_number_aborts(self):
        # If the user hasn't uploaded a CL, an error message is printed.
        self.command.git_cl = MockGitCL(self.tool, issue_number='None')
        exit_code = self.command.execute(self.command_options(), [], self.tool)
        self.assertEqual(exit_code, 1)
        self.assertLog(['ERROR: No issue number for current branch.\n'])

    def test_execute_with_unstaged_baselines_aborts(self):
        git = self.tool.git()
        git.unstaged_changes = lambda: {
            RELATIVE_WEB_TESTS + 'my-test-expected.txt': '?'
        }
        exit_code = self.command.execute(self.command_options(), [], self.tool)
        self.assertEqual(exit_code, 1)
        self.assertLog([
            'ERROR: Aborting: there are unstaged baselines:\n',
            'ERROR: /mock-checkout/' + RELATIVE_WEB_TESTS +
            'my-test-expected.txt\n',
        ])

    def test_execute_no_try_jobs_started_triggers_jobs(self):
        # If there are no try jobs started yet, by default the tool will
        # trigger new try jobs.
        self.command.git_cl = MockGitCL(self.tool, {})
        exit_code = self.command.execute(self.command_options(), [], self.tool)
        self.assertEqual(exit_code, 1)
        self.assertLog([
            'INFO: No finished try jobs.\n', 'INFO: Triggering try jobs:\n',
            'INFO: MOCK Try Linux\n', 'INFO: MOCK Try Mac\n',
            'INFO: MOCK Try Win\n',
            'INFO: Once all pending try jobs have finished, please re-run\n'
            'blink_tool.py rebaseline-cl to fetch new baselines.\n'
        ])

    def test_execute_no_try_jobs_started_and_no_trigger_jobs(self):
        # If there are no try jobs started yet and --no-trigger-jobs is passed,
        # then we just abort immediately.
        self.command.git_cl = MockGitCL(self.tool, {})
        exit_code = self.command.execute(
            self.command_options(trigger_jobs=False), [], self.tool)
        self.assertEqual(exit_code, 1)
        self.assertLog([
            'INFO: No finished try jobs.\n',
            'INFO: Aborted: no try jobs and --no-trigger-jobs passed.\n',
        ])

    def test_execute_one_missing_build(self):
        builds = {
            Build('MOCK Try Win', 5000): TryJobStatus('COMPLETED', 'FAILURE'),
            Build('MOCK Try Mac', 4000): TryJobStatus('COMPLETED', 'FAILURE'),
        }
        self.command.git_cl = MockGitCL(self.tool, builds)
        exit_code = self.command.execute(self.command_options(), [], self.tool)
        self.assertEqual(exit_code, 1)
        self.assertLog([
            'INFO: Finished try jobs:\n',
            'INFO: MOCK Try Mac\n',
            'INFO: MOCK Try Win\n',
            'INFO: Triggering try jobs:\n',
            'INFO: MOCK Try Linux\n',
            'INFO: Once all pending try jobs have finished, please re-run\n'
            'blink_tool.py rebaseline-cl to fetch new baselines.\n',
        ])

    def test_execute_with_unfinished_jobs(self):
        builds = {
            Build('MOCK Try Win', 5000, 'Build-1'):
            TryJobStatus('COMPLETED', 'FAILURE'),
            Build('MOCK Try Mac', 4000, 'Build-2'):
            TryJobStatus('STARTED'),
            Build('MOCK Try Linux', 6000, 'Build-3'):
            TryJobStatus('SCHEDULED'),
        }
        self.command.git_cl = MockGitCL(self.tool, builds)
        exit_code = self.command.execute(self.command_options(), [], self.tool)
        self.assertEqual(exit_code, 1)
        self.assertLog([
            'INFO: Finished try jobs:\n',
            'INFO: MOCK Try Win\n',
            'INFO: Scheduled or started try jobs:\n',
            'INFO: MOCK Try Linux\n',
            'INFO: MOCK Try Mac\n',
            'INFO: There are some builders with no results:\n',
            'INFO: MOCK Try Linux\n',
            'INFO: MOCK Try Mac\n',
            'INFO: Would you like to continue?\n',
            'INFO: Aborting.\n',
        ])

    def test_execute_with_canceled_job(self):
        builds = {
            Build('MOCK Try Win', 5000, 'Build-1'):
            TryJobStatus('COMPLETED', 'FAILURE'),
            Build('MOCK Try Mac', 4000, 'Build-2'):
            TryJobStatus('COMPLETED', 'FAILURE'),
            Build('MOCK Try Linux', 6000, 'Build-3'):
            TryJobStatus('COMPLETED', 'CANCELED'),
        }
        self.command.git_cl = MockGitCL(self.tool, builds)
        exit_code = self.command.execute(self.command_options(), [], self.tool)
        self.assertEqual(exit_code, 1)
        self.assertLog([
            'INFO: Finished try jobs found for all try bots.\n',
            'INFO: There are some builders with no results:\n',
            'INFO: MOCK Try Linux\n',
            'INFO: Would you like to continue?\n',
            'INFO: Aborting.\n',
        ])

    def test_execute_with_passing_jobs(self):
        builds = {
            Build('MOCK Try Win', 5000, 'Build-1'):
            TryJobStatus('COMPLETED', 'FAILURE'),
            Build('MOCK Try Mac', 4000, 'Build-2'):
            TryJobStatus('COMPLETED', 'SUCCESS'),
            Build('MOCK Try Linux', 6000, 'Build-3'):
            TryJobStatus('COMPLETED', 'SUCCESS'),
        }
        self.command.git_cl = MockGitCL(self.tool, builds)
        exit_code = self.command.execute(self.command_options(), [], self.tool)
        self.assertEqual(exit_code, 0)
        self.assertLog([
            'INFO: Finished try jobs found for all try bots.\n',
            'INFO: Rebaselining one/flaky-fail.html\n',
            'INFO: Rebaselining one/missing.html\n',
            'INFO: Rebaselining one/slow-fail.html\n',
            'INFO: Rebaselining one/text-fail.html\n',
            'INFO: Rebaselining two/image-fail.html\n'
        ])

    def test_execute_with_no_trigger_jobs_option(self):
        builds = {
            Build('MOCK Try Win', 5000, 'Build-1'):
            TryJobStatus('COMPLETED', 'FAILURE'),
            Build('MOCK Try Mac', 4000, 'Build-2'):
            TryJobStatus('COMPLETED', 'FAILURE'),
        }
        self.command.git_cl = MockGitCL(self.tool, builds)
        exit_code = self.command.execute(
            self.command_options(trigger_jobs=False), [], self.tool)
        self.assertEqual(exit_code, 1)
        self.assertLog([
            'INFO: Finished try jobs:\n',
            'INFO: MOCK Try Mac\n',
            'INFO: MOCK Try Win\n',
            'INFO: There are some builders with no results:\n',
            'INFO: MOCK Try Linux\n',
            'INFO: Would you like to continue?\n',
            'INFO: Aborting.\n',
        ])

    def test_execute_with_only_changed_tests_option(self):
        # When --only-changed-tests is passed, the tool only rebaselines tests
        # that were modified in the CL.
        exit_code = self.command.execute(
            self.command_options(only_changed_tests=True), [], self.tool)
        self.assertEqual(exit_code, 0)
        self.assertLog([
            'INFO: Finished try jobs found for all try bots.\n',
            'INFO: Rebaselining one/flaky-fail.html\n',
            'INFO: Rebaselining one/text-fail.html\n',
        ])

    def test_execute_with_test_that_fails_on_retry(self):
        # In this example, one test failed both with and without the patch
        # in the try job, so it is not rebaselined.
        builds = {
            Build('MOCK Try Win', 5000, 'Build-1'):
            TryJobStatus('COMPLETED', 'FAILURE'),
            Build('MOCK Try Mac', 4000, 'Build-2'):
            TryJobStatus('COMPLETED', 'FAILURE'),
            Build('MOCK Try Linux', 6000, 'Build-3'):
            TryJobStatus('COMPLETED', 'FAILURE'),
        }
        for build in builds:
            self.tool.results_fetcher.set_retry_sumary_json(
                build,
                json.dumps({
                    'failures': ['one/text-fail.html'],
                    'ignored': ['two/image-fail.html'],
                }))
        exit_code = self.command.execute(self.command_options(), [], self.tool)
        self.assertEqual(exit_code, 0)
        self.assertLog([
            'INFO: Finished try jobs found for all try bots.\n',
            'INFO: Rebaselining one/text-fail.html\n',
        ])

    def test_execute_with_no_retry_summary_downloaded(self):
        # In this example, the retry summary could not be downloaded, so
        # a warning is printed and all tests are rebaselined.
        self.tool.results_fetcher.set_retry_sumary_json(
            Build('MOCK Try Win', 5000, 'Build-1'), None)
        exit_code = self.command.execute(self.command_options(), [], self.tool)
        self.assertEqual(exit_code, 0)
        self.assertLog([
            'INFO: Finished try jobs found for all try bots.\n',
            'WARNING: No retry summary available for "MOCK Try Win".\n',
            'INFO: Rebaselining one/flaky-fail.html\n',
            'INFO: Rebaselining one/missing.html\n',
            'INFO: Rebaselining one/slow-fail.html\n',
            'INFO: Rebaselining one/text-fail.html\n',
            'INFO: Rebaselining two/image-fail.html\n',
        ])

    def test_execute_with_flag_specific_experimental(self):
        exit_code = self.command.execute(
            self.command_options(flag_specific='highdpi'), [], self.tool)
        self.assertEqual(exit_code, 0)
        self.assertLog([
            'INFO: Finished try jobs found for all try bots.\n',
            'INFO: Rebaselining one/flaky-fail.html\n',
            'INFO: Rebaselining one/missing.html\n',
            'INFO: Rebaselining one/slow-fail.html\n',
            'INFO: Rebaselining one/text-fail.html\n',
            'INFO: Rebaselining two/image-fail.html\n',
        ])

    def test_rebaseline_command_invocations(self):
        """Tests the list of commands that are called for rebaselining."""
        # First write test contents to the mock filesystem so that
        # one/flaky-fail.html is considered a real test to rebaseline.
        port = self.tool.port_factory.get('test-win-win7')
        path = port.host.filesystem.join(port.web_tests_dir(),
                                         'one/flaky-fail.html')
        self._write(path, 'contents')
        test_baseline_set = TestBaselineSet(self.tool)
        test_baseline_set.add('one/flaky-fail.html',
                              Build('MOCK Try Win', 5000, 'Build-1'))
        self.command.rebaseline(self.command_options(), test_baseline_set)
        self.assertEqual(self.tool.executive.calls,
                         [[[
                             'python',
                             'echo',
                             'copy-existing-baselines-internal',
                             '--test',
                             'one/flaky-fail.html',
                             '--suffixes',
                             'wav',
                             '--port-name',
                             'test-win-win7',
                         ]],
                          [[
                              'python',
                              'echo',
                              'rebaseline-test-internal',
                              '--test',
                              'one/flaky-fail.html',
                              '--suffixes',
                              'wav',
                              '--port-name',
                              'test-win-win7',
                              '--builder',
                              'MOCK Try Win',
                              '--build-number',
                              '5000',
                              '--step-name',
                              'blink_web_tests (with patch)',
                          ]],
                          [[
                              'python',
                              'echo',
                              'optimize-baselines',
                              '--no-manifest-update',
                              '--suffixes',
                              'wav',
                              'one/flaky-fail.html',
                          ]]])

    def test_trigger_try_jobs(self):
        # The trigger_try_jobs method just uses git cl to trigger jobs for
        # the given builders.
        self.command.trigger_try_jobs(['MOCK Try Linux', 'MOCK Try Win'])
        self.assertEqual(self.command.git_cl.calls, [[
            'git', 'cl', 'try', '-B', 'luci.chromium.try', '-b',
            'MOCK Try Linux', '-b', 'MOCK Try Win'
        ]])
        self.assertLog([
            'INFO: Triggering try jobs:\n',
            'INFO: MOCK Try Linux\n',
            'INFO: MOCK Try Win\n',
            'INFO: Once all pending try jobs have finished, please re-run\n'
            'blink_tool.py rebaseline-cl to fetch new baselines.\n',
        ])

    def test_execute_missing_results_with_no_fill_missing_prompts(self):
        self.tool.results_fetcher.set_results(
            Build('MOCK Try Win', 5000, 'Build-1'), None)
        exit_code = self.command.execute(self.command_options(), [], self.tool)
        self.assertEqual(exit_code, 1)
        self.assertLog([
            'INFO: Finished try jobs found for all try bots.\n',
            'INFO: Failed to fetch results for "MOCK Try Win".\n',
            ('INFO: Results URL: https://test-results.appspot.com/data/layout_results/'
             'MOCK_Try_Win/5000/blink_web_tests%20%28with%20patch%29/layout-test-results/results.html\n'
             ),
            'INFO: There are some builders with no results:\n',
            'INFO: MOCK Try Win\n',
            'INFO: Would you like to continue?\n',
            'INFO: Aborting.\n',
        ])

    def test_execute_missing_results_with_fill_missing_continues(self):
        self.tool.results_fetcher.set_results(
            Build('MOCK Try Win', 5000, 'Build-1'), None)
        exit_code = self.command.execute(
            self.command_options(fill_missing=True), ['one/flaky-fail.html'],
            self.tool)
        self.assertEqual(exit_code, 0)
        self.assertLog([
            'INFO: Finished try jobs found for all try bots.\n',
            'INFO: Failed to fetch results for "MOCK Try Win".\n',
            ('INFO: Results URL: https://test-results.appspot.com/data/layout_results/'
             'MOCK_Try_Win/5000/blink_web_tests%20%28with%20patch%29/layout-test-results/results.html\n'
             ), 'INFO: There are some builders with no results:\n',
            'INFO: MOCK Try Win\n', 'INFO: For one/flaky-fail.html:\n',
            'INFO: Using "MOCK Try Highdpi" build 8000 for test-win-win7.\n',
            'INFO: Rebaselining one/flaky-fail.html\n'
        ])

    def test_fill_in_missing_results(self):
        test_baseline_set = TestBaselineSet(self.tool)
        test_baseline_set.add('one/flaky-fail.html',
                              Build('MOCK Try Linux', 100))
        test_baseline_set.add('one/flaky-fail.html', Build(
            'MOCK Try Win', 200))
        self.command.fill_in_missing_results(test_baseline_set)
        self.assertEqual(
            test_baseline_set.build_port_pairs('one/flaky-fail.html'), [
                (Build('MOCK Try Linux', 100), 'test-linux-trusty'),
                (Build('MOCK Try Win', 200), 'test-win-win7'),
                (Build('MOCK Try Linux', 100), 'test-mac-mac10.11'),
            ])
        self.assertLog([
            'INFO: For one/flaky-fail.html:\n',
            'INFO: Using "MOCK Try Linux" build 100 for test-mac-mac10.11.\n',
        ])

    def test_fill_in_missing_results_prefers_build_with_same_os_type(self):
        self.tool.builders = BuilderList({
            'MOCK Foo12': {
                'port_name': 'foo-foo12',
                'specifiers': ['Foo12', 'Release'],
                'is_try_builder': True,
            },
            'MOCK Foo45': {
                'port_name': 'foo-foo45',
                'specifiers': ['Foo45', 'Release'],
                'is_try_builder': True,
            },
            'MOCK Bar3': {
                'port_name': 'bar-bar3',
                'specifiers': ['Bar3', 'Release'],
                'is_try_builder': True,
            },
            'MOCK Bar4': {
                'port_name': 'bar-bar4',
                'specifiers': ['Bar4', 'Release'],
                'is_try_builder': True,
            },
        })
        test_baseline_set = TestBaselineSet(self.tool)
        test_baseline_set.add('one/flaky-fail.html', Build('MOCK Foo12', 100))
        test_baseline_set.add('one/flaky-fail.html', Build('MOCK Bar4', 200))
        self.command.fill_in_missing_results(test_baseline_set)
        self.assertEqual(
            sorted(test_baseline_set.build_port_pairs('one/flaky-fail.html')),
            [
                (Build('MOCK Bar4', 200), 'bar-bar3'),
                (Build('MOCK Bar4', 200), 'bar-bar4'),
                (Build('MOCK Foo12', 100), 'foo-foo12'),
                (Build('MOCK Foo12', 100), 'foo-foo45'),
            ])
        self.assertLog([
            'INFO: For one/flaky-fail.html:\n',
            'INFO: Using "MOCK Foo12" build 100 for foo-foo45.\n',
            'INFO: Using "MOCK Bar4" build 200 for bar-bar3.\n',
        ])

    def test_explicit_builder_list(self):
        builders = ['MOCK Try Linux', 'MOCK Try Mac']
        options = self.command_options(builders=builders)
        exit_code = self.command.execute(options, [], self.tool)
        self.assertLog([
            'INFO: Finished try jobs found for all try bots.\n',
            'INFO: Rebaselining one/flaky-fail.html\n',
            'INFO: Rebaselining one/missing.html\n',
            'INFO: Rebaselining one/slow-fail.html\n',
            'INFO: Rebaselining one/text-fail.html\n',
            'INFO: Rebaselining two/image-fail.html\n',
        ])
        self.assertEqual(exit_code, 0)
        self.assertEqual(self.command.selected_try_bots, frozenset(builders))
| |
'''
Solves numerically and analyses the stability of the solution for the
simple pendulum using scaled, dimensionless variables.
'''
import numpy as np
import matplotlib.pylab as plt
class Bob(object):
    '''
    Represents the moving object in a single pendulum.
    All attributes are private and the user is advised to set them only via the
    setter methods provided, but underscores have been omitted for clarity.
    '''
    def __init__(self, t_f, G, y0, h):
        '''
        Parameters:
        t_f - scaled duration of experiment
        G - scaled damping coefficient
        y0 - initial angular amplitude and velocity, y0 = (theta, theta-dot)
        h - scaled time step
        All parameters are numbers, except y0 which is a list of two numbers.
        '''
        self.tf = float(t_f)
        self.G = float(G)
        self.y0 = np.array(y0, dtype=float)
        self.h = float(h)
        # t is the time grid; y[:, i] holds (theta, theta-dot) at time t[i].
        self.t = np.linspace(0, self.tf, int(self.tf/self.h))
        self.y = np.zeros((self.y0.size, self.t.size)); self.y[:,0] = self.y0

    def set_tf(self, tf, silent=False):
        # Changing the duration invalidates the time grid and the stored
        # solution, so both are rebuilt from scratch.
        self.tf = tf
        self.t = np.linspace(0, self.tf, int(self.tf/self.h))
        self.y = np.zeros((self.y0.size, self.t.size)); self.y[:,0] = self.y0
        if not silent: print self

    def set_G(self, G, silent=False):
        # The damping coefficient does not affect the grid, only the dynamics.
        self.G = G
        if not silent: print self

    def set_y0(self, y0, silent=False):
        # Only the initial column of the solution array is reset here;
        # later columns are overwritten by the next solver run.
        self.y0 = np.array(y0, dtype=float)
        self.y[:,0] = self.y0
        if not silent: print self

    def set_h(self, h, silent=False):
        # Changing the step size changes the number of grid points, so the
        # time grid and the solution array are rebuilt.
        self.h = h
        self.t = np.linspace(0, self.tf, int(self.tf/self.h))
        self.y = np.zeros((self.y0.size, self.t.size)); self.y[:,0] = self.y0
        if not silent: print self

    def __repr__(self):
        """
        Provides information on Bob object.
        """
        return "Bob(t_f = {0}, G = {1}, y0 = {2}, h = {3})".format(
            self.tf, self.G, self.y0, self.h)

    def explicit_euler(self):
        '''
        Solves system using the explicit Euler method.
        '''
        # One-step update matrix for the linearised (small-angle) system:
        # y[i] = f . y[i-1].
        f = np.array([ [1, self.h], [-self.h, (1-self.h*self.G)] ])
        for i in range(1, self.t.size):
            self.y[:,i] = f.dot(self.y[:,i-1])

    def leapfrog(self):
        '''
        Solves system using the leapfrog method. To get the second angle of
        motion, an explicit Euler method with 1/10 of the original time step is
        implemented.
        '''
        # Bootstrap: leapfrog needs two starting points, so y[:,1] is
        # produced by a fine-grained explicit Euler run over one step.
        h = 0.1*self.h
        tf = self.t[1]
        t = np.linspace(0, tf, int(tf/h))
        y = np.zeros((self.y0.size, t.size)); y[:,0] = self.y0
        f = np.array([ [1, h], [-h, (1-h*self.G)] ])
        for i in range(1, t.size):
            y[:,i] = f.dot(y[:,i-1])
        self.y[:,1] = y[:,-1]
        # Two-step (leapfrog) update: y[i] = y[i-2] + f . y[i-1].
        f = np.array([ [0, 2*self.h], [-2*self.h, -2*self.h*self.G] ])
        for i in range(2, self.t.size):
            self.y[:,i] = self.y[:,i-2] + f.dot(self.y[:,i-1])

    def runge_kutta(self):
        '''
        Solves system using the Runge-Kutta 4 method.
        '''
        # f is the right-hand-side matrix of the linearised system y' = f.y.
        f = np.array([ [0, 1], [-1, -self.G] ])
        for i in range(1, self.t.size):
            # Classic RK4 stages k1..k4 with weighted average.
            k1 = f.dot(self.y[:, i-1])
            y2 = self.y[:,i-1] + 0.5*self.h*k1
            k2 = f.dot(y2)
            y3 = self.y[:,i-1] + 0.5*self.h*k2
            k3 = f.dot(y3)
            y4 = self.y[:,i-1] + 1.*self.h*k3
            k4 = f.dot(y4)
            self.y[:,i] = self.y[:,i-1] + self.h/6. * (k1 + 2*k2 + 2*k3 + k4)

    def implicit_euler(self):
        '''
        Solves system using the implicit Euler method.
        '''
        # u = theta, v = theta-dot. The implicit system for the linearised
        # pendulum has been solved algebraically for v[i], so no iterative
        # solver is needed.
        u, v = self.y
        for i in range(1, self.t.size):
            v[i] = (v[i-1] - self.h*u[i-1])/(1+self.h*self.G+self.h*self.h)
            u[i] = u[i-1] + self.h*v[i]
        self.y[0], self.y[1] = u, v

    def exact_euler(self):
        '''
        Solves system using the explicit Euler method, but without the small
        angle approximation (the full sin(theta) restoring force is kept).
        '''
        u, v = self.y
        for i in range(1, self.t.size):
            v[i] = -self.h*np.sin(u[i-1]) + (1-self.h*self.G)*v[i-1]
            u[i] = u[i-1] + self.h*v[i-1]
        self.y[0], self.y[1] = u, v

    def method(self, meth='rk'):
        '''
        Chooses method to solve the system. Parameter meth can be:
        ee - explicit_euler
        lp - leapfrog
        rk - runge_kutta
        ie - implicit_euler
        ce - exact_euler
        Unknown values of meth leave the solution array untouched.
        '''
        if meth=='ee':
            self.explicit_euler()
        if meth=='lp':
            self.leapfrog()
        if meth=='rk':
            self.runge_kutta()
        if meth=='ie':
            self.implicit_euler()
        if meth=='ce':
            self.exact_euler()

    def energy_ratio(self, meth='rk', plot_energy=True):
        '''
        Returns and can plot energy over initial energy for given method.
        Potential energy is set to 0 at the lowest level the pendulum reaches.
        '''
        self.method(meth)
        y = self.y
        # E = kinetic (v^2/2) + potential (1 - cos(theta)), in scaled units.
        E = 0.5*y[1]*y[1] + (1 - np.cos(y[0]))
        if plot_energy:
            plt.plot(self.t, E/E[0])
            plt.xlabel('Time')
            plt.ylabel('Fractional energy')
        return E/E[0]

    def is_stable(self, ratio, meth, low_bounds=False):
        '''
        Returns criteria for stability.
        Parameter low_bounds checks for energy decay in an undamped case.
        Leapfrog and exact euler present big fluctuations.
        '''
        # Per-method upper tolerance on the final-to-initial energy ratio;
        # leapfrog and exact Euler are allowed large fluctuations.
        if meth=='lp': M=100.
        elif meth=='ce': M=1.e15
        else: M = 1.1
        m = 0.9
        if low_bounds:
            return ratio > m
        return ratio < M

    def stab_analysis(self, meth='rk', dps=3):
        '''
        Analyses stability of system and returns whether it is unconditionally
        stable, unstable or conditionally stable along with the threshold of
        time step for stability.
        Determines if energy blows up and at what time step, but also warns
        about unexpected energy decay in the undamped cases.
        Parameter dps is number of decimal places counted for accuracy.
        '''
        # Set reasonable values for h and tf to speed up the algorithm
        if meth=='ce':
            self.set_h(4, silent=True)
            self.set_tf(100000, silent=True)
        else:
            self.set_h(0.1, silent=True)
            self.set_tf(1000, silent=True)
        # Set up the bisection method to find critical h
        hmin = 5e-4
        if meth=='ce': hmax = 1000
        else: hmax = 10.
        h_prev = hmax
        diff = abs(h_prev-self.h)
        # Run algorithm
        ratio = self.energy_ratio(meth, plot_energy=False)[-1]
        stable = self.is_stable(ratio, meth)
        # Phase 1: shrink h by factors of 10 until a stable step is found.
        while not stable and self.h>=hmin:
            self.set_h(0.1*self.h, silent=True)
            ratio = self.energy_ratio(meth, plot_energy=False)[-1]
            stable = self.is_stable(ratio, meth)
        # Phase 2: bisect between the stable h and h_prev to the requested
        # precision (dps decimal places).
        while stable and (self.h<=hmax) and (diff>10**(-dps)):
            self.set_h(0.5*(self.h+h_prev), silent=True)
            diff = abs(h_prev-self.h)
            ratio = self.energy_ratio(meth, plot_energy=False)[-1]
            stable = self.is_stable(ratio, meth)
            while not stable:
                h_prev = self.h
                self.set_h(self.h-diff/2., silent=True)
                ratio = self.energy_ratio(meth, plot_energy=False)[-1]
                stable = self.is_stable(ratio, meth)
        # Check for energy decay just before blow-up
        if stable and self.G==0.:
            hi = self.h
            self.set_h(self.h-10.*hmin, silent=True)
            ratio = self.energy_ratio(meth, plot_energy=False)[-1]
            stable = self.is_stable(ratio, meth, low_bounds=True)
            if not stable:
                print "\nEnergy decays unexpectedly."
            elif stable:
                print "\nEnergy does not decay."
            self.set_h(hi, silent=True)
        # Return results of stability analysis
        if self.h<hmin:
            print "\nUnconditionally unstable. Energy blows up.\n"
            print self
            return None
        if self.h>(hmax-10.*diff):
            print "\nUnconditionally stable.\n"
            print self
            return None
        if diff<10**(-dps):
            print '\nConditionally stable under h_crit = {0}\n'.format(self.h)
            print self
            return self.h
| |
from __future__ import division
from __future__ import print_function
from myhdl import *
from mysigs import Clock, Reset
from vga_intf import System
from vga_intf import VGA
from vga_intf import VideoMemory
from vga_timing_params import calc_timings
def m_vga_sync(
# [ports and interfaces}
sys, # system bundle of signals, clock, reset
vga, # signals for the VGA
vmem, # the video memory interface
# [parameters]
resolution = (640,480,), # resolution in pixels
refresh_rate = 60, # refresh rate in Hz (vertical rate)
line_rate = 31250 # line rate in Hz (horizontral rate)
):
"""
The following is the generation of the signals required
to drive a VGA display. This implementation is derived
from the pseudo code provide here:
http://hamsterworks.co.nz/mediawiki/index.php/Module_11
Well isn't that nice - the Python/MyHDL implementation is
very close to the "pseudo code"!
Also, this module is intended to be parameterizable and
modular based on the desired video settings
clock.frequency - the clock used to generate the pixel
clock
video_resolution - in pixels, tVGA resolution
refresh_rate - in Hz, default 60
line_rate - in Hz, default is 31,250
Ports (arguments):
------------------
sys.clock : system synchronous clock
sys.reset : system reset
vga.hsync : horinontal sync
vga.vsync : veritcal sync
vga.red :
vga.green :
vga.blue :
vmem.addr : pixel address
vmem.red : read pixel value
vmem.green :
vmem.blue :
Parameters:
-----------
resolution : video resolution
refresh_rate : vertical rate in Hz
line_rate : horizontal rate in Hz
VGA Timing
----------
"""
res = resolution
clock = sys.clock
reset = sys.reset
# compute the limits (counter limits) for the vsync
# and hsync timings. Review the cacl_timing function
# for defintions of A,B,C,D,E,O,P,Q,R,S, and Z
(A,B,C,D,E,O,
P,Q,R,S,X,Z,) = calc_timings(clock.frequency, resolution,
refresh_rate, line_rate)
FullScreen = O
# counters to count the pixel clock (clock)
HPXL,VPXL = res
xcnt = intbv(0, min=-1, max=X+1) # clock div
hcnt = intbv(0, min=0, max=A+1) # hor count in ticks
vcnt = intbv(0, min=0, max=O+1) # ver count in ticks
#hpxl = Signal(intbv(0, min=0, max=HPXL)) # hor pixel (x coord)
#vpxl = Signal(intbv(0, min=0, max=VPXL)) # ver pixel (y coord)
hpxl = vmem.hpxl
vpxl = vmem.vpxl
# debug stuff
hcd = Signal(hcnt)
vcd = Signal(vcnt)
# the hsync and vsync are periodic so we can start anywhere,
# it is convinient to start at the active pixel area
@always_seq(clock.posedge, reset=reset)
def rtl_sync():
# horizontal and vertical counters
hcnt[:] = hcnt + 1
vcnt[:] = vcnt + 1
if vcnt == FullScreen:
vcnt[:] = 0
hcnt[:] = 0
elif vcnt > R:
hcnt[:] = A-1
elif hcnt >= A:
hcnt[:] = 0
# clock divider for pixel enable
xcnt[:] = xcnt + 1
if hcnt == 1:
xcnt[:] = 1
elif xcnt == X:
xcnt[:] = 0
# tick counter to generate pixel enable
if xcnt == 0 and hcnt <= D:
vga.pxlen.next = True
else:
vga.pxlen.next = False
# genrate the VGA strobes
if hcnt >= (D+E) and hcnt < (D+E+B):
vga.hsync.next = False
else:
vga.hsync.next = True
if vcnt >= (R+S) and vcnt < (R+S+P):
vga.vsync.next = False
else:
vga.vsync.next = True
# current pixel x,y coordinates
if hpxl < (HPXL-1) and xcnt == 0 and hcnt <= D:
hpxl.next = hpxl + 1
elif hcnt > (D+E):
hpxl.next = 0
if hcnt >= (A-1) and vcnt < R:
vpxl.next = vpxl + 1
elif vcnt > (R+S):
vpxl.next = 0
# debug and verification
hcd.next = hcnt
vcd.next = vcnt
# end debug stuff
# Logically define which VGA state we are currently in. This is
# required for (simplified) verification but will be removed
# by synthesis (outputs dangling).
@always_comb
def rtl_state():
    # Decode the debug counter copies (hcd/vcd) into the symbolic
    # vga.States value; sync pulses win over the porch regions.
    if not vga.hsync:
        vga.state.next = vga.States.HSYNC
    elif not vga.vsync:
        vga.state.next = vga.States.VSYNC
    elif hcd < D:
        vga.state.next = vga.States.ACTIVE
    elif vcd >= R and vcd < (R+S):
        vga.state.next = vga.States.VER_FRONT_PORCH
    elif vcd >= (R+S) and vcd < (R+S+P):
        pass # should be handled by the vsync branch above
    elif vcd >= (R+S+P) and vcd < (FullScreen):
        vga.state.next = vga.States.VER_BACK_PORCH
    elif hcd >= D and hcd < (D+E):
        vga.state.next = vga.States.HOR_FRONT_PORCH
    elif hcd >= (D+E) and hcd < (D+E+B):
        pass # should be handled by the hsync branch above
    elif hcd >= (D+E+B) and hcd < (D+E+B+C):
        vga.state.next = vga.States.HOR_BACK_PORCH
    # `active` mirrors the horizontal active-pixel window only
    if hcd < D:
        vga.active.next = True
    else:
        vga.active.next = False
#_state = Signal(intbv(0)[8:])
#@always_comb
#def tmon():
#    _state.next = int(vga.state._val._index)
# map the video memory pixels to the VGA bus
@always_comb
def rtl_map():
    # Pure wiring: forward the colour channels from the video memory.
    vga.red.next = vmem.red
    vga.green.next = vmem.green
    vga.blue.next = vmem.blue
# hand the three generators back to the caller / converter
return rtl_sync, rtl_state, rtl_map
if __name__ == '__main__':
    # Simple conversion driver: build the design and convert it to Verilog.
    # NOTE(review): the name `sys` shadows the stdlib `sys` module if it is
    # imported in this file — confirm against the module imports.
    sys = System(frequency=50e6)
    vga = VGA()
    vmem = VideoMemory()
    g = m_vga_sync(sys, vga, vmem)  # elaborate once (simulation handle)
    toVerilog(m_vga_sync, sys, vga, vmem)
| |
# PROBLEM 1 : The Adoption Center
class AdoptionCenter:
    """
    The AdoptionCenter class stores the important information that a
    client would need to know about, such as the different numbers of
    species stored, the location, and the name. It also has a method to adopt a pet.
    """

    def __init__(self, name, species_types, location):
        """
        :param name: display name of the center
        :param species_types: dict mapping species name -> animal count
        :param location: (x, y) pair; coordinates may be any numeric type
        """
        self.name = name
        self.species_types = species_types
        self.location = location

    def get_number_of_species(self, animal):
        """Return how many animals of `animal` are available (0 if none)."""
        # dict.get replaces the original bare try/except around the lookup
        return self.species_types.get(animal, 0)

    def get_location(self):
        """Return the center's location as a (float, float) tuple."""
        return (float(self.location[0]), float(self.location[1]))

    def get_species_count(self):
        """Return a copy of the species->count dict (safe for callers to mutate)."""
        return self.species_types.copy()

    def get_name(self):
        """Return the center's name."""
        return self.name

    def adopt_pet(self, species):
        """
        Adopt out one animal of `species`: decrement its count, removing
        the species entirely when the last one is taken. Unknown species
        are a silent no-op (same contract as the original try/except code).
        """
        count = self.species_types.get(species)
        if count is None:
            return
        if count <= 1:
            # last animal (or a bogus non-positive count): drop the entry
            del self.species_types[species]
        else:
            self.species_types[species] = count - 1
# PROBLEM 2 : Meet the Adopter
class Adopter:
    """
    Adopters represent people interested in adopting a species.
    They have a desired species type that they want, and their score is
    simply the number of animals that the shelter has of that species.
    """

    def __init__(self, name, desired_species):
        """
        :param name: the adopter's name
        :param desired_species: the single species this adopter wants
        """
        self.name = name
        self.desired_species = desired_species

    def get_name(self):
        """Return the adopter's name."""
        return self.name

    def get_desired_species(self):
        """Return the species this adopter wants."""
        return self.desired_species

    def get_score(self, adoption_center):
        """
        Return the adopter's score for `adoption_center` as a float:
        the number of desired animals the center has (0.0 if none).
        """
        # dict.get replaces the original bare try/except around the lookup
        counts = adoption_center.get_species_count()
        return float(counts.get(self.desired_species, 0))
# PROBLEM 3 : The Flexible and Fearful Adopters
class FlexibleAdopter(Adopter):
    """
    A FlexibleAdopter still has one type of species that they desire,
    but they are also alright with considering other types of species.
    considered_species is a list containing the other species the adopter
    will consider. Their score is 1x the desired-species count plus 0.3x
    the total count of all considered species (never below 0).
    """

    def __init__(self, name, desired_species, considered_species):
        # Reuse the base initializer (the original duplicated the assignments)
        Adopter.__init__(self, name, desired_species)
        self.considered_species = considered_species

    def get_score(self, adoption_center):
        """Return base score plus 0.3 per animal of any considered species."""
        counts = adoption_center.get_species_count()
        considered = sum(counts.get(s, 0) for s in self.considered_species)
        # Compute the base score once (the original evaluated it twice)
        score = Adopter.get_score(self, adoption_center) + .3 * float(considered)
        return score if score > 0 else 0.0
class FearfulAdopter(Adopter):
    """
    A FearfulAdopter is afraid of a particular species of animal.
    If the adoption center has one or more of those animals in it, they will
    be a bit more reluctant to go there due to the presence of the feared species.
    Their score is 1x the desired-species count minus 0.3x the feared-species
    count, floored at 0.
    """

    def __init__(self, name, desired_species, feared_species):
        # Reuse the base initializer (the original duplicated the assignments)
        Adopter.__init__(self, name, desired_species)
        # feared_species is a single species name, not a list
        self.feared_species = feared_species

    def get_score(self, adoption_center):
        """Return base score minus 0.3 per feared animal, never negative."""
        counts = adoption_center.get_species_count()
        feared = counts.get(self.feared_species, 0)
        # Compute the base score once (the original evaluated it twice)
        score = Adopter.get_score(self, adoption_center) - .3 * float(feared)
        return score if score > 0 else 0.0
# PROBLEM 4 : AllergicAdopter and MedicatedAllergicAdopter
class AllergicAdopter(Adopter):
    """
    An AllergicAdopter is extremely allergic to one or more species and cannot
    even be around it a little bit! If the adoption center contains one or more
    of these animals, they will not go there.
    Score is 0 if the center contains any of the animals, or 1x the number of
    desired animals if not.
    """

    def __init__(self, name, desired_species, allergic_species):
        # Reuse the base initializer (the original duplicated the assignments)
        Adopter.__init__(self, name, desired_species)
        # BUG FIX: the attribute was misspelled `allegric_species`
        self.allergic_species = allergic_species

    def get_score(self, adoption_center):
        """Return 0.0 if any allergic species is present, else the base score."""
        counts = adoption_center.get_species_count()
        if any(species in counts for species in self.allergic_species):
            return 0.0
        return Adopter.get_score(self, adoption_center)
class MedicatedAllergicAdopter(AllergicAdopter):
    """
    A MedicatedAllergicAdopter is extremely allergic to a particular species.
    However! They have a medicine of varying effectiveness, given as a dict
    mapping species -> effectiveness factor.
    The score is the base Adopter score multiplied by the LOWEST effectiveness
    among the allergic species the center actually has (1.0 when the center
    has none of them, and never above 1.0).
    """

    def __init__(self, name, desired_species, allergic_species, medicine_effectiveness):
        # Assign attributes directly so this block does not depend on the
        # parent initializers' internals.
        self.name = name
        self.desired_species = desired_species
        self.allergic_species = allergic_species
        self.medicine_effectiveness = medicine_effectiveness

    def get_score(self, adoption_center):
        """Return the base score scaled by the worst applicable medicine factor."""
        counts = adoption_center.get_species_count()
        # min(... + [1.0]) reproduces the original accumulator that started
        # at 1.0 and was only ever lowered (caps the factor at 1.0 and
        # defaults to 1.0 when no allergic species is present).
        effectiveness = min(
            [self.medicine_effectiveness[s]
             for s in self.allergic_species if s in counts] + [1.0])
        return Adopter.get_score(self, adoption_center) * effectiveness
# PROBLEM 5 : The Sluggish Adopter
class SluggishAdopter(Adopter):
    """
    A SluggishAdopter lives at a fixed (x, y) location and discounts an
    adoption center's score by a random factor that shrinks with distance:
    full score under 1 unit, then uniform factors in [0.7, 0.9), [0.5, 0.7)
    and [0.1, 0.5) for the 1-3, 3-5 and 5+ unit bands respectively.
    """

    def __init__(self, name, desired_species, location):
        Adopter.__init__(self, name, desired_species)
        # Normalize the coordinates to floats up front
        self.location = (float(location[0]), float(location[1]))

    def get_linear_distance(self, to_location):
        """Return the Euclidean distance from this adopter to `to_location`."""
        x1, y1 = self.location
        x2 = float(to_location[0])
        y2 = float(to_location[1])
        return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5

    def get_score(self, adoption_center):
        """Return the desired-species count scaled by a distance-dependent factor."""
        # BUG FIX: `random` was used but never imported in this file;
        # import locally so the method is self-contained.
        import random
        d = self.get_linear_distance(adoption_center.get_location())
        count = adoption_center.get_number_of_species(self.desired_species)
        if d < 1.0:
            return count
        if d < 3.0:
            return random.uniform(0.7, 0.9) * count
        if d < 5.0:
            return random.uniform(0.5, 0.7) * count
        return random.uniform(0.1, 0.5) * count
| |
"""Handle copying data between sites.
This is one of the key tasks of 'cax' because it's responsible for moving
data between sites. At present, it just does scp.
"""
import datetime
import logging
import os
import time
import shutil
import scp
from paramiko import SSHClient, util
import pax
from cax import config
from cax.task import Task
from cax import qsub
from cax.tasks.clear import BufferPurger
from cax.tasks.tsm_mover import TSMclient
from cax.tasks.rucio_mover import RucioBase, RucioRule, RucioDownload
from cax.tasks.checksum import ChecksumMethods
import subprocess
class CopyBase(Task):
def copy(self, datum_original, datum_destination, method, option_type, data_type):
    """Dispatch a single transfer to the configured copy protocol.

    :param datum_original: run-DB data entry describing the source copy
    :param datum_destination: run-DB data entry describing the destination copy
    :param method: 'scp', 'rsync', 'gfal-copy', 'lcg-cp' or 'rucio'
    :param option_type: 'upload' or 'download'
    :param data_type: 'raw' or 'processed'
    :raises NotImplementedError: for an unknown method (or a rucio call
        whose option_type is neither 'upload' nor 'download')
    """
    # The remote end of the transfer determines which host config to read.
    if option_type == 'upload':
        config_destination = config.get_config(datum_destination['host'])
        server = config_destination['hostname']
        username = config_destination['username']
    else:
        config_original = config.get_config(datum_original['host'])
        server = config_original['hostname']
        username = config_original['username']
    # Fall back to a single stream when not configured ('is None', was '== None')
    if config.nstream_settings() is None:
        nstreams = 1
    else:
        nstreams = config.nstream_settings()
    # Empty cert path when no GRID certificate is configured
    if config.get_cert() is None:
        grid_cert = ''
    else:
        grid_cert = config.get_cert()
    # Determine method for remote site
    if method == 'scp':
        self.copySCP(datum_original, datum_destination,
                     server, username, option_type)
    elif method == 'rsync':
        self.copyRSYNC(datum_original, datum_destination,
                       server, username, option_type, data_type)
    elif method == 'gfal-copy':
        self.copyGFAL(datum_original, datum_destination,
                      server, option_type, nstreams, grid_cert)
    elif method == 'lcg-cp':
        self.copyLCGCP(datum_original, datum_destination,
                       server, option_type, nstreams)
    elif method == 'rucio' and option_type == 'upload':
        self.rucio.copyRucio(
            datum_original, datum_destination, option_type)
    elif method == 'rucio' and option_type == 'download':
        self.ruciodw.DoDownload(
            datum_original, datum_destination, option_type)
    else:
        # Carry the message in the exception instead of a separate print()
        raise NotImplementedError(method + " not implemented")
def copyLCGCP(self, datum_original, datum_destination, server, option_type, nstreams):
    """Upload data file-by-file via lcg-cr with simultaneous LFC registration.

    WARNING: Only SRM<->Local implemented (not yet SRM<->SRM); the
    download direction raises NotImplementedError.

    :param datum_original: run-DB data entry describing the source copy
    :param datum_destination: run-DB data entry describing the destination copy
    :param server: remote SRM endpoint prefix
    :param option_type: 'upload' or 'download'
    :param nstreams: number of parallel transfer streams (-n)
    :raises TypeError: if the source location is not a directory
    :raises subprocess.CalledProcessError: if lcg-cr exits non-zero
    :raises NotImplementedError: for option_type 'download'
    """
    dataset = datum_original['location'].split('/').pop()
    # lcg-cr arguments:
    # -b -D srmv2: skip BDII lookup, use the SRMv2 endpoint directly
    # -n: number of streams (4 for now, but doesn't work on xe1t-datamanager so use lcg-cp instead)
    # Currently for resolving Midway address, may not be needed for other sites
    command_options = "-b -D srmv2 "
    command_options += "-n %d " % (nstreams)
    command = "time lcg-cr " + command_options
    status = -1  # NOTE(review): never updated or read below
    if option_type == 'upload':
        logging.info(option_type + ": %s to %s" % (datum_original['location'],
                                                   server + datum_destination['location']))
        # Simultaneous LFC registration
        lfc_config = config.get_config("lfc")
        if not os.path.isdir(datum_original['location']):
            raise TypeError('{} is not a directory.'.format(
                datum_original['location']))
        # One lcg-cr call per file: copy to SRM (-d) and register the file
        # in the LFC catalogue (-l) under <lfc host><dir_<type>>/<dataset>/.
        for root, dirs, files in os.walk(datum_original['location'], topdown=True):
            for filename in files:
                # Warning: Processed data dir not implemented for LFC here
                lfc_address = lfc_config['hostname'] + \
                    lfc_config['dir_' + datum_original['type']]
                full_command = command + \
                    "-d " + server + datum_destination['location'] + "/" + filename + " " + \
                    "-l " + lfc_address + "/" + dataset + "/" + filename + " " + \
                    "file://" + \
                    datum_original['location'] + "/" + filename
                self.log.info(full_command)
                try:
                    lcg_out = subprocess.check_output(
                        full_command, stderr=subprocess.STDOUT, shell=True)
                except subprocess.CalledProcessError as lcg_exec:
                    # Log the tool output before re-raising to the caller
                    self.log.error(
                        lcg_exec.output.rstrip().decode('ascii'))
                    self.log.error("Error: lcg-cr status = %d\n" %
                                   lcg_exec.returncode)
                    raise
    else:  # download
        logging.info(option_type + ": %s to %s" % (server + datum_original['location'],
                                                   datum_destination['location']))
        raise NotImplementedError()
        # Sketch of the (unimplemented) download command:
        #command = "time lcg-cp "+command_options
        # full_command = command+ \
        #    server+datum_original['location']+" "+ \
        #    "file://"+datum_destination['location']
def copyGFAL(self, datum_original, datum_destination, server, option_type, nstreams, grid_cert):
    """Copy data between a local filesystem and an SRM endpoint via gfal-copy.

    WARNING: Only SRM<->Local implemented (not yet SRM<->SRM)

    :param datum_original: run-DB data entry describing the source copy
    :param datum_destination: run-DB data entry describing the destination copy
    :param server: remote endpoint prefix (GSIFTP/SRM URL)
    :param option_type: 'upload' or 'download'
    :param nstreams: number of parallel transfer streams (-n)
    :param grid_cert: path to an initialized GRID proxy certificate
    :raises subprocess.CalledProcessError: if gfal-copy exits non-zero
    :raises RuntimeError: if gfal-copy exits 0 but its output reports an error
    """
    # gfal-copy arguments:
    # -f: overwrite
    # -r: recursive
    # -p: create missing destination directories
    # -n: number of streams (4 for now, but doesn't work on xe1t-datamanager so use lcg-cp instead)
    # -t: timeout in seconds
    # -K: specify checksum algorithm
    # --cert: path to initialized GRID certificate
    #     (voms-proxy-init -voms xenon.biggrid.nl -valid 168:00 -out user_cert)
    command = "time gfal-copy -v -f -r -p -t 32400 -K adler32 --cert %s -n %d " % (
        grid_cert, nstreams)
    if option_type == 'upload':
        logging.info(option_type + ": %s to %s" % (datum_original['location'],
                                                   server + datum_destination['location']))
        # Use GSIFTP address instead of POSIX from Stash (to avoid login node)
        if config.get_hostname() == 'login':
            config_original = config.get_config(datum_original['host'])
            server_original = config_original['hostname']
            full_command = command + \
                server_original + datum_original['location'] + " " + \
                server + datum_destination['location']
        else:
            full_command = command + \
                "file://" + datum_original['location'] + " " + \
                server + datum_destination['location']
    else:  # download
        logging.info(option_type + ": %s to %s" % (server + datum_original['location'],
                                                   datum_destination['location']))
        full_command = command + \
            server + datum_original['location'] + " " + \
            "file://" + datum_destination['location']
    self.log.info(full_command)
    try:
        gfal_out = subprocess.check_output(
            full_command, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as gfal_exec:
        self.log.error(gfal_exec.output.rstrip().decode('ascii'))
        self.log.error("Error: gfal-copy status = %d\n" %
                       gfal_exec.returncode)
        raise
    gfal_out_ascii = gfal_out.rstrip().decode('ascii')
    if "error" in gfal_out_ascii.lower():  # Some errors don't get caught above
        self.log.error(gfal_out_ascii)
        # BUG FIX: this was a bare `raise` with no active exception, which
        # itself crashes with "RuntimeError: No active exception to re-raise".
        # Raise an explicit error carrying the tool output instead.
        raise RuntimeError("gfal-copy reported an error: " + gfal_out_ascii)
    else:
        self.log.info(gfal_out_ascii)  # To print timing
def copyRSYNC(self, datum_original, datum_destination, server, username, option_type, data_type):
    """Copy data to/from a remote host via rsync over ssh.

    :param datum_original: run-DB data entry describing the source copy
    :param datum_destination: run-DB data entry describing the destination copy
    :param server: remote hostname
    :param username: remote user for the ssh connection
    :param option_type: 'upload' or 'download'
    :param data_type: 'raw' or 'processed'
    :raises subprocess.CalledProcessError: if rsync exits non-zero
    :raises RuntimeError: if rsync exits 0 but its output reports an error
    """
    command = "time rsync -r --stats "
    # Raw data transfers may resume interrupted files.
    # BUG FIX: the original compared `data_type is 'raw'`, an identity test
    # that only worked via CPython string interning; use equality.
    if data_type == 'raw':
        command += "--append "
    if option_type == 'upload':
        logging.info(option_type + ": %s to %s" % (datum_original['location'],
                                                   server + datum_destination['location']))
        full_command = command + \
            datum_original['location'] + " " + \
            username + "@" + server + ":" + \
            os.path.dirname(datum_destination['location'])
    else:  # download
        logging.info(option_type + ": %s to %s" % (server + datum_original['location'],
                                                   datum_destination['location']))
        full_command = command + \
            username + "@" + server + ":" + datum_original['location'] + " " + \
            os.path.dirname(datum_destination['location'])
    self.log.info(full_command)
    try:
        rsync_out = subprocess.check_output(
            full_command, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as rsync_exec:
        self.log.error(rsync_exec.output.rstrip().decode('ascii'))
        self.log.error("Error: rsync status = %d\n" %
                       rsync_exec.returncode)
        raise
    rsync_out_ascii = rsync_out.rstrip().decode('ascii')
    if "error" in rsync_out_ascii.lower():  # Some errors don't get caught above
        self.log.error(rsync_out_ascii)
        # BUG FIX: this was a bare `raise` with no active exception, which
        # itself crashes with "RuntimeError: No active exception to re-raise".
        raise RuntimeError("rsync reported an error: " + rsync_out_ascii)
    else:
        self.log.info(rsync_out_ascii)  # To print timing
def copySCP(self, datum_original, datum_destination, server, username, option_type):
    """Copy data to/from a remote host via SCP (paramiko + scp).

    :param datum_original: run-DB data entry describing the source copy
    :param datum_destination: run-DB data entry describing the destination copy
    :param server: remote hostname
    :param username: remote user for the ssh connection
    :param option_type: 'upload' or 'download'
    """
    util.log_to_file('ssh.log')
    ssh = SSHClient()
    ssh.load_system_host_keys()
    logging.info("connection to %s" % server)
    ssh.connect(server,
                username=username,
                compress=True,
                timeout=60)
    # BUG FIX (resource leak): the original closed only the SCP client and
    # never the underlying SSH connection, and leaked both on exceptions.
    try:
        # SCPCLient takes a paramiko transport as its only argument
        client = scp.SCPClient(ssh.get_transport())
        try:
            logging.info(option_type + ": %s to %s" % (datum_original['location'],
                                                       datum_destination['location']))
            if option_type == 'upload':
                client.put(datum_original['location'],
                           datum_destination['location'],
                           recursive=True)
            else:
                client.get(datum_original['location'],
                           datum_destination['location'],
                           recursive=True)
        finally:
            client.close()
    finally:
        ssh.close()
def each_run(self):
    """Run over the requested data types according to the json config file.

    Aborts the process when this host's configuration lacks a 'data_type'
    list; otherwise triggers do_possible_transfers() once per data type.
    """
    if 'data_type' not in config.get_config(config.get_hostname()):
        # Misconfiguration is fatal: log at error level (was logging.info).
        logging.error("Error: Define a data_type in your configuration file")
        logging.error(" (e.g. 'data_type': ['raw'])")
        # Equivalent to the site builtin exit(), but also works when site
        # builtins are unavailable (python -S, frozen interpreters).
        raise SystemExit
    for data_type in config.get_config(config.get_hostname())['data_type']:
        self.log.debug("%s" % data_type)
        self.do_possible_transfers(option_type=self.option_type,
                                   data_type=data_type)
def do_possible_transfers(self,
                          option_type='upload',
                          data_type='raw'):
    """Determine candidate transfers.
    :param option_type: 'upload' or 'download'
    :type str
    :param data_type: 'raw' or 'processed'
    :type str
    :return: (None, None) when nothing can be done, otherwise None
    """
    # Get the 'upload' or 'download' options.
    options = config.get_transfer_options(option_type)
    # If no options, can't do anything
    if options is None:
        return None, None
    # If should be purged, don't pull
    PurgeObj = BufferPurger()
    PurgeObj.run_doc = self.run_doc
    if option_type == 'download' and data_type == 'raw' and PurgeObj.check_purge_requirements():
        self.log.info("Skip raw download that would be purged")
        return None, None
    start = time.time()
    # For this run, where do we have transfer access?
    datum_there = None
    datum_here = None
    for remote_host in options:
        self.log.debug(remote_host)
        # Get transfer protocol
        method = config.get_config(remote_host)['method']
        if not method:
            # BUG FIX: was a print() followed by a bare `raise` with no
            # active exception (which itself crashes with a confusing
            # RuntimeError). Raise a real exception with the message.
            raise RuntimeError(
                "Must specify transfer protocol (method) for " + remote_host)
        datum_here, datum_there = self.local_data_finder(data_type,
                                                         option_type,
                                                         remote_host)
        # Delete the old data base entry if rucio transfers are requested
        # and an old upload failed by a bad connection error.
        if method == "rucio" and datum_there is not None and datum_there['status'] == 'RSEreupload' and config.DATABASE_LOG:
            self.log.info(
                "Former upload of %s failed with error", datum_here['location'])
            self.log.info(
                "[('Connection aborted.', BadStatusLine('',))] -> Delete runDB status and start again")
            self.collection.update({'_id': self.run_doc['_id']},
                                   {'$pull': {'data': datum_there}})
        # Upload logic for everything except tape
        if option_type == 'upload' and method != "tsm" and datum_here and (datum_there is None or datum_there['status'] == 'RSEreupload'):
            self.copy_handshake(datum_here, remote_host,
                                method, option_type, data_type)
            break
        # Download logic for everything except tape
        if option_type == 'download' and datum_there and datum_here is None and method != "tsm":
            self.copy_handshake(
                datum_there, config.get_hostname(), method, option_type, data_type)
            break
        # Upload tsm:
        if option_type == 'upload' and datum_here and datum_there is None and method == "tsm":
            self.copy_tsm(datum_here, config.get_config(
                remote_host)['name'], method, option_type)
            break
        # Download tsm:
        if option_type == 'download' and datum_there and datum_here is None and method == "tsm":
            self.copy_tsm_download(
                datum_there, config.get_hostname(), method, option_type)
            break
    # Report how long the transfer of this dataset took.
    dataset = None
    if datum_there is not None:
        dataset = datum_there['location'].split('/').pop()
    elif datum_here is not None:
        dataset = datum_here['location'].split('/').pop()
    if dataset is not None:  # Not sure why it does this sometimes
        end = time.time()
        elapsed = end - start
        self.log.info(method + " " + option_type +
                      " dataset " + dataset + " took %d seconds" % elapsed)
def local_data_finder(self, data_type, option_type, remote_host):
    """Scan this run's data entries for copies on this host and on remote_host.

    Returns (datum_here, datum_there): dict copies of the matching entries,
    or None where no suitable entry exists. An entry only qualifies if its
    transfer status and pax version are compatible with the requested
    direction; when several entries match, the last one wins.
    """
    found_local = None   # data entry for this host
    found_remote = None  # data entry for remote_host
    expected_version = 'v%s' % pax.__version__
    hostname = config.get_hostname()
    for entry in self.run_doc['data']:
        # Skip entries of the wrong type or without a host at all
        if 'host' not in entry or entry['type'] != data_type:
            continue
        is_transferred = entry['status'] == 'transferred'
        if entry['host'] == hostname:
            # To upload from here, the local copy must be complete
            if option_type == 'upload' and not is_transferred:
                continue
            # Processed data must match the running pax version
            if entry['type'] == 'processed' and entry['pax_version'] != expected_version:
                continue
            found_local = dict(entry)
        elif entry['host'] == remote_host:
            # To download from there, the remote copy must be complete
            if option_type == 'download' and not is_transferred:
                continue
            if entry['type'] == 'processed' and entry['pax_version'] != expected_version:
                continue
            found_remote = dict(entry)
    return found_local, found_remote
def copy_tsm_download(self, datum, destination, method, option_type):
    """A dedicated download function for downloads from tape storage.

    Restores a raw data set from the PDC Stockholm tape backup (TSM) into
    the local raw-data directory, strips the filename prefix added at
    upload time, compares the MD5 checksum against the run-DB entry and
    records the final status ('transferred' or 'error') in the database.

    :param datum: run-DB data entry of the tape copy to restore
    :param destination: hostname that will hold the restored copy
    :param method: transfer protocol (always 'tsm' here)
    :param option_type: 'download'
    :return: 0
    """
    self.tsm = TSMclient()
    logging.info('Tape Backup to PDC STOCKHOLM (Download)')
    raw_data_location = datum['location']
    raw_data_filename = datum['location'].split('/')[-1]
    raw_data_path = config.get_config(config.get_hostname())['dir_raw']
    raw_data_tsm = config.get_config(config.get_hostname())['dir_tsm']
    logging.info("Raw data location @xe1t-datamanager: %s",
                 raw_data_location)
    logging.info("Path to raw data: %s", raw_data_path)
    logging.info("Path to tsm data: %s", raw_data_tsm)
    logging.info("File/Folder for backup: %s", raw_data_filename)
    self.log.debug("Notifying run database")
    # Placeholder run-DB entry marking this restore as in progress.
    datum_new = {'type': datum['type'],
                 'host': destination,
                 'status': 'transferring',
                 'location': "n/a",
                 'checksum': None,
                 'creation_time': datetime.datetime.utcnow(),
                 }
    logging.info("new entry for rundb: %s", datum_new)
    if config.DATABASE_LOG == True:
        result = self.collection.update_one({'_id': self.run_doc['_id'],
                                             },
                                            {'$push': {'data': datum_new}})
        if result.matched_count == 0:
            # Another cax process already pushed an entry for this transfer.
            self.log.error("Race condition! Could not copy because another "
                           "process seemed to already start.")
            return
    logging.info("Start tape download")
    # Sanity Check
    if self.tsm.check_client_installation() == False:
        logging.info("There is a problem with your dsmc client")
        return
    # Do download:
    tsm_download_result = self.tsm.download(
        raw_data_location, raw_data_path, raw_data_filename)
    if os.path.exists(raw_data_path + raw_data_filename) == False:
        logging.info("Download to %s failed.", raw_data_path)
        if config.DATABASE_LOG:
            # Notify the database if something went wrong during the download:
            logging.info("Notifiy the runDB: error")
            self.collection.update({'_id': self.run_doc['_id'],
                                    'data': {
                                        '$elemMatch': datum_new}},
                                   {'$set': {'data.$.status': "error",
                                             'data.$.location': "n/a",
                                             'data.$.checksum': "n/a",
                                             }
                                    })
        # NOTE(review): execution continues after a failed download, so the
        # rename/checksum steps below run against a missing directory.
        # Presumably an early return was intended here — confirm.
    # Rename: strip the first 12 characters of each restored filename —
    # presumably the "<dataset>_" prefix added by copy_tsm at upload time;
    # TODO confirm the fixed length against the upload naming scheme.
    file_list = []
    for (dirpath, dirnames, filenames) in os.walk(raw_data_path + raw_data_filename):
        file_list.extend(filenames)
        break
    for i_file in file_list:
        path_old = raw_data_path + raw_data_filename + "/" + i_file
        path_new = raw_data_path + raw_data_filename + "/" + i_file[12:]
        if not os.path.exists(path_new):
            os.rename(path_old, path_new)
    # Do checksum and summarize it:
    checksum_after = self.tsm.get_checksum_folder(
        raw_data_path + "/" + raw_data_filename)
    logging.info("Summary of the download for checksum comparison:")
    logging.info("Number of downloaded files: %s",
                 tsm_download_result["tno_restored_objects"])
    logging.info("Transferred amount of data: %s",
                 tsm_download_result["tno_restored_bytes"])
    logging.info("Network transfer rate: %s",
                 tsm_download_result["tno_network_transfer_rate"])
    logging.info("Download time: %s",
                 tsm_download_result["tno_data_transfer_time"])
    logging.info("Number of failed downloads: %s",
                 tsm_download_result["tno_failed_objects"])
    logging.info("MD5 Hash (database entry): %s", datum['checksum'])
    logging.info("MD5 Hash (downloaded data): %s", checksum_after)
    if checksum_after == datum['checksum']:
        logging.info(
            "The download/restore of the raw data set %s was [SUCCESSFUL]", raw_data_filename)
        logging.info("Raw data set located at: %s",
                     raw_data_path + raw_data_filename)
    elif checksum_after != datum['checksum']:
        logging.info(
            "The download/restore of the raw data set %s [FAILED]", raw_data_filename)
        logging.info("Checksums do not agree!")
    # Notifiy the database for final registration
    if checksum_after == datum['checksum']:
        if config.DATABASE_LOG:
            # Notify the database if everything was fine:
            logging.info("Notifiy the runDB: transferred")
            self.collection.update({'_id': self.run_doc['_id'],
                                    'data': {
                                        '$elemMatch': datum_new}},
                                   {'$set': {'data.$.status': "transferred",
                                             'data.$.location': raw_data_path + raw_data_filename,
                                             'data.$.checksum': checksum_after,
                                             }
                                    })
        else:
            logging.info("Database is not notified")
    elif checksum_after != datum['checksum']:
        if config.DATABASE_LOG:
            # Notify the database if something went wrong during the download:
            logging.info("Notifiy the runDB: error")
            self.collection.update({'_id': self.run_doc['_id'],
                                    'data': {
                                        '$elemMatch': datum_new}},
                                   {'$set': {'data.$.status': "error",
                                             'data.$.location': "n/a",
                                             'data.$.checksum': "n/a",
                                             }
                                    })
        else:
            logging.info("Database is not notified")
    return 0
def copy_tsm(self, datum, destination, method, option_type):
    """Upload a raw data set to the PDC Stockholm tape backup (TSM).

    Steps: pre-checks on the source directory -> copy & rename into the
    tsm staging directory -> dsmc upload -> verification re-download ->
    checksum comparison -> run-DB status update ('transferred'/'error')
    -> deletion of the temporary staging directories.

    :param datum: run-DB data entry describing the local raw data
    :param destination: name of the tape destination for the new DB entry
    :param method: transfer protocol (always 'tsm' here)
    :param option_type: 'upload'
    :return: 0 (early returns without a value on pre-check failures)
    """
    # hard coded sha512 checksum which stands for an empty directory
    # (used for verifying the goodness of the uploaded data)
    checksum_empty_dir = "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
    # Init the TSM client for tape backup from an extern class
    self.tsm = TSMclient()
    logging.info('Tape Backup to PDC STOCKHOLM')
    print(datum, destination, method, option_type)  # NOTE(review): debug leftover
    logging.debug("Notifying run database")
    # Placeholder run-DB entry marking this upload as in progress.
    datum_new = {'type': datum['type'],
                 'host': destination,
                 'status': 'transferring',
                 'location': "n/a",
                 'checksum': None,
                 'creation_time': datetime.datetime.utcnow(),
                 }
    logging.info("new entry for rundb: %s", datum_new)
    if config.DATABASE_LOG == True:
        result = self.collection.update_one({'_id': self.run_doc['_id'],
                                             },
                                            {'$push': {'data': datum_new}})
        if result.matched_count == 0:
            # Another cax process already pushed an entry for this transfer.
            self.log.error("Race condition! Could not copy because another "
                           "process seemed to already start.")
            return
    raw_data_location = datum['location']
    raw_data_filename = datum['location'].split('/')[-1]
    raw_data_path = raw_data_location.replace(raw_data_filename, "")
    raw_data_tsm = config.get_config(config.get_hostname())['dir_tsm']
    logging.info("Raw data location @xe1t-datamanager: %s",
                 raw_data_location)
    logging.info("Path to raw data: %s", raw_data_path)
    logging.info("Path to tsm data: %s", raw_data_tsm)
    logging.info("File/Folder for backup: %s", raw_data_filename)
    # Do a simple pretest to analyse the directory what is going to be backed up;
    # continue only if there are files in the directory and no more folders.
    list_files = []
    list_folders = []
    for root, dirs, files in os.walk(raw_data_path + raw_data_filename):
        for name in files:
            list_files.append(name)
        for name in dirs:
            list_folders.append(name)
    # Sanity check if raw data folder contains a subfolder (mostly important for old raw data sets)
    if len(list_files) == 0 or len(list_folders) > 0:
        logging.info("ERROR: There are %s files in %s", len(
            list_files), raw_data_path + raw_data_filename)
        if len(list_folders) > 0:
            logging.info("ERROR: These folders are found in %s:",
                         raw_data_path + raw_data_filename)
            for i_folders in list_folders:
                logging.info(" <> %s", i_folders)
        logging.info("Check the error(s) and start again")
        if config.DATABASE_LOG:
            self.collection.update({'_id': self.run_doc['_id'],
                                    'data': {
                                        '$elemMatch': datum_new}},
                                   {'$set': {'data.$.status': "error",
                                             'data.$.location': "n/a",
                                             'data.$.checksum': "n/a",
                                             }
                                    })
        return
    else:
        logging.info(
            "Pre-test of %s counts %s files for tape upload [succcessful]", raw_data_path + raw_data_filename, len(list_files))
    # Do a checksum pre-test for double counts:
    checksum_pretest_list = []
    # NOTE(review): this iterates `files`, the loop variable leaked from the
    # os.walk loop above (i.e. the contents of the walk's LAST directory),
    # not `list_files`. With the no-subfolder layout enforced above the two
    # coincide — confirm before restructuring.
    for i_file in files:
        f_path = os.path.join(raw_data_path, raw_data_filename, i_file)
        pre_test_checksum = ChecksumMethods.get_crc32(self, f_path)
        checksum_pretest_list.append(pre_test_checksum)
    double_counts = set(
        [x for x in checksum_pretest_list if checksum_pretest_list.count(x) > 1])
    if len(double_counts) > 0:
        logging.info("Pre checksum test: [failed]")
        logging.info("There are two or more identical checksums observed in %s", os.path.join(
            raw_data_path, raw_data_filename))
        if config.DATABASE_LOG:
            self.collection.update({'_id': self.run_doc['_id'],
                                    'data': {
                                        '$elemMatch': datum_new}},
                                   {'$set': {'data.$.status': "error",
                                             'data.$.location': "n/a",
                                             'data.$.checksum': "n/a",
                                             }
                                    })
        return
    else:
        logging.info("Pre checksum test: [succcessful]")
    # Check first if everything is fine with the dsmc client
    if self.tsm.check_client_installation() == False:
        logging.info("There is a problem with your dsmc client")
        if config.DATABASE_LOG:
            self.collection.update({'_id': self.run_doc['_id'],
                                    'data': {
                                        '$elemMatch': datum_new}},
                                   {'$set': {'data.$.status': "error",
                                             'data.$.location': "n/a",
                                             'data.$.checksum': "n/a",
                                             }
                                    })
        return
    logging.info("Start tape upload")
    # Prepare a copy from raw data location to tsm location (including renaming)
    checksum_before_raw = self.tsm.get_checksum_folder(
        raw_data_path + raw_data_filename)
    file_list = []
    for (dirpath, dirnames, filenames) in os.walk(raw_data_path + raw_data_filename):
        file_list.extend(filenames)
        break
    if not os.path.exists(raw_data_tsm + raw_data_filename):
        os.makedirs(raw_data_tsm + raw_data_filename)
    # Stage each file under "<dataset>_<file>" so the archive names carry
    # the dataset; existing staged files are not copied again.
    for i_file in file_list:
        path_old = raw_data_path + raw_data_filename + "/" + i_file
        path_new = raw_data_tsm + raw_data_filename + \
            "/" + raw_data_filename + "_" + i_file
        if not os.path.exists(path_new):
            shutil.copy2(path_old, path_new)
    checksum_before_tsm = self.tsm.get_checksum_folder(
        raw_data_tsm + raw_data_filename)
    if checksum_before_raw != checksum_before_tsm:
        logging.info("Something went wrong during copy & rename")
        if config.DATABASE_LOG:
            self.collection.update({'_id': self.run_doc['_id'],
                                    'data': {
                                        '$elemMatch': datum_new}},
                                   {'$set': {'data.$.status': "error",
                                             'data.$.location': "n/a",
                                             'data.$.checksum': "n/a",
                                             }
                                    })
        return
    elif checksum_before_raw == checksum_before_tsm:
        logging.info("Copy & rename: [succcessful] -> Checksums agree")
    # Upload the staged directory to tape and log the dsmc statistics.
    tsm_upload_result = self.tsm.upload(raw_data_tsm + raw_data_filename)
    logging.info("Number of uploaded files: %s",
                 tsm_upload_result["tno_backedup"])
    logging.info("Number of inspected files: %s",
                 tsm_upload_result["tno_inspected"])
    logging.info("Number of failed files: %s",
                 tsm_upload_result["tno_failed"])
    logging.info("Transferred amount of data: %s",
                 tsm_upload_result["tno_bytes_transferred"])
    logging.info("Inspected amount of data: %s",
                 tsm_upload_result["tno_bytes_inspected"])
    logging.info("Upload time: %s",
                 tsm_upload_result["tno_data_transfer_time"])
    logging.info("Network transfer rate: %s",
                 tsm_upload_result["tno_network_transfer_rate"])
    logging.info("MD5 Hash (raw data): %s", checksum_before_tsm)
    # Verification: re-download into a scratch folder and re-checksum.
    test_download = os.path.join(raw_data_tsm, "tsm_verify_download")
    # Make sure that temp. download directory exists:
    if not os.path.exists(test_download):
        os.makedirs(test_download)
    logging.info("Start the re-download to %s", test_download)
    tsm_download_result = self.tsm.download(
        raw_data_tsm + raw_data_filename, test_download, raw_data_filename)
    logging.info("Finished the re-download")
    if os.path.exists(test_download + "/" + raw_data_filename) == False:
        logging.info("Download to %s failed. Checksum will not match",
                     test_download + "/" + raw_data_filename)
    else:
        logging.info("Download to %s succcessful. Folder exists",
                     test_download + "/" + raw_data_filename)
    checksum_after = self.tsm.get_checksum_folder(
        test_download + "/" + raw_data_filename)
    logging.info("Summary of the download for checksum comparison:")
    logging.info("Number of downloaded files: %s",
                 tsm_download_result["tno_restored_objects"])
    logging.info("Transferred amount of data: %s",
                 tsm_download_result["tno_restored_bytes"])
    logging.info("Network transfer rate: %s",
                 tsm_download_result["tno_network_transfer_rate"])
    logging.info("Download time: %s",
                 tsm_download_result["tno_data_transfer_time"])
    logging.info("Number of failed downloads: %s",
                 tsm_download_result["tno_failed_objects"])
    logging.info("MD5 Hash (raw data): %s", checksum_after)
    # Success requires matching checksums AND that neither side hashes to
    # the well-known empty-directory checksum.
    status = ""
    if checksum_before_tsm == checksum_after and checksum_empty_dir != checksum_before_tsm and checksum_empty_dir != checksum_after:
        logging.info("Upload to tape: [succcessful]")
        status = "transferred"
    else:
        logging.info("Upload to tape: [failed]")
        status = "error"
    # Print a warning if the checksum crosscheck fails!
    if checksum_empty_dir == checksum_before_tsm or checksum_empty_dir == checksum_after:
        logging.info(
            "Checksum test indicates an empty folder before or after the tape upload")
        logging.info("Check your raw data directory %s for files",
                     raw_data_tsm + raw_data_filename)
    # Delete check folder (staging copy and verification download)
    shutil.rmtree(raw_data_tsm + raw_data_filename)
    shutil.rmtree(test_download + "/" + raw_data_filename)
    logging.info("Finished to delete temp. directories: %s and %s",
                 raw_data_tsm + raw_data_filename, test_download + "/" + raw_data_filename)
    if config.DATABASE_LOG:
        self.collection.update({'_id': self.run_doc['_id'],
                                'data': {
                                    '$elemMatch': datum_new}},
                               {'$set': {'data.$.status': status,
                                         'data.$.location': raw_data_tsm + raw_data_filename,
                                         'data.$.checksum': checksum_after,
                                         }
                                })
    logging.info("Update database")
    return 0
def copy_handshake(self, datum, destination, method, option_type, data_type):
    """ Perform all the handshaking required with the run DB.

    Registers a new 'transferring' entry for this run in the run database,
    performs the actual copy via self.copy(), then records the outcome
    (status, location, checksum and -- for Rucio -- RSE/rule bookkeeping).

    :param datum: The dictionary data location describing data to be
        transferred
    :type str
    :param destination: The host name where data should go to.
    :type str
    :param method: Transfer backend to use (e.g. 'scp', 'rsync', 'rucio').
    :param option_type: Transfer direction: 'upload' or 'download'.
    :param data_type: Kind of data being transferred; forwarded to
        self.copy().
    :return: 0 when the Rucio sanity checks fail, otherwise None.
    """
    # Get information about this destination
    destination_config = config.get_config(destination)

    self.log.info(option_type + "ing run %d to: %s" % (self.run_doc['number'],
                                                       destination))

    # Determine where data should be copied to
    if destination_config['dir_%s' % datum['type']] != None:
        base_dir = destination_config['dir_%s' % datum['type']]
        # NOTE(review): base_dir was just assigned from the same expression
        # the enclosing 'if' proved non-None, so this branch looks
        # unreachable -- confirm intent before relying on it.
        if base_dir is None:
            self.log.info("no directory specified for %s" % datum['type'])
            return

        if datum['type'] == 'processed':
            self.log.info(datum)
            # Processed data is stored per pax version.
            base_dir = os.path.join(base_dir, 'pax_%s' %
                                    datum['pax_version'])

        # Check directory existence on local host for download only
        if option_type == 'download' and not os.path.exists(base_dir):
            if destination != config.get_hostname():
                raise NotImplementedError("Cannot create directory on another "
                                          "machine.")

            # Recursively make directories
            os.makedirs(base_dir)
    else:
        base_dir = "none"

    # Directory or filename to be copied
    filename = datum['location'].split('/')[-1]

    self.log.debug("Notifying run database")
    # New run-DB entry describing the in-flight transfer.
    datum_new = {'type': datum['type'],
                 'host': destination,
                 'status': 'transferring',
                 'location': os.path.join(base_dir,
                                          filename),
                 'checksum': None,
                 'creation_time': datetime.datetime.utcnow(),
                 }

    # Processed data carries extra provenance fields into the new entry.
    if datum['type'] == 'processed':
        for variable in ('pax_version', 'pax_hash', 'creation_place'):
            datum_new[variable] = datum.get(variable)

    if method == "rucio" and option_type == "upload":
        # Init the rucio module when method==rucio is requested
        self.log.info(
            "Init rucio_mover module for Rucio transfers (upload)")
        self.rucio = RucioBase(self.run_doc)
        self.rucio.set_host(config.get_hostname())
        self.rucio.set_remote_host(destination)

        # Sanity check for rucio client
        if self.rucio.sanity_checks() == False:
            logging.info("!!! <<The sanity checks fail>> !!!")
            return 0

        # Add two further database entries for rucio related uploads
        datum_new['rse'] = []
        datum_new['location'] = "n/a"
        datum_new['rule_info'] = "no_rule"

    if method == "rucio" and option_type == "download":
        rucio_catalogue_config = config.get_config("rucio-catalogue")
        self.log.info(
            "Init rucio_mover module for Rucio transfers (download)")
        # Load and config the download module of rucio/ruciax
        self.ruciodw = RucioDownload()
        self.ruciodw.SetDatabaseEntry(self.run_doc)
        self.ruciodw.ExternalDatabaseEntry()
        self.ruciodw.SetDownloadConfig(
            rucio_catalogue_config, destination_config)
        # specify a not available path for the download destination
        datum_new['location'] = "NA"

    if config.DATABASE_LOG == True:
        result = self.collection.update_one({'_id': self.run_doc['_id'],
                                             },
                                            {'$push': {'data': datum_new}})

        if result.matched_count == 0:
            self.log.error("Race condition! Could not copy because another "
                           "process seemed to already start.")
            return

    self.log.info('Starting ' + method)

    try:  # try to copy
        self.copy(datum,
                  datum_new,
                  method,
                  option_type, data_type)
        # Checksumming to follow on local site
        if method == 'scp' or method == 'rsync':
            status = 'verifying'
        # Cannot do cax-checksum on GRID sites,
        # so assume gfal-copy/lcg-cp checksum is sufficient
        else:
            status = 'verifying'
            # TO DO: Manually copy checksum to DB entry here
    except scp.SCPException as e:
        self.log.exception(e)
        status = 'error'
    # WARNING: This needs to be extended to catch gfal-copy errors
    except:
        self.log.exception("Unexpected copy error")
        status = 'error'

    self.log.debug(method + " done, telling run database")

    if config.DATABASE_LOG:
        if method == "rucio" and option_type == "upload":
            # Mirror the Rucio upload result into the run-DB entry.
            logging.info("Following entries are added to the runDB:")
            logging.info(" * Status: %s",
                         self.rucio.get_rucio_info()['status'])
            logging.info(" * Location: %s",
                         self.rucio.get_rucio_info()['location'])
            logging.info(" * Checksum: %s",
                         self.rucio.get_rucio_info()['checksum'])
            logging.info(" * RSE: %s", self.rucio.get_rucio_info()['rse'])
            logging.info(" * Preliminary rule information: %s",
                         self.rucio.get_rucio_info()['rule_info'])

            self.collection.update({'_id': self.run_doc['_id'],
                                    'data': {
                                        '$elemMatch': datum_new}},
                                   {'$set': {
                                       'data.$.status': self.rucio.get_rucio_info()['status'],
                                       'data.$.location': self.rucio.get_rucio_info()['location'],
                                       'data.$.checksum': self.rucio.get_rucio_info()['checksum'],
                                       'data.$.rse': self.rucio.get_rucio_info()['rse'],
                                       'data.$.rule_info': self.rucio.get_rucio_info()['rule_info']
                                   }
                                   })

        elif method == "rucio" and option_type == "download":
            logging.info("Following entries are added to the runDB:")
            logging.info(" * Status: %s",
                         self.ruciodw.get_rucio_info()['status'])
            logging.info(" * Location: %s",
                         self.ruciodw.get_rucio_info()['location'])

            self.collection.update({'_id': self.run_doc['_id'],
                                    'data': {
                                        '$elemMatch': datum_new}},
                                   {'$set': {
                                       'data.$.status': self.ruciodw.get_rucio_info()['status'],
                                       'data.$.location': self.ruciodw.get_rucio_info()['location']
                                   }
                                   })

        else:
            # Fill the data if method is not rucio
            # NOTE(review): this inner DATABASE_LOG check is redundant --
            # the enclosing branch already established it.
            if config.DATABASE_LOG:
                self.collection.update({'_id': self.run_doc['_id'],
                                        'data': {
                                            '$elemMatch': datum_new}},
                                       {'$set': {
                                           'data.$.status': status
                                       }
                                       })

    if method == "rucio" and option_type == "upload":
        # Rucio 'side load' to set the transfer rules directly after the file upload
        if self.rucio.get_rucio_info()['status'] == "transferred":
            logging.info(
                "Initiate the RucioRule for a first set of transfer rules")
            # Add: Outcome of the rucio transfers to the new database entry
            # without read the runDB again.
            datum_new['status'] = self.rucio.get_rucio_info()['status']
            datum_new['location'] = self.rucio.get_rucio_info()['location']
            datum_new['checksum'] = self.rucio.get_rucio_info()['checksum']
            datum_new['rse'] = self.rucio.get_rucio_info()['rse']
            datum_new['rule_info'] = self.rucio.get_rucio_info()[
                'rule_info']

            # Init the RucioRule module and set its runDB entry manually
            self.rucio_rule = RucioRule()
            self.rucio_rule.set_db_entry_manually(self.run_doc)
            # Perform the initial rule setting:
            self.rucio_rule.set_possible_rules(
                data_type=datum['type'], dbinfo=datum_new)
            logging.info("Status: transferred -> Transfer rules are set for %s",
                         self.rucio.get_rucio_info()['rse'])

            # Commented out due to upload section (rucio_mover) option 3!
            # No need to delete single file rules manually after upload
            # let it sleep for 5 seconds:
            # logging.info("Sleep")
            # time.sleep(15)
            #logging.info("Sleep time finished ")
            #self.log.info("Delete individual rules of the uploaded files:")
            # for i_file in self.rucio.get_rucio_info()['file_list']:
            # i_location = "{iscope}:{ifile}".format( iscope=self.rucio.get_rucio_info()['scope_upload'],
            # ifile=i_file.split("/")[-1] )
            #self.log.info("Time out for %s", i_location)
            #self.rucio_rule.update_rule( i_location, self.rucio.get_rucio_info()['rse'][0], "10" )
        else:
            logging.info(
                "Something went wrong during the upload (error). No rules are set")

    elif method == "rucio" and option_type == "download":
        logging.info("<-- Finished download %s to location %s with status %s -->",
                     datum['location'], self.ruciodw.get_rucio_info()['location'], self.ruciodw.get_rucio_info()['status'])

    logging.debug(method + " done, telling run database")
    logging.info("End of " + option_type + "\n")
class CopyPush(CopyBase):
    """Copy data to a remote site (upload).

    If the data is transferred to the current host and does not exist at any
    other site (including transferring), then copy data there.
    """
    # Direction flag read by the CopyBase machinery.
    option_type = 'upload'
class CopyPull(CopyBase):
    """Copy data to the current host (download).

    If data exists at a reachable host but not here, pull it.
    """
    # Direction flag read by the CopyBase machinery.
    option_type = 'download'
| |
# Requires python3
import re
import sqlite3
import subprocess
import shutil
import os
import codecs
import datetime
import sys
import psycopg2
import psycopg2.extras
import socket
import csv
class TskDbDiff(object):
    """Compares two TSK/Autopsy SQLite (or PostgreSQL) databases.

    Dumps both databases to sorted text files (blackboard and
    non-blackboard content separately) and diffs the dumps.

    Attributes:
        gold_artifacts:
        autopsy_artifacts:
        gold_attributes:
        autopsy_attributes:
        gold_objects:
        autopsy_objects:
        artifact_comparison:
        attribute_comparison:
        report_errors: a listof_listof_String, the error messages that will be
            printed to screen in the run_diff method
        passed: a boolean, did the diff pass?
        autopsy_db_file:
        gold_db_file:
    """

    def __init__(self, output_db, gold_db, output_dir=None, gold_bb_dump=None, gold_dump=None, verbose=False, isMultiUser=False, pgSettings=None):
        """Constructor for TskDbDiff.

        Args:
            output_db_path: path to output database (non-gold standard)
            gold_db_path: path to gold database
            output_dir: (optional) Path to folder where generated files will be put.
            gold_bb_dump: (optional) path to file where the gold blackboard dump is located
            gold_dump: (optional) path to file where the gold non-blackboard dump is located
            verbose: (optional) a boolean, if true, diff results are sent to stdout.
            isMultiUser: (optional) a boolean, if true, use PostgreSQL via pgSettings.
            pgSettings: (optional) PGSettings with the PostgreSQL connection data.
        """
        self.output_db_file = output_db
        self.gold_db_file = gold_db
        self.output_dir = output_dir
        self.gold_bb_dump = gold_bb_dump
        self.gold_dump = gold_dump
        # Gold dumps are generated on the fly when not supplied by the caller.
        self._generate_gold_dump = False
        self._generate_gold_bb_dump = False
        self._bb_dump_diff = ""
        self._dump_diff = ""
        self._bb_dump = ""
        self._dump = ""
        self.verbose = verbose
        self.isMultiUser = isMultiUser
        self.pgSettings = pgSettings

        # Multi-user (PostgreSQL) mode cannot work without connection settings.
        if self.isMultiUser and not self.pgSettings:
            print("Missing PostgreSQL database connection settings data.")
            sys.exit(1)

        if self.gold_bb_dump is None:
            self._generate_gold_bb_dump = True
        if self.gold_dump is None:
            self._generate_gold_dump = True

    def run_diff(self):
        """Compare the databases.

        Returns:
            (dump_diff_pass, bb_dump_diff_pass): booleans for the
            non-blackboard and blackboard comparisons respectively.

        Raises:
            TskDbDiffException: if an error occurs while diffing or dumping the database
        """
        self._init_diff()
        # Sentinel: -1 means "no object-id -> path table available yet".
        id_obj_path_table = -1
        # generate the gold database dumps if necessary
        if self._generate_gold_dump:
            id_obj_path_table = TskDbDiff._dump_output_db_nonbb(self.gold_db_file, self.gold_dump, self.isMultiUser, self.pgSettings)
        if self._generate_gold_bb_dump:
            TskDbDiff._dump_output_db_bb(self.gold_db_file, self.gold_bb_dump, self.isMultiUser, self.pgSettings, id_obj_path_table)

        # generate the output database dumps (both DB and BB)
        id_obj_path_table = TskDbDiff._dump_output_db_nonbb(self.output_db_file, self._dump, self.isMultiUser, self.pgSettings)
        TskDbDiff._dump_output_db_bb(self.output_db_file, self._bb_dump, self.isMultiUser, self.pgSettings, id_obj_path_table)

        # Compare non-BB
        dump_diff_pass = self._diff(self._dump, self.gold_dump, self._dump_diff)

        # Compare BB
        bb_dump_diff_pass = self._diff(self._bb_dump, self.gold_bb_dump, self._bb_dump_diff)

        self._cleanup_diff()
        return dump_diff_pass, bb_dump_diff_pass

    def _init_diff(self):
        """Set up the necessary files based on the arguments given at construction"""
        if self.output_dir is None:
            # No stored files
            self._bb_dump = TskDbDiff._get_tmp_file("BlackboardDump", ".txt")
            self._bb_dump_diff = TskDbDiff._get_tmp_file("BlackboardDump-Diff", ".txt")
            self._dump = TskDbDiff._get_tmp_file("DBDump", ".txt")
            self._dump_diff = TskDbDiff._get_tmp_file("DBDump-Diff", ".txt")
        else:
            self._bb_dump = os.path.join(self.output_dir, "BlackboardDump.txt")
            self._bb_dump_diff = os.path.join(self.output_dir, "BlackboardDump-Diff.txt")
            self._dump = os.path.join(self.output_dir, "DBDump.txt")
            self._dump_diff = os.path.join(self.output_dir, "DBDump-Diff.txt")

        # Sorting gold before comparing (sort behaves differently in different environments)
        new_bb = TskDbDiff._get_tmp_file("GoldBlackboardDump", ".txt")
        new_db = TskDbDiff._get_tmp_file("GoldDBDump", ".txt")
        # NOTE(review): only gold_bb_dump is checked here; gold_dump is
        # sorted unconditionally inside this branch -- confirm callers
        # always supply both or neither.
        if self.gold_bb_dump is not None:
            srtcmdlst = ["sort", self.gold_bb_dump, "-o", new_bb]
            subprocess.call(srtcmdlst)
            srtcmdlst = ["sort", self.gold_dump, "-o", new_db]
            subprocess.call(srtcmdlst)
            self.gold_bb_dump = new_bb
            self.gold_dump = new_db

    def _cleanup_diff(self):
        """Remove the temporary dump/diff files created by _init_diff."""
        if self.output_dir is None:
            # cleanup temp files
            os.remove(self._dump)
            os.remove(self._bb_dump)
            if os.path.isfile(self._dump_diff):
                os.remove(self._dump_diff)
            if os.path.isfile(self._bb_dump_diff):
                os.remove(self._bb_dump_diff)

        # NOTE(review): gold_bb_dump is reassigned to a sorted temp copy in
        # _init_diff when it was not None; if it IS None here, os.remove(None)
        # would raise -- confirm this path is actually exercised.
        if self.gold_bb_dump is None:
            os.remove(self.gold_bb_dump)
            os.remove(self.gold_dump)

    def _diff(self, output_file, gold_file, diff_path):
        """Compare two text files.

        Args:
            output_file: a pathto_File, the latest text file
            gold_file: a pathto_File, the gold text file
            diff_path: The file to write the differences to

        Returns False if different
        """
        if (not os.path.isfile(output_file)):
            return False

        if (not os.path.isfile(gold_file)):
            return False

        # It is faster to read the contents in and directly compare
        output_data = codecs.open(output_file, "r", "utf_8").read()
        gold_data = codecs.open(gold_file, "r", "utf_8").read()
        if (gold_data == output_data):
            return True

        # If they are different, invoke 'diff'
        diff_file = codecs.open(diff_path, "wb", "utf_8")
        # Gold needs to be passed in as 1st arg and output as 2nd
        dffcmdlst = ["diff", gold_file, output_file]
        subprocess.call(dffcmdlst, stdout = diff_file)

        # create file path for gold files inside output folder. In case of diff, both gold and current run files
        # are available in the report output folder. Prefix Gold- is added to the filename.
        # NOTE(review): assumes '/' path separators -- verify on Windows.
        gold_file_in_output_dir = output_file[:output_file.rfind("/")] + "/Gold-" + output_file[output_file.rfind("/")+1:]
        shutil.copy(gold_file, gold_file_in_output_dir)

        return False

    def _dump_output_db_bb(db_file, bb_dump_file, isMultiUser, pgSettings, id_obj_path_table):
        """Dumps sorted text results to the given output location.

        Smart method that deals with a blackboard comparison to avoid issues
        with different IDs based on when artifacts were created.

        Args:
            db_file: a pathto_File, the output database.
            bb_dump_file: a pathto_File, the sorted dump file to write to
        """
        # NOTE(review): defined without 'self' and called as
        # TskDbDiff._dump_output_db_bb(...) -- valid in Python 3 only.
        unsorted_dump = TskDbDiff._get_tmp_file("dump_data", ".txt")
        if isMultiUser:
            conn, unused_db = db_connect(db_file, isMultiUser, pgSettings)
            artifact_cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        else: # Use Sqlite
            conn = sqlite3.connect(db_file)
            conn.text_factory = lambda x: x.decode("utf-8", "ignore")
            conn.row_factory = sqlite3.Row
            artifact_cursor = conn.cursor()
        # Get the list of all artifacts (along with type and associated file)
        # @@@ Could add a SORT by parent_path in here since that is how we are going to later sort it.
        artifact_cursor.execute("SELECT tsk_files.parent_path, tsk_files.name, blackboard_artifact_types.display_name, blackboard_artifacts.artifact_id FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_files ON tsk_files.obj_id = blackboard_artifacts.obj_id")
        database_log = codecs.open(unsorted_dump, "wb", "utf_8")
        row = artifact_cursor.fetchone()
        appnd = False
        counter = 0
        artifact_count = 0
        artifact_fail = 0

        # Cycle through artifacts
        try:
            while (row != None):

                # File Name and artifact type
                if(row["parent_path"] != None):
                    database_log.write(row["parent_path"] + row["name"] + ' <artifact type="' + row["display_name"] + '" > ')
                else:
                    database_log.write(row["name"] + ' <artifact type="' + row["display_name"] + '" > ')

                if isMultiUser:
                    attribute_cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
                else:
                    attribute_cursor = conn.cursor()
                looptry = True
                artifact_count += 1
                try:
                    art_id = ""
                    art_id = str(row["artifact_id"])

                    # Get attributes for this artifact
                    if isMultiUser:
                        attribute_cursor.execute("SELECT blackboard_attributes.source, blackboard_attributes.attribute_type_id, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id = %s ORDER BY blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double", [art_id])
                    else:
                        attribute_cursor.execute("SELECT blackboard_attributes.source, blackboard_attributes.attribute_type_id, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id =? ORDER BY blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double", [art_id])

                    attributes = attribute_cursor.fetchall()

                    # Print attributes
                    if (len(attributes) == 0):
                        # @@@@ This should be </artifact>
                        database_log.write(' <artifact/>\n')
                        row = artifact_cursor.fetchone()
                        continue

                    src = attributes[0][0]
                    for attr in attributes:
                        # Count how many of the three value columns are set;
                        # more than one is flagged as an inconsistency below.
                        numvals = 0
                        for x in range(3, 6):
                            if(attr[x] != None):
                                numvals += 1
                        if(numvals > 1):
                            msg = "There were too many values for attribute type: " + attr["display_name"] + " for artifact with id #" + str(row["artifact_id"]) + ".\n"

                        if(not attr["source"] == src):
                            msg = "There were inconsistent sources for artifact with id #" + str(row["artifact_id"]) + ".\n"

                        try:
                            # Render the attribute value according to its value_type.
                            if attr["value_type"] == 0:
                                attr_value_as_string = str(attr["value_text"])
                            elif attr["value_type"] == 1:
                                attr_value_as_string = str(attr["value_int32"])
                            elif attr["value_type"] == 2:
                                attr_value_as_string = str(attr["value_int64"])
                                if attr["attribute_type_id"] == 36 and id_obj_path_table != -1 and int(attr_value_as_string) > 0: #normalize positive TSK_PATH_IDs from being object id to a path if the obj_id_path_table was generated
                                    attr_value_as_string = id_obj_path_table[int(attr_value_as_string)]
                            elif attr["value_type"] == 3:
                                attr_value_as_string = "%20.10f" % float((attr["value_double"])) #use exact format from db schema to avoid python auto format double value to (0E-10) scientific style
                            elif attr["value_type"] == 4:
                                attr_value_as_string = "bytes"
                            elif attr["value_type"] == 5:
                                attr_value_as_string = str(attr["value_int64"])
                            if attr["display_name"] == "Associated Artifact":
                                attr_value_as_string = getAssociatedArtifactType(attribute_cursor, attr_value_as_string, isMultiUser)
                            # Collapse control characters so each artifact stays on one line.
                            patrn = re.compile("[\n\0\a\b\r\f]")
                            attr_value_as_string = re.sub(patrn, ' ', attr_value_as_string)
                            database_log.write('<attribute source="' + attr["source"] + '" type="' + attr["display_name"] + '" value="' + attr_value_as_string + '" />')
                        except IOError as e:
                            print("IO error")
                            raise TskDbDiffException("Unexpected IO error while writing to database log." + str(e))

                except sqlite3.Error as e:
                    msg = "Attributes in artifact id (in output DB)# " + str(row["artifact_id"]) + " encountered an error: " + str(e) +" .\n"
                    print("Attributes in artifact id (in output DB)# ", str(row["artifact_id"]), " encountered an error: ", str(e))
                    print()
                    looptry = False
                    artifact_fail += 1
                    database_log.write('Error Extracting Attributes')
                    database_log.close()
                    raise TskDbDiffException(msg)
                finally:
                    attribute_cursor.close()

                # @@@@ This should be </artifact>
                database_log.write(' <artifact/>\n')
                row = artifact_cursor.fetchone()

            if(artifact_fail > 0):
                msg ="There were " + str(artifact_count) + " artifacts and " + str(artifact_fail) + " threw an exception while loading.\n"
        except Exception as e:
            raise TskDbDiffException("Unexpected error while dumping blackboard database: " + str(e))
        finally:
            database_log.close()
            artifact_cursor.close()
            conn.close()

        # Now sort the file
        srtcmdlst = ["sort", unsorted_dump, "-o", bb_dump_file]
        subprocess.call(srtcmdlst)

    def _dump_output_db_nonbb(db_file, dump_file, isMultiUser, pgSettings):
        """Dumps a database to a text file.

        Does not dump the artifact and attributes.

        Args:
            db_file: a pathto_File, the database file to dump
            dump_file: a pathto_File, the location to dump the non-blackboard database items

        Returns:
            The object-id -> path lookup table, for later use by the
            blackboard dump.
        """
        conn, backup_db_file = db_connect(db_file, isMultiUser, pgSettings)
        # Lookup tables used to replace run-dependent object ids with
        # stable paths/names in the dump.
        id_files_table = build_id_files_table(conn.cursor(), isMultiUser)
        id_vs_parts_table = build_id_vs_parts_table(conn.cursor(), isMultiUser)
        id_vs_info_table = build_id_vs_info_table(conn.cursor(), isMultiUser)
        id_fs_info_table = build_id_fs_info_table(conn.cursor(), isMultiUser)
        id_objects_table = build_id_objects_table(conn.cursor(), isMultiUser)
        id_artifact_types_table = build_id_artifact_types_table(conn.cursor(), isMultiUser)
        id_legacy_artifact_types = build_id_legacy_artifact_types_table(conn.cursor(), isMultiUser)
        id_reports_table = build_id_reports_table(conn.cursor(), isMultiUser)
        id_images_table = build_id_image_names_table(conn.cursor(), isMultiUser)
        id_obj_path_table = build_id_obj_path_table(id_files_table, id_objects_table, id_artifact_types_table, id_reports_table, id_images_table)

        if isMultiUser: # Use PostgreSQL
            os.environ['PGPASSWORD']=pgSettings.password
            pgDump = ["pg_dump", "--inserts", "-U", pgSettings.username, "-h", pgSettings.pgHost, "-p", pgSettings.pgPort, "-d", db_file, "-E", "utf-8", "-T", "blackboard_artifacts", "-T", "blackboard_attributes", "-f", "postgreSQLDump.sql"]
            subprocess.call(pgDump)
            postgreSQL_db = codecs.open("postgreSQLDump.sql", "r", "utf-8")
            # Write to the database dump
            with codecs.open(dump_file, "wb", "utf_8") as db_log:
                dump_line = ''
                for line in postgreSQL_db:
                    line = line.strip('\r\n ')
                    # Deal with pg_dump result file
                    if (line.startswith('--') or line.lower().startswith('alter') or "pg_catalog" in line or "idle_in_transaction_session_timeout" in line or not line): # It's comment or alter statement or catalog entry or set idle entry or empty line
                        continue
                    elif not line.endswith(';'): # Statement not finished
                        dump_line += line
                        continue
                    else:
                        dump_line += line
                    if 'INSERT INTO image_gallery_groups_seen' in dump_line:
                        dump_line = ''
                        continue;
                    dump_line = normalize_db_entry(dump_line, id_obj_path_table, id_vs_parts_table, id_vs_info_table, id_fs_info_table, id_objects_table, id_reports_table, id_images_table, id_legacy_artifact_types)
                    db_log.write('%s\n' % dump_line)
                    dump_line = ''
            postgreSQL_db.close()
        else: # use Sqlite
            # Delete the blackboard tables
            conn.text_factory = lambda x: x.decode("utf-8", "ignore")
            conn.execute("DROP TABLE blackboard_artifacts")
            conn.execute("DROP TABLE blackboard_attributes")

            # Write to the database dump
            with codecs.open(dump_file, "wb", "utf_8") as db_log:
                for line in conn.iterdump():
                    if 'INSERT INTO "image_gallery_groups_seen"' in line:
                        continue
                    line = normalize_db_entry(line, id_obj_path_table, id_vs_parts_table, id_vs_info_table, id_fs_info_table, id_objects_table, id_reports_table, id_images_table, id_legacy_artifact_types)
                    db_log.write('%s\n' % line)

        # Now sort the file
        srtcmdlst = ["sort", dump_file, "-o", dump_file]
        subprocess.call(srtcmdlst)

        conn.close()
        # cleanup the backup
        if backup_db_file:
            os.remove(backup_db_file)
        return id_obj_path_table

    def dump_output_db(db_file, dump_file, bb_dump_file, isMultiUser, pgSettings):
        """Dumps the given database to text files for later comparison.

        Args:
            db_file: a pathto_File, the database file to dump
            dump_file: a pathto_File, the location to dump the non-blackboard database items
            bb_dump_file: a pathto_File, the location to dump the blackboard database items
        """
        # Non-blackboard dump first: it produces the id -> path table the
        # blackboard dump needs for normalization.
        id_obj_path_table = TskDbDiff._dump_output_db_nonbb(db_file, dump_file, isMultiUser, pgSettings)
        TskDbDiff._dump_output_db_bb(db_file, bb_dump_file, isMultiUser, pgSettings, id_obj_path_table)

    def _get_tmp_file(base, ext):
        """Return a time-stamped temp-file path for the given base name/extension."""
        # NOTE(review): relies on the TMP environment variable (Windows-style)
        # and raises KeyError where it is unset -- tempfile.gettempdir()
        # would be more portable.
        time = datetime.datetime.now().time().strftime("%H%M%f")
        return os.path.join(os.environ['TMP'], base + time + ext)
class TskDbDiffException(Exception):
    """Raised when dumping or diffing a TSK/Autopsy database fails."""
    pass
class PGSettings(object):
    """Container for PostgreSQL connection settings.

    Attributes:
        pgHost: host name or address of the PostgreSQL server.
        pgPort: TCP port of the server (default 5432).
        username: login role name.
        password: login password.
    """

    def __init__(self, pgHost=None, pgPort=5432, user=None, password=None):
        self.pgHost = pgHost
        self.pgPort = pgPort
        self.username = user
        self.password = password

    # BUG FIX: these accessors were defined without the 'self' parameter, so
    # any call such as settings.get_pgHost() raised a TypeError and 'self'
    # was an undefined name inside them.
    def get_pgHost(self):
        """Return the PostgreSQL host."""
        return self.pgHost

    def get_pgPort(self):
        """Return the PostgreSQL port."""
        return self.pgPort

    def get_username(self):
        """Return the login user name."""
        return self.username

    def get_password(self):
        """Return the login password."""
        return self.password
def normalize_db_entry(line, files_table, vs_parts_table, vs_info_table, fs_info_table, objects_table, reports_table, images_table, artifact_table):
    """ Make testing more consistent and reasonable by doctoring certain db entries.

    Rewrites one SQL INSERT statement so that run-dependent values
    (object ids, timestamps, host names) are replaced with stable,
    reproducible values before the dumps are diffed.

    Args:
        line: a String, the line to remove the object id from.
        files_table: a map from object ids to file paths.
        vs_parts_table / vs_info_table / fs_info_table / objects_table /
        reports_table / images_table / artifact_table: further id -> stable
            value lookup maps for the corresponding tables.

    Returns:
        The normalized statement, or the original line unchanged when no
        rule applies (or an id lookup fails).
    """
    # Sqlite statement use double quotes for table name, PostgreSQL doesn't. We check both databases results for normalization.
    files_index = line.find('INSERT INTO "tsk_files"') > -1 or line.find('INSERT INTO tsk_files ') > -1
    path_index = line.find('INSERT INTO "tsk_files_path"') > -1 or line.find('INSERT INTO tsk_files_path ') > -1
    object_index = line.find('INSERT INTO "tsk_objects"') > -1 or line.find('INSERT INTO tsk_objects ') > -1
    report_index = line.find('INSERT INTO "reports"') > -1 or line.find('INSERT INTO reports ') > -1
    layout_index = line.find('INSERT INTO "tsk_file_layout"') > -1 or line.find('INSERT INTO tsk_file_layout ') > -1
    data_source_info_index = line.find('INSERT INTO "data_source_info"') > -1 or line.find('INSERT INTO data_source_info ') > -1
    event_description_index = line.find('INSERT INTO "tsk_event_descriptions"') > -1 or line.find('INSERT INTO tsk_event_descriptions ') > -1
    events_index = line.find('INSERT INTO "tsk_events"') > -1 or line.find('INSERT INTO tsk_events ') > -1
    ingest_job_index = line.find('INSERT INTO "ingest_jobs"') > -1 or line.find('INSERT INTO ingest_jobs ') > -1
    examiners_index = line.find('INSERT INTO "tsk_examiners"') > -1 or line.find('INSERT INTO tsk_examiners ') > -1
    ig_groups_index = line.find('INSERT INTO "image_gallery_groups"') > -1 or line.find('INSERT INTO image_gallery_groups ') > -1
    ig_groups_seen_index = line.find('INSERT INTO "image_gallery_groups_seen"') > -1 or line.find('INSERT INTO image_gallery_groups_seen ') > -1

    # Extract the VALUES(...) payload and split it into individual fields.
    parens = line[line.find('(') + 1 : line.rfind(')')]
    no_space_parens = parens.replace(" ", "")
    fields_list = list(csv.reader([no_space_parens], quotechar="'"))[0]

    #Add back in the quotechar for values that were originally wrapped (csv reader consumes this character)
    fields_list_with_quotes = []
    ptr = 0
    for field in fields_list:
        if(len(field) == 0):
            field = "'" + field + "'"
        else:
            # Locate the field in the original string to see if it was quoted.
            start = no_space_parens.find(field, ptr)
            if((start - 1) >= 0 and no_space_parens[start - 1] == '\''):
                if((start + len(field)) < len(no_space_parens) and no_space_parens[start + len(field)] == '\''):
                    field = "'" + field + "'"
        fields_list_with_quotes.append(field)
        if(ptr > 0):
            #Add one for each comma that is used to separate values in the original string
            ptr+=1
        ptr += len(field)

    fields_list = fields_list_with_quotes

    # remove object ID
    if files_index:
        newLine = ('INSERT INTO "tsk_files" VALUES(' + ', '.join(fields_list[1:]) + ');')
        return newLine
    # remove group ID
    elif ig_groups_index:
        newLine = ('INSERT INTO "image_gallery_groups" VALUES(' + ', '.join(fields_list[1:]) + ');')
        return newLine
    #remove id field
    elif ig_groups_seen_index:
        # Only removing the id and group_id fields for now. May need to care about examiner_id and seen fields in future.
        newLine = ('INSERT INTO "image_gallery_groups_seen" VALUES(' + ', '.join(fields_list[2:]) + ');')
        return newLine
    # remove object ID
    elif path_index:
        obj_id = int(fields_list[0])
        objValue = files_table[obj_id]
        # remove the obj_id from ModuleOutput/EmbeddedFileExtractor directory
        # NOTE(review): when 'EmbeddedFileExtractor' is absent, find() returns
        # -1 and idx_pre becomes len('EmbeddedFileExtractor') - 1, so this
        # check is always true -- confirm intended behavior.
        idx_pre = fields_list[1].find('EmbeddedFileExtractor') + len('EmbeddedFileExtractor')
        if idx_pre > -1:
            idx_pos = fields_list[1].find('\\', idx_pre + 2)
            dir_to_replace = fields_list[1][idx_pre + 1 : idx_pos] # +1 to skip the file seperator
            dir_to_replace = dir_to_replace[0:dir_to_replace.rfind('_')]
            pathValue = fields_list[1][:idx_pre+1] + dir_to_replace + fields_list[1][idx_pos:]
        else:
            pathValue = fields_list[1]
        # remove localhost from postgres par_obj_name
        multiOutput_idx = pathValue.find('ModuleOutput')
        if multiOutput_idx > -1:
            pathValue = "'" + pathValue[pathValue.find('ModuleOutput'):] #postgres par_obj_name include losthost
        newLine = ('INSERT INTO "tsk_files_path" VALUES(' + objValue + ', ' + pathValue + ', ' + ', '.join(fields_list[2:]) + ');')
        return newLine
    # remove object ID
    elif layout_index:
        obj_id = fields_list[0]
        path= files_table[int(obj_id)]
        newLine = ('INSERT INTO "tsk_file_layout" VALUES(' + path + ', ' + ', '.join(fields_list[1:]) + ');')
        return newLine
    # remove object ID
    elif object_index:
        obj_id = fields_list[0]
        parent_id = fields_list[1]
        newLine = 'INSERT INTO "tsk_objects" VALUES('
        path = None
        parent_path = None

        #if obj_id or parent_id is invalid literal, we simple return the values as it is
        try:
            obj_id = int(obj_id)
            if parent_id != 'NULL':
                parent_id = int(parent_id)
        except Exception as e:
            print(obj_id, parent_id)
            return line

        # Resolve the object id to a stable path via whichever table knows it.
        if obj_id in files_table.keys():
            path = files_table[obj_id]
        elif obj_id in vs_parts_table.keys():
            path = vs_parts_table[obj_id]
        elif obj_id in vs_info_table.keys():
            path = vs_info_table[obj_id]
        elif obj_id in fs_info_table.keys():
            path = fs_info_table[obj_id]
        elif obj_id in reports_table.keys():
            path = reports_table[obj_id]

        # remove host name (for multi-user) and dates/times from path for reports
        if path is not None:
            if 'ModuleOutput' in path:
                # skip past the host name (if any)
                path = path[path.find('ModuleOutput'):]
            if 'BulkExtractor' in path or 'Smirk' in path:
                # chop off the last folder (which contains a date/time)
                path = path[:path.rfind('\\')]
            if 'Reports\\AutopsyTestCase HTML Report' in path:
                path = 'Reports\\AutopsyTestCase HTML Report'

        # Resolve the parent id the same way.
        if parent_id in files_table.keys():
            parent_path = files_table[parent_id]
        elif parent_id in vs_parts_table.keys():
            parent_path = vs_parts_table[parent_id]
        elif parent_id in vs_info_table.keys():
            parent_path = vs_info_table[parent_id]
        elif parent_id in fs_info_table.keys():
            parent_path = fs_info_table[parent_id]
        elif parent_id in images_table.keys():
            parent_path = images_table[parent_id]
        elif parent_id == 'NULL':
            parent_path = "NULL"

        # Remove host name (for multi-user) from parent_path
        if parent_path is not None:
            if 'ModuleOutput' in parent_path:
                # skip past the host name (if any)
                parent_path = parent_path[parent_path.find('ModuleOutput'):]

        if path and parent_path:
            return newLine + path + ', ' + parent_path + ', ' + ', '.join(fields_list[2:]) + ');'
        else:
            # Could not resolve both ids: leave the statement untouched.
            return line
    # remove time-based information, ie Test_6/11/14 -> Test
    elif report_index:
        fields_list[1] = "AutopsyTestCase"
        fields_list[2] = "0"
        newLine = ('INSERT INTO "reports" VALUES(' + ','.join(fields_list[1:]) + ');') # remove report_id
        return newLine
    elif data_source_info_index:
        fields_list[1] = "{device id}"
        newLine = ('INSERT INTO "data_source_info" VALUES(' + ','.join(fields_list) + ');')
        return newLine
    elif ingest_job_index:
        fields_list[2] = "{host_name}"
        start_time = int(fields_list[3])
        end_time = int(fields_list[4])
        # Zero out the timing columns only when they are sane (start <= end).
        if (start_time <= end_time):
            fields_list[3] = "0"
            fields_list[4] = "0"
        newLine = ('INSERT INTO "ingest_jobs" VALUES(' + ','.join(fields_list) + ');')
        return newLine
    elif examiners_index:
        fields_list[1] = "{examiner_name}"
        newLine = ('INSERT INTO "tsk_examiners" VALUES(' + ','.join(fields_list) + ');')
        return newLine
    # remove all timing dependent columns from events table
    elif events_index:
        newLine = ('INSERT INTO "tsk_events" VALUES(' + ','.join(fields_list[1:2]) + ');')
        return newLine
    # remove object ids from event description table
    elif event_description_index:
        # replace object ids with information that is deterministic
        file_obj_id = int(fields_list[5])
        object_id = int(fields_list[4])
        legacy_artifact_id = 'NULL'
        if (fields_list[6] != 'NULL'):
            legacy_artifact_id = int(fields_list[6])
        if file_obj_id != 'NULL' and file_obj_id in files_table.keys():
            fields_list[5] = files_table[file_obj_id]
        if object_id != 'NULL' and object_id in files_table.keys():
            fields_list[4] = files_table[object_id]
        if legacy_artifact_id != 'NULL' and legacy_artifact_id in artifact_table.keys():
            fields_list[6] = artifact_table[legacy_artifact_id]
        newLine = ('INSERT INTO "tsk_event_descriptions" VALUES(' + ','.join(fields_list[1:]) + ');') # remove report_id
        return newLine
    else:
        return line
def getAssociatedArtifactType(cur, artifact_id, isMultiUser):
    """Return a description of the artifact's parent file path and type name.

    Args:
        cur: an open database cursor
        artifact_id: id of the blackboard artifact to look up
        isMultiUser: True for PostgreSQL (%s placeholders), False for SQLite (?)
    """
    # The two branches differ only in the parameter placeholder style.
    placeholder = "%s" if isMultiUser else "?"
    cur.execute("SELECT tsk_files.parent_path, blackboard_artifact_types.display_name FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_files ON tsk_files.obj_id = blackboard_artifacts.obj_id WHERE artifact_id=" + placeholder, [artifact_id])
    row = cur.fetchone()
    return "File path: " + row[0] + " Artifact Type: " + row[1]
def build_id_files_table(db_cursor, isPostgreSQL):
    """Build the map of object ids to file paths.

    Args:
        db_cursor: the database cursor
        isPostgreSQL: True when querying PostgreSQL, False for SQLite

    Returns:
        dict mapping obj_id -> full file path (parent_path + name)
    """
    # Dict comprehension instead of dict([...]) — same mapping, idiomatic form.
    return {row[0]: str(row[1]) + str(row[2])
            for row in sql_select_execute(db_cursor, isPostgreSQL, "SELECT obj_id, parent_path, name FROM tsk_files")}
def build_id_vs_parts_table(db_cursor, isPostgreSQL):
    """Build the map of object ids to volume-system parts.

    Args:
        db_cursor: the database cursor
        isPostgreSQL: True when querying PostgreSQL, False for SQLite

    Returns:
        dict mapping obj_id -> "<addr>_<start>" identifier string
    """
    # Dict comprehension instead of dict([...]) — same mapping, idiomatic form.
    return {row[0]: str(row[1]) + '_' + str(row[2])
            for row in sql_select_execute(db_cursor, isPostgreSQL, "SELECT obj_id, addr, start FROM tsk_vs_parts")}
def build_id_vs_info_table(db_cursor, isPostgreSQL):
    """Build the map of object ids to volume-system info.

    Args:
        db_cursor: the database cursor
        isPostgreSQL: True when querying PostgreSQL, False for SQLite

    Returns:
        dict mapping obj_id -> "<vs_type>_<img_offset>" identifier string
    """
    # Dict comprehension instead of dict([...]) — same mapping, idiomatic form.
    return {row[0]: str(row[1]) + '_' + str(row[2])
            for row in sql_select_execute(db_cursor, isPostgreSQL, "SELECT obj_id, vs_type, img_offset FROM tsk_vs_info")}
def build_id_fs_info_table(db_cursor, isPostgreSQL):
    """Build the map of object ids to file-system info.

    Args:
        db_cursor: the database cursor
        isPostgreSQL: True when querying PostgreSQL, False for SQLite

    Returns:
        dict mapping obj_id -> "<img_offset>_<fs_type>" identifier string
    """
    # Dict comprehension instead of dict([...]) — same mapping, idiomatic form.
    return {row[0]: str(row[1]) + '_' + str(row[2])
            for row in sql_select_execute(db_cursor, isPostgreSQL, "SELECT obj_id, img_offset, fs_type FROM tsk_fs_info")}
def build_id_objects_table(db_cursor, isPostgreSQL):
    """Build the map of object ids to their parent id and type.

    Args:
        db_cursor: the database cursor
        isPostgreSQL: True when querying PostgreSQL, False for SQLite

    Returns:
        dict mapping obj_id -> [par_obj_id, type]
    """
    # Dict comprehension instead of dict([...]) — same mapping, idiomatic form.
    # Assumes tsk_objects columns are (obj_id, par_obj_id, type) in that order.
    return {row[0]: [row[1], row[2]]
            for row in sql_select_execute(db_cursor, isPostgreSQL, "SELECT * FROM tsk_objects")}
def build_id_image_names_table(db_cursor, isPostgreSQL):
    """Build the map of image object ids to image names.

    Args:
        db_cursor: the database cursor
        isPostgreSQL: True when querying PostgreSQL, False for SQLite

    Returns:
        dict mapping obj_id -> image name (first segment only, sequence=0)
    """
    # Dict comprehension instead of dict([...]) — same mapping, idiomatic form.
    # data_sources which are logical file sets will be found in the files table
    return {row[0]: row[1]
            for row in sql_select_execute(db_cursor, isPostgreSQL, "SELECT obj_id, name FROM tsk_image_names WHERE sequence=0")}
def build_id_artifact_types_table(db_cursor, isPostgreSQL):
    """Build the map of artifact object ids to artifact type names.

    Args:
        db_cursor: the database cursor
        isPostgreSQL: True when querying PostgreSQL, False for SQLite

    Returns:
        dict mapping artifact_obj_id -> artifact type_name
    """
    # Dict comprehension instead of dict([...]) — same mapping, idiomatic form.
    return {row[0]: row[1]
            for row in sql_select_execute(db_cursor, isPostgreSQL, "SELECT blackboard_artifacts.artifact_obj_id, blackboard_artifact_types.type_name FROM blackboard_artifacts INNER JOIN blackboard_artifact_types ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id ")}
def build_id_legacy_artifact_types_table(db_cursor, isPostgreSQL):
    """Build the map of legacy artifact ids to artifact type names.

    Args:
        db_cursor: the database cursor
        isPostgreSQL: True when querying PostgreSQL, False for SQLite

    Returns:
        dict mapping artifact_id -> artifact type_name
    """
    # Dict comprehension instead of dict([...]) — same mapping, idiomatic form.
    return {row[0]: row[1]
            for row in sql_select_execute(db_cursor, isPostgreSQL, "SELECT blackboard_artifacts.artifact_id, blackboard_artifact_types.type_name FROM blackboard_artifacts INNER JOIN blackboard_artifact_types ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id ")}
def build_id_reports_table(db_cursor, isPostgreSQL):
    """Build the map of report object ids to report paths.

    Args:
        db_cursor: the database cursor
        isPostgreSQL: True when querying PostgreSQL, False for SQLite

    Returns:
        dict mapping obj_id -> report path
    """
    # Dict comprehension instead of dict([...]) — same mapping, idiomatic form.
    return {row[0]: row[1]
            for row in sql_select_execute(db_cursor, isPostgreSQL, "SELECT obj_id, path FROM reports")}
def build_id_obj_path_table(files_table, objects_table, artifacts_table, reports_table, images_table):
    """Build the map of object ids to path-like identifiers.

    Args:
        files_table: obj_id -> file path
        objects_table: obj_id -> [par_obj_id, type]
        artifacts_table: artifact obj_id -> artifact_type_name
        reports_table: obj_id -> report path
        images_table: obj_id -> image name

    Returns:
        dict mapping obj_id -> path string
    """
    # Start from the file paths and fill in reports and artifacts.
    mapping = files_table.copy()
    for obj_id, (par_obj_id, _obj_type) in objects_table.items():
        if obj_id not in mapping:  # The mapping table doesn't have data for obj_id
            if obj_id in reports_table:  # For a report we use the report path
                if par_obj_id is not None:
                    mapping[obj_id] = reports_table[obj_id]
            elif obj_id in artifacts_table:
                # For an artifact we use its parent's path plus its
                # artifact_type name; the parent can be a file, report or image.
                path = ""
                if par_obj_id in mapping:
                    path = mapping[par_obj_id]
                elif par_obj_id in reports_table:
                    path = reports_table[par_obj_id]
                elif par_obj_id in images_table:
                    path = images_table[par_obj_id]
                mapping[obj_id] = path + "/" + artifacts_table[obj_id]
        elif par_obj_id not in mapping:
            if par_obj_id in artifacts_table:
                # BUG FIX: objects_table values are [par_obj_id, type] lists.
                # The original looked up mapping[objects_table[v[0]]], indexing
                # a dict with an (unhashable) list — a guaranteed TypeError.
                # The grandparent id is element 0 of that list.
                grandparent_id = objects_table[par_obj_id][0]
                path = mapping[grandparent_id]
                mapping[obj_id] = path + "/" + artifacts_table[par_obj_id]
    return mapping
def db_connect(db_file, isMultiUser, pgSettings=None):
    """Open a connection to the case database.

    Args:
        db_file: database name (PostgreSQL) or path to the SQLite file
        isMultiUser: True to connect to PostgreSQL, False for SQLite
        pgSettings: PostgreSQL host/credentials; only read when isMultiUser

    Returns:
        (connection, backup_file_path) — the backup path is None for
        PostgreSQL; for SQLite it is the temp copy actually opened.
    """
    if isMultiUser: # use PostgreSQL
        try:
            return psycopg2.connect("dbname=" + db_file + " user=" + pgSettings.username + " host=" + pgSettings.pgHost + " password=" + pgSettings.password), None
        except:
            # NOTE(review): on failure this only prints and falls through,
            # implicitly returning None — the caller's tuple unpack will then
            # raise. Consider re-raising or sys.exit-ing here; confirm callers.
            print("Failed to connect to the database: " + db_file)
    else: # Sqlite
        # Make a copy that we can modify
        backup_db_file = TskDbDiff._get_tmp_file("tsk_backup_db", ".db")
        shutil.copy(db_file, backup_db_file)
        # We sometimes get situations with messed up permissions
        os.chmod (backup_db_file, 0o777)
        return sqlite3.connect(backup_db_file), backup_db_file
def sql_select_execute(cursor, isPostgreSQL, sql_stmt):
    """Run a SELECT statement and return an iterable of result rows.

    For PostgreSQL the fetched list is returned; for SQLite the cursor
    itself (which iterates rows lazily) is returned.
    """
    if not isPostgreSQL:
        return cursor.execute(sql_stmt)
    cursor.execute(sql_stmt)
    return cursor.fetchall()
def main():
    """Command-line entry point: diff an output case database against a
    gold database and report which comparisons passed."""
    try:
        sys.argv.pop(0)  # discard the script name
        output_db = sys.argv.pop(0)
        gold_db = sys.argv.pop(0)
    except IndexError:
        print("usage: tskdbdiff [OUTPUT DB PATH] [GOLD DB PATH]")
        sys.exit(1)

    differ = TskDbDiff(output_db, gold_db, output_dir=".")
    dump_ok, bb_dump_ok = differ.run_diff()

    if dump_ok and bb_dump_ok:
        print("Database comparison passed.")
    if not dump_ok:
        print("Non blackboard database comparison failed.")
    if not bb_dump_ok:
        print("Blackboard database comparison failed.")
    sys.exit(0)
if __name__ == "__main__":
    # Script entry point: the diff logic uses Python-3-only features, so
    # refuse to run under Python 2 (hexversion < 0x03000000).
    if sys.hexversion < 0x03000000:
        print("Python 3 required")
        sys.exit(1)
    main()
| |
import sys
from django import http
from django.core import signals
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
class BaseHandler(object):
    """Shared request-handling machinery for Django's concrete handlers.

    Loads the middleware chain from settings, resolves URLs, invokes the
    view, and converts exceptions into HTTP responses.

    NOTE(review): this is Python-2-only code (`except X, e` and three-arg
    `raise` syntax).
    """
    # Changes that are always applied to a response (in this order).
    response_fixes = [
        http.fix_location_header,
        http.conditional_content_removal,
        http.fix_IE_for_attach,
        http.fix_IE_for_vary,
    ]

    def __init__(self):
        # All middleware lists start as None; load_middleware() populates
        # them, and _request_middleware doubles as the "initialized" flag.
        self._request_middleware = self._view_middleware = self._response_middleware = self._exception_middleware = None

    def load_middleware(self):
        """
        Populate middleware lists from settings.MIDDLEWARE_CLASSES.
        Must be called after the environment is fixed (see __call__).

        Raises ImproperlyConfigured for an unimportable or malformed
        middleware path; a middleware may opt out by raising
        MiddlewareNotUsed from its constructor.
        """
        from django.conf import settings
        from django.core import exceptions
        self._view_middleware = []
        self._response_middleware = []
        self._exception_middleware = []

        request_middleware = []
        for middleware_path in settings.MIDDLEWARE_CLASSES:
            try:
                # Split "package.module.ClassName" at the last dot.
                dot = middleware_path.rindex('.')
            except ValueError:
                raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % middleware_path)
            mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
            try:
                mod = import_module(mw_module)
            except ImportError, e:
                raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (mw_module, e))
            try:
                mw_class = getattr(mod, mw_classname)
            except AttributeError:
                raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname))

            try:
                mw_instance = mw_class()
            except exceptions.MiddlewareNotUsed:
                continue

            if hasattr(mw_instance, 'process_request'):
                request_middleware.append(mw_instance.process_request)
            if hasattr(mw_instance, 'process_view'):
                self._view_middleware.append(mw_instance.process_view)
            if hasattr(mw_instance, 'process_response'):
                # Response/exception middleware run in reverse declaration
                # order, hence insert(0, ...) instead of append.
                self._response_middleware.insert(0, mw_instance.process_response)
            if hasattr(mw_instance, 'process_exception'):
                self._exception_middleware.insert(0, mw_instance.process_exception)

        # We only assign to this when initialization is complete as it is used
        # as a flag for initialization being complete.
        self._request_middleware = request_middleware

    def get_response(self, request):
        "Returns an HttpResponse object for the given HttpRequest"
        from django.core import exceptions, urlresolvers
        from django.conf import settings

        try:
            try:
                # Setup default url resolver for this thread.
                urlconf = settings.ROOT_URLCONF
                urlresolvers.set_urlconf(urlconf)
                resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)

                # Apply request middleware
                for middleware_method in self._request_middleware:
                    response = middleware_method(request)
                    if response:
                        return response

                if hasattr(request, "urlconf"):
                    # Reset url resolver with a custom urlconf.
                    urlconf = request.urlconf
                    urlresolvers.set_urlconf(urlconf)
                    resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)

                callback, callback_args, callback_kwargs = resolver.resolve(
                    request.path_info)

                # Apply view middleware
                for middleware_method in self._view_middleware:
                    response = middleware_method(request, callback, callback_args, callback_kwargs)
                    if response:
                        return response

                try:
                    response = callback(request, *callback_args, **callback_kwargs)
                except Exception, e:
                    # If the view raised an exception, run it through exception
                    # middleware, and if the exception middleware returns a
                    # response, use that. Otherwise, reraise the exception.
                    for middleware_method in self._exception_middleware:
                        response = middleware_method(request, e)
                        if response:
                            return response
                    raise

                # Complain if the view returned None (a common error).
                if response is None:
                    try:
                        view_name = callback.func_name # If it's a function
                    except AttributeError:
                        view_name = callback.__class__.__name__ + '.__call__' # If it's a class
                    raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))

                return response
            except http.Http404, e:
                if settings.DEBUG:
                    from django.views import debug
                    return debug.technical_404_response(request, e)
                else:
                    try:
                        callback, param_dict = resolver.resolve404()
                        return callback(request, **param_dict)
                    except:
                        # The 404 handler itself failed; fall back to the
                        # uncaught-exception path, always firing the signal.
                        try:
                            return self.handle_uncaught_exception(request, resolver, sys.exc_info())
                        finally:
                            receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
            except exceptions.PermissionDenied:
                return http.HttpResponseForbidden('<h1>Permission denied</h1>')
            except SystemExit:
                # Allow sys.exit() to actually exit. See tickets #1023 and #4701
                raise
            except: # Handle everything else, including SuspiciousOperation, etc.
                # Get the exception info now, in case another exception is thrown later.
                receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
                return self.handle_uncaught_exception(request, resolver, sys.exc_info())
        finally:
            # Reset URLconf for this thread on the way out for complete
            # isolation of request.urlconf
            urlresolvers.set_urlconf(None)

    def handle_uncaught_exception(self, request, resolver, exc_info):
        """
        Processing for any otherwise uncaught exceptions (those that will
        generate HTTP 500 responses). Can be overridden by subclasses who want
        customised 500 handling.

        Be *very* careful when overriding this because the error could be
        caused by anything, so assuming something like the database is always
        available would be an error.
        """
        from django.conf import settings
        from django.core.mail import mail_admins

        if settings.DEBUG_PROPAGATE_EXCEPTIONS:
            raise

        if settings.DEBUG:
            from django.views import debug
            return debug.technical_500_response(request, *exc_info)

        # When DEBUG is False, send an error message to the admins.
        subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
        try:
            request_repr = repr(request)
        except:
            request_repr = "Request repr() unavailable"
        message = "%s\n\n%s" % (self._get_traceback(exc_info), request_repr)
        mail_admins(subject, message, fail_silently=True)
        # If Http500 handler is not installed, re-raise last exception
        if resolver.urlconf_module is None:
            # Python 2 three-argument raise: re-raise with original traceback.
            raise exc_info[1], None, exc_info[2]
        # Return an HttpResponse that displays a friendly error message.
        callback, param_dict = resolver.resolve500()
        return callback(request, **param_dict)

    def _get_traceback(self, exc_info=None):
        "Helper function to return the traceback as a string"
        import traceback
        return '\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))

    def apply_response_fixes(self, request, response):
        """
        Applies each of the functions in self.response_fixes to the request and
        response, modifying the response in the process. Returns the new
        response.
        """
        for func in self.response_fixes:
            response = func(request, response)
        return response
def get_script_name(environ):
    """
    Returns the equivalent of the HTTP request's SCRIPT_NAME environment
    variable. If Apache mod_rewrite has been used, returns what would have been
    the script name prior to any rewriting (so it's the script name as seen
    from the client's perspective), unless DJANGO_USE_POST_REWRITE is set (to
    anything).

    NOTE(review): the docstring mentions DJANGO_USE_POST_REWRITE but the code
    never checks it — verify against the handler that calls this.
    """
    from django.conf import settings
    if settings.FORCE_SCRIPT_NAME is not None:
        return force_unicode(settings.FORCE_SCRIPT_NAME)

    # If Apache's mod_rewrite had a whack at the URL, Apache set either
    # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
    # rewrites. Unfortunately not every webserver (lighttpd!) passes this
    # information through all the time, so FORCE_SCRIPT_NAME, above, is still
    # needed.
    script_url = environ.get('SCRIPT_URL', u'')
    if not script_url:
        script_url = environ.get('REDIRECT_URL', u'')
    if script_url:
        # BUG FIX: when PATH_INFO is empty, the old unconditional slice
        # script_url[:-len(u'')] evaluated to script_url[:0] == u'', wrongly
        # returning an empty script name. Only strip PATH_INFO when non-empty.
        path_info = environ.get('PATH_INFO', u'')
        if path_info:
            return force_unicode(script_url[:-len(path_info)])
        return force_unicode(script_url)
    return force_unicode(environ.get('SCRIPT_NAME', u''))
| |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Jakob Schnitzer.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Converts tracks or albums to external directory
"""
from __future__ import division, absolute_import, print_function
import os
import threading
import subprocess
import tempfile
import shlex
import six
from string import Template
import platform
from beets import ui, util, plugins, config
from beets.plugins import BeetsPlugin
from confuse import ConfigTypeError
from beets import art
from beets.util.artresizer import ArtResizer
from beets.library import parse_query_string
from beets.library import Item
# Serializes directory creation across the converter's worker threads.
_fs_lock = threading.Lock()
_temp_files = []  # Keep track of temporary transcoded files for deletion.

# Some convenient alternate names for formats.
ALIASES = {
    u'wma': u'windows media',
    u'vorbis': u'ogg',
}

# Formats treated as lossless by the `never_convert_lossy_files` option.
LOSSLESS_FORMATS = ['ape', 'flac', 'alac', 'wav', 'aiff']
def replace_ext(path, ext):
    """Return *path* (bytes) with its extension swapped for `ext`.

    The new extension must not contain a leading dot.
    """
    root, _old_ext = os.path.splitext(path)
    return root + b'.' + ext
def get_format(fmt=None):
    """Return the command template and the extension from the config.

    `fmt` defaults to the configured `convert.format`; alias names (see
    ALIASES) are resolved first. Returns a (command, extension) tuple of
    UTF-8 encoded bytes. Raises ui.UserError when the format entry lacks
    a "command" field.
    """
    if not fmt:
        fmt = config['convert']['format'].as_str().lower()
    fmt = ALIASES.get(fmt, fmt)

    try:
        format_info = config['convert']['formats'][fmt].get(dict)
        command = format_info['command']
        extension = format_info.get('extension', fmt)
    except KeyError:
        raise ui.UserError(
            u'convert: format {0} needs the "command" field'
            .format(fmt)
        )
    except ConfigTypeError:
        # The format may be configured as a bare command string rather than
        # a dict; in that case the extension defaults to the format name.
        command = config['convert']['formats'][fmt].get(str)
        extension = fmt

    # Convenience and backwards-compatibility shortcuts. These top-level
    # options deliberately override the per-format values above.
    keys = config['convert'].keys()
    if 'command' in keys:
        command = config['convert']['command'].as_str()
    elif 'opts' in keys:
        # Undocumented option for backwards compatibility with < 1.3.1.
        command = u'ffmpeg -i $source -y {0} $dest'.format(
            config['convert']['opts'].as_str()
        )
    if 'extension' in keys:
        extension = config['convert']['extension'].as_str()

    return (command.encode('utf-8'), extension.encode('utf-8'))
def should_transcode(item, fmt):
    """Determine whether the item should be transcoded as part of
    conversion (i.e., its bitrate is high or it has the wrong format).
    """
    # Items matching any `no_convert` query are never transcoded.
    no_convert_queries = config['convert']['no_convert'].as_str_seq()
    for query_string in no_convert_queries:
        query, _ = parse_query_string(query_string, Item)
        if query.match(item):
            return False

    # Optionally refuse to re-encode already-lossy sources.
    if config['convert']['never_convert_lossy_files'] and \
            item.format.lower() not in LOSSLESS_FORMATS:
        return False

    max_bitrate = config['convert']['max_bitrate'].get(int)
    if fmt.lower() != item.format.lower():
        return True
    return item.bitrate >= 1000 * max_bitrate
class ConvertPlugin(BeetsPlugin):
    """Beets plugin that transcodes (or links/copies) tracks to an external
    destination, optionally embedding album art and converting on import."""

    def __init__(self):
        super(ConvertPlugin, self).__init__()
        # Default configuration; see the plugin docs for the meaning of each
        # key. `formats` maps a format name either to a dict with `command`
        # and `extension`, or to a bare command string.
        self.config.add({
            u'dest': None,
            u'pretend': False,
            u'link': False,
            u'hardlink': False,
            u'threads': util.cpu_count(),
            u'format': u'mp3',
            u'id3v23': u'inherit',
            u'formats': {
                u'aac': {
                    u'command': u'ffmpeg -i $source -y -vn -acodec aac '
                                u'-aq 1 $dest',
                    u'extension': u'm4a',
                },
                u'alac': {
                    u'command': u'ffmpeg -i $source -y -vn -acodec alac $dest',
                    u'extension': u'm4a',
                },
                u'flac': u'ffmpeg -i $source -y -vn -acodec flac $dest',
                u'mp3': u'ffmpeg -i $source -y -vn -aq 2 $dest',
                u'opus':
                    u'ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest',
                u'ogg':
                    u'ffmpeg -i $source -y -vn -acodec libvorbis -aq 3 $dest',
                u'wma':
                    u'ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest',
            },
            u'max_bitrate': 500,
            u'auto': False,
            u'tmpdir': None,
            u'quiet': False,
            u'embed': True,
            u'paths': {},
            u'no_convert': u'',
            u'never_convert_lossy_files': False,
            u'copy_album_art': False,
            u'album_art_maxwidth': 0,
            u'delete_originals': False,
        })
        self.early_import_stages = [self.auto_convert]

        self.register_listener('import_task_files', self._cleanup)

    def commands(self):
        """Declare the `convert` CLI subcommand and its options."""
        cmd = ui.Subcommand('convert', help=u'convert to external location')
        cmd.parser.add_option('-p', '--pretend', action='store_true',
                              help=u'show actions but do nothing')
        cmd.parser.add_option('-t', '--threads', action='store', type='int',
                              help=u'change the number of threads, \
defaults to maximum available processors')
        cmd.parser.add_option('-k', '--keep-new', action='store_true',
                              dest='keep_new', help=u'keep only the converted \
and move the old files')
        cmd.parser.add_option('-d', '--dest', action='store',
                              help=u'set the destination directory')
        cmd.parser.add_option('-f', '--format', action='store', dest='format',
                              help=u'set the target format of the tracks')
        cmd.parser.add_option('-y', '--yes', action='store_true', dest='yes',
                              help=u'do not ask for confirmation')
        cmd.parser.add_option('-l', '--link', action='store_true', dest='link',
                              help=u'symlink files that do not \
need transcoding.')
        cmd.parser.add_option('-H', '--hardlink', action='store_true',
                              dest='hardlink',
                              help=u'hardlink files that do not \
need transcoding. Overrides --link.')
        cmd.parser.add_album_option()
        cmd.func = self.convert_func
        return [cmd]

    def auto_convert(self, config, task):
        """Early import stage: transcode imported items when `auto` is on."""
        if self.config['auto']:
            for item in task.imported_items():
                self.convert_on_import(config.lib, item)

    # Utilities converted from functions to methods on logging overhaul

    def encode(self, command, source, dest, pretend=False):
        """Encode `source` to `dest` using command template `command`.

        Raises `subprocess.CalledProcessError` if the command exited with a
        non-zero status code.
        """
        # The paths and arguments must be bytes.
        assert isinstance(command, bytes)
        assert isinstance(source, bytes)
        assert isinstance(dest, bytes)

        quiet = self.config['quiet'].get(bool)

        if not quiet and not pretend:
            self._log.info(u'Encoding {0}', util.displayable_path(source))

        # On Python 3, we need to construct the command to invoke as a
        # Unicode string. On Unix, this is a little unfortunate---the OS is
        # expecting bytes---so we use surrogate escaping and decode with the
        # argument encoding, which is the same encoding that will then be
        # *reversed* to recover the same bytes before invoking the OS. On
        # Windows, we want to preserve the Unicode filename "as is."
        if not six.PY2:
            command = command.decode(util.arg_encoding(), 'surrogateescape')
            if platform.system() == 'Windows':
                source = source.decode(util._fsencoding())
                dest = dest.decode(util._fsencoding())
            else:
                source = source.decode(util.arg_encoding(), 'surrogateescape')
                dest = dest.decode(util.arg_encoding(), 'surrogateescape')

        # Substitute $source and $dest in the argument list.
        args = shlex.split(command)
        encode_cmd = []
        for i, arg in enumerate(args):
            args[i] = Template(arg).safe_substitute({
                'source': source,
                'dest': dest,
            })
            if six.PY2:
                encode_cmd.append(args[i])
            else:
                encode_cmd.append(args[i].encode(util.arg_encoding()))

        if pretend:
            self._log.info(u'{0}', u' '.join(ui.decargs(args)))
            return

        try:
            util.command_output(encode_cmd)
        except subprocess.CalledProcessError as exc:
            # Something went wrong (probably Ctrl+C), remove temporary files
            self._log.info(u'Encoding {0} failed. Cleaning up...',
                           util.displayable_path(source))
            self._log.debug(u'Command {0} exited with status {1}: {2}',
                            args,
                            exc.returncode,
                            exc.output)
            util.remove(dest)
            util.prune_dirs(os.path.dirname(dest))
            raise
        except OSError as exc:
            raise ui.UserError(
                u"convert: couldn't invoke '{0}': {1}".format(
                    u' '.join(ui.decargs(args)), exc
                )
            )

        if not quiet and not pretend:
            self._log.info(u'Finished encoding {0}',
                           util.displayable_path(source))

    def convert_item(self, dest_dir, keep_new, path_formats, fmt,
                     pretend=False, link=False, hardlink=False):
        """A pipeline thread that converts `Item` objects from a
        library.

        This is a coroutine/generator stage: each `yield` hands back the
        previous (item, original, converted) triple and receives the next
        item to process.
        """
        command, ext = get_format(fmt)
        item, original, converted = None, None, None
        while True:
            item = yield (item, original, converted)
            dest = item.destination(basedir=dest_dir,
                                    path_formats=path_formats)

            # When keeping the new file in the library, we first move the
            # current (pristine) file to the destination. We'll then copy it
            # back to its old path or transcode it to a new path.
            if keep_new:
                original = dest
                converted = item.path
                if should_transcode(item, fmt):
                    converted = replace_ext(converted, ext)
            else:
                original = item.path
                if should_transcode(item, fmt):
                    dest = replace_ext(dest, ext)
                converted = dest

            # Ensure that only one thread tries to create directories at a
            # time. (The existence check is not atomic with the directory
            # creation inside this function.)
            if not pretend:
                with _fs_lock:
                    util.mkdirall(dest)

            if os.path.exists(util.syspath(dest)):
                self._log.info(u'Skipping {0} (target file exists)',
                               util.displayable_path(item.path))
                continue

            if keep_new:
                if pretend:
                    self._log.info(u'mv {0} {1}',
                                   util.displayable_path(item.path),
                                   util.displayable_path(original))
                else:
                    self._log.info(u'Moving to {0}',
                                   util.displayable_path(original))
                    util.move(item.path, original)

            if should_transcode(item, fmt):
                linked = False
                try:
                    self.encode(command, original, converted, pretend)
                except subprocess.CalledProcessError:
                    # encode() already cleaned up; skip this item.
                    continue
            else:
                linked = link or hardlink
                if pretend:
                    msg = 'ln' if hardlink else ('ln -s' if link else 'cp')
                    self._log.info(u'{2} {0} {1}',
                                   util.displayable_path(original),
                                   util.displayable_path(converted),
                                   msg)
                else:
                    # No transcoding necessary.
                    msg = 'Hardlinking' if hardlink \
                        else ('Linking' if link else 'Copying')
                    self._log.info(u'{1} {0}',
                                   util.displayable_path(item.path),
                                   msg)
                    if hardlink:
                        util.hardlink(original, converted)
                    elif link:
                        util.link(original, converted)
                    else:
                        util.copy(original, converted)

            if pretend:
                continue

            id3v23 = self.config['id3v23'].as_choice([True, False, 'inherit'])
            if id3v23 == 'inherit':
                id3v23 = None

            # Write tags from the database to the converted file.
            item.try_write(path=converted, id3v23=id3v23)

            if keep_new:
                # If we're keeping the transcoded file, read it again (after
                # writing) to get new bitrate, duration, etc.
                item.path = converted
                item.read()
                item.store()  # Store new path and audio data.

            if self.config['embed'] and not linked:
                album = item.get_album()
                if album and album.artpath:
                    self._log.debug(u'embedding album art from {}',
                                    util.displayable_path(album.artpath))
                    art.embed_item(self._log, item, album.artpath,
                                   itempath=converted, id3v23=id3v23)

            if keep_new:
                plugins.send('after_convert', item=item,
                             dest=dest, keepnew=True)
            else:
                plugins.send('after_convert', item=item,
                             dest=converted, keepnew=False)

    def copy_album_art(self, album, dest_dir, path_formats, pretend=False,
                       link=False, hardlink=False):
        """Copies or converts the associated cover art of the album. Album must
        have at least one track.
        """
        if not album or not album.artpath:
            return

        album_item = album.items().get()
        # Album shouldn't be empty.
        if not album_item:
            return

        # Get the destination of the first item (track) of the album, we use
        # this function to format the path accordingly to path_formats.
        dest = album_item.destination(basedir=dest_dir,
                                      path_formats=path_formats)

        # Remove item from the path.
        dest = os.path.join(*util.components(dest)[:-1])
        dest = album.art_destination(album.artpath, item_dir=dest)
        if album.artpath == dest:
            return

        if not pretend:
            util.mkdirall(dest)

        if os.path.exists(util.syspath(dest)):
            self._log.info(u'Skipping {0} (target file exists)',
                           util.displayable_path(album.artpath))
            return

        # Decide whether we need to resize the cover-art image.
        resize = False
        maxwidth = None
        if self.config['album_art_maxwidth']:
            maxwidth = self.config['album_art_maxwidth'].get(int)
            size = ArtResizer.shared.get_size(album.artpath)
            self._log.debug('image size: {}', size)
            if size:
                resize = size[0] > maxwidth
            else:
                self._log.warning(u'Could not get size of image (please see '
                                  u'documentation for dependencies).')

        # Either copy or resize (while copying) the image.
        if resize:
            self._log.info(u'Resizing cover art from {0} to {1}',
                           util.displayable_path(album.artpath),
                           util.displayable_path(dest))
            if not pretend:
                ArtResizer.shared.resize(maxwidth, album.artpath, dest)
        else:
            if pretend:
                msg = 'ln' if hardlink else ('ln -s' if link else 'cp')
                self._log.info(u'{2} {0} {1}',
                               util.displayable_path(album.artpath),
                               util.displayable_path(dest),
                               msg)
            else:
                msg = 'Hardlinking' if hardlink \
                    else ('Linking' if link else 'Copying')
                self._log.info(u'{2} cover art from {0} to {1}',
                               util.displayable_path(album.artpath),
                               util.displayable_path(dest),
                               msg)
                if hardlink:
                    util.hardlink(album.artpath, dest)
                elif link:
                    util.link(album.artpath, dest)
                else:
                    util.copy(album.artpath, dest)

    def convert_func(self, lib, opts, args):
        """Entry point for the `convert` command: query items, confirm, and
        run the conversion pipeline across the configured thread count."""
        dest = opts.dest or self.config['dest'].get()
        if not dest:
            raise ui.UserError(u'no convert destination set')
        dest = util.bytestring_path(dest)

        threads = opts.threads or self.config['threads'].get(int)

        path_formats = ui.get_path_formats(self.config['paths'] or None)

        fmt = opts.format or self.config['format'].as_str().lower()

        if opts.pretend is not None:
            pretend = opts.pretend
        else:
            pretend = self.config['pretend'].get(bool)

        # --hardlink wins over --link; otherwise fall back to the config.
        if opts.hardlink is not None:
            hardlink = opts.hardlink
            link = False
        elif opts.link is not None:
            hardlink = False
            link = opts.link
        else:
            hardlink = self.config['hardlink'].get(bool)
            link = self.config['link'].get(bool)

        if opts.album:
            albums = lib.albums(ui.decargs(args))
            items = [i for a in albums for i in a.items()]
            if not pretend:
                for a in albums:
                    ui.print_(format(a, u''))
        else:
            items = list(lib.items(ui.decargs(args)))
            if not pretend:
                for i in items:
                    ui.print_(format(i, u''))

        if not items:
            self._log.error(u'Empty query result.')
            return
        if not (pretend or opts.yes or ui.input_yn(u"Convert? (Y/n)")):
            return

        if opts.album and self.config['copy_album_art']:
            for album in albums:
                self.copy_album_art(album, dest, path_formats, pretend,
                                    link, hardlink)

        convert = [self.convert_item(dest,
                                     opts.keep_new,
                                     path_formats,
                                     fmt,
                                     pretend,
                                     link,
                                     hardlink)
                   for _ in range(threads)]
        pipe = util.pipeline.Pipeline([iter(items), convert])
        pipe.run_parallel()

    def convert_on_import(self, lib, item):
        """Transcode a file automatically after it is imported into the
        library.
        """
        fmt = self.config['format'].as_str().lower()
        if should_transcode(item, fmt):
            command, ext = get_format()

            # Create a temporary file for the conversion.
            tmpdir = self.config['tmpdir'].get()
            if tmpdir:
                tmpdir = util.py3_path(util.bytestring_path(tmpdir))
            fd, dest = tempfile.mkstemp(util.py3_path(b'.' + ext), dir=tmpdir)
            os.close(fd)
            dest = util.bytestring_path(dest)
            _temp_files.append(dest)  # Delete the transcode later.

            # Convert.
            try:
                self.encode(command, item.path, dest)
            except subprocess.CalledProcessError:
                return

            # Change the newly-imported database entry to point to the
            # converted file.
            source_path = item.path
            item.path = dest
            item.write()
            item.read()  # Load new audio information data.
            item.store()

            if self.config['delete_originals']:
                self._log.info(u'Removing original file {0}', source_path)
                util.remove(source_path, False)

    def _cleanup(self, task, session):
        """Listener for `import_task_files`: delete temporary transcodes
        whose paths the import task has retired."""
        for path in task.old_paths:
            if path in _temp_files:
                if os.path.isfile(path):
                    util.remove(path)
                _temp_files.remove(path)
| |
import numpy as np
import tensorflow as tf
import h5py
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
# Functions for initializing neural nets parameters
def init_weight_variable(shape, nameIn):
    """Create a trainable weight Variable of the given shape, initialized
    from a truncated normal distribution (stddev 0.1, float32)."""
    initial_value = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
    return tf.Variable(initial_value, name=nameIn)
def init_bias_variable(shape, nameIn):
    """Create a trainable bias Variable of the given shape, initialized
    to the constant 0.1 (float32)."""
    initial_value = tf.constant(0.1, shape=shape, dtype=tf.float32)
    return tf.Variable(initial_value, name=nameIn)
def conv2d(x, W):
    """2-D convolution of `x` with filter `W`: stride 1 in every
    dimension, no padding ('VALID')."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def monitorStepSize(cost_function, vars):
    '''
    Monitor the relative size of the steps and weights - see how much, as a percentage,
    the weights are changing per training step.
    - cost_function --- the cost function to evaluate to find the gradient
    - vars --- Holds the tensorflow variables that you want to track
    '''
    # NOTE(review): stub — the body contains only the docstring, so this
    # currently does nothing and returns None. Also note the parameter name
    # `vars` shadows the built-in; consider renaming when implementing.
def printGradsAndVars(grads_and_vars, X, y_):
    '''
    grads_and_vars is a list of tuples, where each tuple has two elements.
    The first element in each tuple is a tf.Tensor that holds gradients.
    The second element in each tuple is a tf.Variable that holds the variables that
    the gradients in the first element correspond to
    Code stolen from: https://stackoverflow.com/questions/40654881/how-to-print-current-variables-gradients-values-with-vars-names-in-tensorflow

    NOTE(review): unimplemented stub - the body contains only this docstring
    and the comments below, so calling it does nothing. X and y_ are unused.
    '''
    # assumes an interactive session is already started, so can evaluate the gradient tensor
    # instead of needing a sess.run() call
def printNestedList(lst):
    """Recursively walk a (possibly nested) numpy array and print the
    Python type of every leaf element, one per line.

    Inputs that are not numpy arrays are silently ignored.
    """
    # Fixes: removed the unused local `count`, and replaced the
    # `type(x) is T` checks with the idiomatic isinstance().
    if not isinstance(lst, np.ndarray):
        return
    for el in lst:
        if isinstance(el, np.ndarray):
            # sub-array: recurse down to its leaves
            printNestedList(el)
        else:
            print(type(el))
#self, X_train, y_train, X_val, y_val, num_freq, filter_row, filter_col, k1, k2, learningRate, pooling_strategy):
# set up property that makes it only be set once
# we'll use this to avoid adding tensors to the graph multiple times
import functools
def lazy_property(function):
    """Decorator turning a method into a once-per-instance cached property.

    The first access evaluates `function(self)` and stores the result on
    the instance under a private '_cache_<name>' attribute; later accesses
    return the cached value, so the function body (which here adds tensors
    to the TF graph) runs at most once per instance.
    """
    cache_name = '_cache_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        if not hasattr(self, cache_name):
            setattr(self, cache_name, function(self))
        return getattr(self, cache_name)

    return wrapper
class Model:
    """A one-convolutional-layer + softmax network classifying CQT
    spectrogram snippets by song.

    Graph tensors are exposed as ``lazy_property`` attributes so each is
    added to the TensorFlow graph exactly once.
    """

    def __init__(self, num_freq, filepath, filter_row, filter_col, k1, k2, learningRate, pooling_strategy, debug):
        '''
        Initializer for the model

        num_freq: number of CQT frequency bins per context frame
        filepath: path of the HDF5 file holding train/validation data
        filter_row, filter_col: first convolutional kernel dimensions
        k1: number of filters in the first convolutional layer
        k2: unused by this single-layer model (kept for interface compat)
        learningRate: learning rate passed to the Adam optimizer
        pooling_strategy: currently unused (kept for interface compat)
        debug: when True, also build the gradient-inspection tensors
        '''
        # start by loading the data
        [self.X_train, self.y_train, self.X_val, self.y_val] = Model.loadData(filepath)
        # store the parameters sent to init that define our model
        self.num_freq = num_freq
        self.filter_row = filter_row
        self.filter_col = filter_col
        self.k1 = k1
        self.k2 = k2
        self.learningRate = learningRate
        self.pooling_strategy = pooling_strategy
        self.debug = debug
        # find num_training_vec, total_features, num_frames, num_classes, and l
        # from the shape of the data and store them
        self.storeParamsFromData()
        # Set-up and store the input and output placeholders
        x = tf.placeholder(tf.float32, [None, self.total_features])
        y_ = tf.placeholder(tf.float32, [None, self.num_classes])
        self.x = x
        self.y_ = y_
        # Setup and store tensor that performs the one-hot encoding
        y_train_OHEnc = tf.one_hot(self.y_train.copy(), self.num_classes)
        y_val_OHEnc = tf.one_hot(self.y_val.copy(), self.num_classes)
        self.y_train_OHEnc = y_train_OHEnc
        self.y_val_OHEnc = y_val_OHEnc
        # touch each lazy_property now so its tensors join the graph once
        self.y_conv
        self.cross_entropy
        self.train_step
        self.accuracy
        # properties for use in debugging
        if self.debug:
            self.grads_and_vars
        # print to the user that the network has been set up, along with its properties
        print("Setting up Neural net with %g x %g filters, k1 = %g, k2 = %g, learningRate = %g"%(filter_row, filter_col, k1, k2, learningRate))

    def storeParamsFromData(self):
        '''
        Calculate and store parameters from the raw data

        total_features: The number of CQT coefficients total (includes all context frames)
        num_training_vec: The number of training examples in your dataset
        num_frames: The number of context frames in each training example (total_features / num_freq)
        num_classes: The number of songs we're distinguishing between in our output
        l: The length of our second convolutional kernel - for now, its equal to num_frames
        '''
        # Neural-network model set-up
        # calculating some values which will be nice as we set up the model
        num_training_vec, total_features = self.X_train.shape
        num_frames = int(total_features / self.num_freq)
        print('-- Num frames: {}'.format(num_frames))
        # labels are assumed to be 0-based consecutive integers
        num_classes = int(max(self.y_train.max(), self.y_val.max()) + 1)
        l = num_frames
        # store what will be helpful later
        self.total_features = total_features
        self.num_training_vec = num_training_vec
        self.num_frames = num_frames
        self.num_classes = num_classes
        self.l = l

    @lazy_property
    def y_conv(self):
        '''
        Tensor holding the network's un-normalized output (logits).
        Accessed as: self.y_conv
        '''
        # reshape the input into the form of a spectrograph
        x_image = tf.reshape(self.x, [-1, self.num_freq, self.num_frames, 1])
        x_image = tf.identity(x_image, name="x_image")
        # first convolutional layer parameters
        W_conv1 = init_weight_variable([self.filter_row, self.filter_col, 1, self.k1], "W_conv1")
        # BUG FIX: previously init_bias_variable([k1], ...) silently read the
        # module-level global k1 instead of this instance's parameter.
        b_conv1 = init_bias_variable([self.k1], "b_conv1")
        # tensor that computes the output of the first convolutional layer
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        h_conv1 = tf.identity(h_conv1, name="h_conv_1")
        # flatten out the output of the first convolutional layer to pass to the softmax layer
        h_conv1_flat = tf.reshape(h_conv1, [-1, (self.num_freq - self.filter_row + 1) * (self.num_frames - self.filter_col + 1) * self.k1])
        h_conv1_flat = tf.identity(h_conv1_flat, name="h_conv1_flat")
        # softmax layer parameters
        W_sm = init_weight_variable([(self.num_freq - self.filter_row + 1) * (self.num_frames - self.filter_col + 1) * self.k1, self.num_classes], "W_sm")
        b_sm = init_bias_variable([self.num_classes], "b_sm")
        # the output of the layer - un-normalized and without a non-linearity
        # since cross_entropy_with_logits takes care of that
        y_conv = tf.matmul(h_conv1_flat, W_sm) + b_sm
        y_conv = tf.identity(y_conv, name="y_conv")
        return y_conv  # would want to softmax it to get an actual prediction

    @lazy_property
    def cross_entropy(self):
        '''
        Create a tensor that computes the cross entropy cost
        Use the placeholder y_ as the labels, with input y_conv
        Note that softmax_cross_entropy_with_logits takes care of normalizing
        y_conv to make it a probability distribution
        This tensor can be accessed using: self.cross_entropy
        '''
        cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))
        cross_entropy = tf.identity(cross_entropy, name="cross_entropy")
        return cross_entropy

    @lazy_property
    def optimizer(self):
        '''
        Create a tensor that represents the optimizer. This tensor can
        be accessed using: self.optimizer
        '''
        optimizer = tf.train.AdamOptimizer(learning_rate = self.learningRate)
        return optimizer

    @lazy_property
    def train_step(self):
        '''
        Creates a tensor that represents a single training step. This tensor
        can be passed a feed_dict that has x and y_, and it will compute the gradients
        and perform a single step.
        This tensor can be accessed using: self.train_step
        '''
        return self.optimizer.minimize(self.cross_entropy)

    @lazy_property
    def accuracy(self):
        '''
        Create a tensor that computes the accuracy, using the placeholder y_ as the labeled data
        and y_conv for the predictions of the network.
        This tensor can be accessed using: self.accuracy
        '''
        correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(self.y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        return accuracy

    '''
    Properties that we'll use for debugging
    '''
    @lazy_property
    def grads_and_vars(self):
        # (gradient tensor, variable) pairs for every trainable variable
        grads_and_vars = self.optimizer.compute_gradients(self.cross_entropy, tf.trainable_variables())
        return grads_and_vars

    def train(self, batch_size, num_epochs, print_freq, debug_out='debug.txt'):
        '''
        Train the Network on the data that will have been loaded when the NN is initialized
        Trained on: self.X_train, and a OH encoding of self.y_train
        Trains with batch_size batches for num_epochs epochs
        Debugging info is written to debug.txt (can add params to have more places to write out
        to)

        Returns a list of logged series:
        [train_acc, val_acc, train_err, val_err,
         train_acc_on_batch, train_err_on_batch, epoch_numbers]
        '''
        # Starting an interactive session and initializing the parameters
        # NOTE(review): the session is intentionally left open so the
        # .eval()/.run() shorthand keeps working after train() returns.
        sess = tf.InteractiveSession()
        sess.run(tf.global_variables_initializer())
        # replace it with the one-hot encoded one --- should I replace?
        y_trainOH = sess.run(self.y_train_OHEnc)[:, 0, :]
        y_valOH = sess.run(self.y_val_OHEnc)[:, 0, :]
        # lists to record accuracy at several points during training
        train_acc_list = []
        val_acc_list = []
        train_acc_on_batch_list = []
        # lists to record the error at several points during training
        train_err_list = []
        val_err_list = []
        train_err_on_batch_list = []
        # track which epochs you record data during
        epoch_numbers = []
        # record the start time
        t_start = time.time()
        for epoch in range(num_epochs):
            epochStart = time.time()
            # train by systematically pulling batches of batch_size from
            # the training set and taking a training step on each batch
            for i in range(0, self.num_training_vec, batch_size):
                batch_end_point = min(i + batch_size, self.num_training_vec)
                train_batch_data = self.X_train[i : batch_end_point]
                train_batch_label = y_trainOH[i : batch_end_point]
                self.train_step.run(feed_dict={self.x: train_batch_data, self.y_: train_batch_label})
            epochEnd = time.time()
            # print and record data now that we've trained on our full training set
            if (epoch + 1) % print_freq == 0:
                # timing for the measurements of cost and accuracy
                evaluationStart = time.time()
                # compute training (on the most recent batch and the full data set)
                # and validation cost and accuracy, then print them and add them to the list
                # we start with accuracy:
                train_acc = self.accuracy.eval(feed_dict={self.x:self.X_train, self.y_: y_trainOH})
                train_acc_list.append(train_acc)
                val_acc = self.accuracy.eval(feed_dict={self.x: self.X_val, self.y_: y_valOH})
                val_acc_list.append(val_acc)
                train_acc_on_batch = self.accuracy.eval(feed_dict={self.x:train_batch_data, self.y_:train_batch_label})
                train_acc_on_batch_list.append(train_acc_on_batch)
                # Now we compute the error on each set:
                train_err = self.cross_entropy.eval(feed_dict={self.x: self.X_train, self.y_: y_trainOH})
                train_err_list.append(train_err)
                val_err = self.cross_entropy.eval(feed_dict={self.x: self.X_val, self.y_: y_valOH})
                val_err_list.append(val_err)
                train_err_on_batch = self.cross_entropy.eval(feed_dict={self.x:train_batch_data, self.y_:train_batch_label})
                train_err_on_batch_list.append(train_err_on_batch)
                # keep track of which epochs we have data for
                epoch_numbers += [epoch]
                # this marks the end of our evaluation
                evaluationEnd = time.time()
                # print a summary of our NN at this epoch
                print("epoch: %d, time (train, evaluation): (%g, %g), t acc, v acc, t cost, v cost: %.5f, %.5f, %g, %g"%(epoch+1, epochEnd - epochStart, evaluationEnd - evaluationStart, train_acc, val_acc, train_err, val_err))
            # debugging print outs
            if self.debug:
                # print out step / current value ratio for each parameter in our network
                # based on training data from the most recent batch
                # to the file with name debug_out
                self.debug_WriteGradAndVar(train_batch_data, train_batch_label, epoch, debug_out)
        # record the total time spent training the neural network
        t_end = time.time()
        print('--Time elapsed for training for %g epochs: %g'%(num_epochs, t_end - t_start))
        # return the lists of logged data
        return [train_acc_list, val_acc_list, train_err_list, val_err_list, train_acc_on_batch_list, train_err_on_batch_list, epoch_numbers]

    def debug_WriteGradAndVar(self, xDebug, yDebug, epoch, debug_out):
        '''
        Helper function that prints the ratio of the training step that would be taken
        on input data and labels xDebug and yDebug to the magnitude of each parameter
        in the network. This gives us a sense of how much each parameter is changing.

        Inputs:
        xDebug: input data to calculate the gradient from
        yDebug: labels for the input data
        epoch: the number of the epoch (to print out to the file)
        debug_out: the file to write to - if it doesn't exist it will be created
        '''
        # use a context manager so the handle is closed even if eval() raises
        with open(debug_out, 'a+') as file_object:
            # record which epoch this is
            file_object.write("Epoch: %d\n"%(epoch))
            # find the current learning rate - this will be used with the gradient to find the step size
            # NOTE(review): _lr is a private AdamOptimizer attribute; fragile
            # across TF versions
            curLearningRate = self.optimizer._lr
            # print each gradient and the variables they are associated with
            # the gradients are stored in tuples, where the first element is a tensor
            # that computes the gradient, and the second is the parameter that gradient
            # is associated with
            for gv in self.grads_and_vars:
                curGrads = gv[0].eval(feed_dict={self.x: xDebug, self.y_: yDebug})
                curSteps = curGrads * curLearningRate  # scale down the gradient by the learning rate
                curVars = gv[1].eval()
                # How much, compared to the magnitude of the weight, are we stepping
                stepToVarRatio = np.absolute(np.divide(curSteps, curVars))
                # print the name of the variable, then all the step ratios (step amount / current value)
                # these values will have been averaged across the training examples
                curName = gv[1].name
                file_object.write("Variable: " + curName + "\n")
                for index, step in np.ndenumerate(stepToVarRatio):
                    file_object.write(str(index) + ": " + str(step) + "\n")
                # print summary statistics for this layer
                maxVal = np.amax(stepToVarRatio)
                thirdQuartile = np.percentile(stepToVarRatio, 75)
                mean = np.mean(stepToVarRatio)
                median = np.median(stepToVarRatio)
                firstQuartile = np.percentile(stepToVarRatio, 25)
                minVal = np.amin(stepToVarRatio)
                file_object.write("Statistics: (%g, %g, %g, %g, %g, %g)\n"%(minVal, firstQuartile, median, mean, thirdQuartile, maxVal))
                file_object.write("---------------------------------------\n")

    @staticmethod
    def loadData(filepath):
        '''
        Load and return four variables from the file with path filepath

        X_train: input data for training
        y_train: labels for X_train
        X_val: input data for validation
        y_val: labels for X_val
        '''
        print('==> Experiment 2l')
        print('==> Loading data from {}'.format(filepath))
        # benchmark
        t_start = time.time()
        # reading data (open read-only; the default mode is deprecated/ambiguous)
        f = h5py.File(filepath, 'r')
        X_train = np.array(f.get('trainingFeatures'))
        y_train = np.array(f.get('trainingLabels'))
        X_val = np.array(f.get('validationFeatures'))
        y_val = np.array(f.get('validationLabels'))
        t_end = time.time()
        print('--Time elapsed for loading data: {t:.2f} \
seconds'.format(t = t_end - t_start))
        del f
        print('-- Number of training samples: {}'.format(X_train.shape[0]))
        print('-- Number of validation samples: {}'.format(X_val.shape[0]))
        print('Shape of X_train: %s'%str(X_train.shape))
        print('Shape of y_train: %s'%str(y_train.shape))
        print('Shape of X_val: %s'%str(X_val.shape))
        print('Shape of y_val: %s'%str(y_val.shape))
        return [X_train, y_train, X_val, y_val]
'''
Our main, with 121x1 convolutional layer.
'''
# set some parameters
filterRow = 1
filterCol = 1
k1 = 12
k2 = -1  # placeholder: unused by the single-conv-layer Model
learningRate = 0.0001
batchSize = 1000   # NOTE(review): unused below; m.train() is passed literals
numEpochs = 30     # NOTE(review): unused below; m.train() is passed literals
poolingStrategy = 'MAX'
# filepath to the data you want to load
filepath = '/pylon2/ci560sp/cstrong/exp2/taylorswift_smallDataset_71_7.mat'
# create the model - this will create the TF graph as well as load the data
# (it loads the data because needs to know its dimensions to know num_frames
# to make the weight matrices)
m = Model(121, filepath, filterRow, filterCol, k1, k2, learningRate, poolingStrategy, True) # set it to debug
# actually train the model (on the data it already loaded)
# the first parameter is the batch size, the second is the number of epochs to train for,
# the third is the print frequency
m.train(1000, 30, 1)
| |
# -*- coding: utf-8 -*-
"""
pyrseas.function
~~~~~~~~~~~~~~~~
This module defines four classes: Proc derived from
DbSchemaObject, Function and Aggregate derived from Proc, and
FunctionDict derived from DbObjectDict.
"""
from pyrseas.dbobject import DbObjectDict, DbSchemaObject
VOLATILITY_TYPES = {'i': 'immutable', 's': 'stable', 'v': 'volatile'}
class Proc(DbSchemaObject):
    """A procedure such as a FUNCTION or an AGGREGATE"""

    keylist = ['schema', 'name', 'arguments']

    def extern_key(self):
        """Return the key to be used in external maps for this function

        :return: string
        """
        kind = self.objtype.lower()
        return '%s %s(%s)' % (kind, self.name, self.arguments)

    def identifier(self):
        """Return a full identifier for a function object

        :return: string
        """
        signature = (self.qualname(), self.arguments)
        return "%s(%s)" % signature
class Function(Proc):
    """A procedural language function"""

    objtype = "FUNCTION"

    def to_map(self):
        """Convert a function to a YAML-suitable format

        :return: dictionary
        """
        dct = self.__dict__.copy()
        # key attributes are encoded in the map key, not the value
        for k in self.keylist:
            del dct[k]
        # 'volatile' is the Postgres default, so omit it from the map
        if self.volatility == 'v':
            del dct['volatility']
        else:
            dct['volatility'] = VOLATILITY_TYPES[self.volatility]
        if hasattr(self, 'dependent_table'):
            del dct['dependent_table']
        # C-language function: expose the link symbol instead of source text
        if hasattr(self, 'obj_file'):
            dct['link_symbol'] = self.source
            del dct['source']
        return {self.extern_key(): dct}

    def create(self, newsrc=None):
        """Return SQL statements to CREATE or REPLACE the function

        :param newsrc: new source for a changed function
        :return: SQL statements
        """
        stmts = []
        # a table this function depends on must exist first
        if hasattr(self, 'dependent_table'):
            stmts.append(self.dependent_table.create())
        if hasattr(self, 'obj_file'):
            # dynamically loaded C function: object file plus link symbol
            # (falls back to the function name when no symbol was given)
            src = "'%s', '%s'" % (self.obj_file,
                                  hasattr(self, 'link_symbol')
                                  and self.link_symbol or self.name)
        else:
            # dollar-quote the body so embedded quotes need no escaping
            src = "$_$%s$_$" % (newsrc or self.source)
        volat = strict = secdef = ''
        if hasattr(self, 'volatility'):
            volat = ' ' + VOLATILITY_TYPES[self.volatility].upper()
        if hasattr(self, 'strict') and self.strict:
            strict = ' STRICT'
        if hasattr(self, 'security_definer') and self.security_definer:
            secdef = ' SECURITY DEFINER'
        stmts.append("CREATE%s FUNCTION %s(%s) RETURNS %s\n LANGUAGE %s"
                     "%s%s%s\n AS %s" % (
                     newsrc and " OR REPLACE" or '', self.qualname(),
                     self.arguments, self.returns, self.language, volat, strict,
                     secdef, src))
        if hasattr(self, 'description'):
            stmts.append(self.comment())
        return stmts

    def diff_map(self, infunction):
        """Generate SQL to transform an existing function

        :param infunction: a YAML map defining the new function
        :return: list of SQL statements

        Compares the function to an input function and generates SQL
        statements to transform it into the one represented by the
        input.
        """
        stmts = []
        # only a source change requires CREATE OR REPLACE
        if self.source != infunction.source:
            stmts.append(self.create(infunction.source))
        stmts.append(self.diff_description(infunction))
        return stmts
class Aggregate(Proc):
    """An aggregate function"""

    objtype = "AGGREGATE"

    def to_map(self):
        """Convert an aggregate to a YAML-suitable format

        :return: dictionary
        """
        dct = self.__dict__.copy()
        # key attributes are encoded in the map key, not the value
        for k in self.keylist:
            del dct[k]
        # aggregates are always 'internal' language; not worth exporting
        del dct['language']
        return {self.extern_key(): dct}

    def create(self):
        """Return SQL statements to CREATE the aggregate

        :return: SQL statements
        """
        stmts = []
        ffunc = cond = ''
        if hasattr(self, 'finalfunc'):
            # finalfunc is a regprocedure like 'name(args)'; keep only the name
            ffname = self.finalfunc[:self.finalfunc.index('(')]
            ffunc = ",\n FINALFUNC = %s" % (ffname)
        if hasattr(self, 'initcond'):
            cond = ",\n INITCOND = '%s'" % (self.initcond)
        stmts.append("CREATE AGGREGATE %s(%s) (\n SFUNC = %s,"
                     "\n STYPE = %s%s%s)" % (
                     self.qualname(),
                     self.arguments, self.sfunc, self.stype, ffunc, cond))
        if hasattr(self, 'description'):
            stmts.append(self.comment())
        return stmts
class ProcDict(DbObjectDict):
    "The collection of regular and aggregate functions in a database"

    cls = Proc
    # Catalog query for PostgreSQL 8.4+, where pg_get_function_arguments()
    # and pg_get_function_result() are available.
    query = \
        """SELECT nspname AS schema, proname AS name,
                  pg_get_function_arguments(p.oid) AS arguments,
                  pg_get_function_result(p.oid) AS returns,
                  l.lanname AS language, provolatile AS volatility,
                  proisstrict AS strict, proisagg, prosrc AS source,
                  probin::text AS obj_file,
                  prosecdef AS security_definer,
                  aggtransfn::regprocedure AS sfunc,
                  aggtranstype::regtype AS stype,
                  aggfinalfn::regprocedure AS finalfunc,
                  agginitval AS initcond,
                  description
           FROM pg_proc p
                JOIN pg_namespace n ON (pronamespace = n.oid)
                JOIN pg_language l ON (prolang = l.oid)
                LEFT JOIN pg_aggregate a ON (p.oid = aggfnoid)
                LEFT JOIN pg_description d
                     ON (p.oid = d.objoid AND d.objsubid = 0)
           WHERE (nspname != 'pg_catalog' AND nspname != 'information_schema')
           ORDER BY nspname, proname"""
    # Fallback for 8.3 and earlier: reconstruct the argument list by hand
    # from proargmodes/proargnames/proargtypes.
    query_83 = \
        """SELECT nspname AS schema, proname AS name,
                  array_to_string(ARRAY(
                    SELECT
                      CASE proargmodes[n]
                        WHEN 'o' THEN 'OUT '
                        WHEN 'b' THEN 'INOUT '
                        ELSE ''
                      END || coalesce(proargnames[n] || ' ', '') || proargtypes[n]
                    FROM
                      (SELECT
                          oid,
                          proname,
                          proargmodes,
                          proargnames,
                          proargtypes,
                          generate_series(1, array_upper(proargtypes, 1)) AS n,
                          array_upper(proargtypes, 1) as n_max
                       FROM
                         (SELECT
                             oid,
                             proname,
                             proargmodes,
                             proargnames,
                             coalesce(proallargtypes, (proargtypes::oid[])[array_lower(proargtypes::oid[],1):array_upper(proargtypes::oid[],1)])::regtype[] AS proargtypes
                          FROM
                            pg_proc
                         ) AS q1
                      ) AS q2 WHERE q2.oid = p.oid), ', ')
                  AS arguments,
                  CASE proretset
                    WHEN TRUE THEN 'SETOF '
                    ELSE ''
                  END || prorettype::regtype AS returns,
                  l.lanname AS language, provolatile AS volatility,
                  proisstrict AS strict, proisagg, prosrc AS source,
                  NULLIF(probin::text, '-') AS obj_file,
                  prosecdef AS security_definer,
                  aggtransfn::regprocedure AS sfunc,
                  aggtranstype::regtype AS stype,
                  aggfinalfn::regprocedure AS finalfunc,
                  agginitval AS initcond,
                  description
           FROM pg_proc p
                JOIN pg_namespace n ON (pronamespace = n.oid)
                JOIN pg_language l ON (prolang = l.oid)
                LEFT JOIN pg_aggregate a ON (p.oid = aggfnoid)
                LEFT JOIN pg_description d
                     ON (p.oid = d.objoid AND d.objsubid = 0)
           WHERE (nspname != 'pg_catalog' AND nspname != 'information_schema')
           ORDER BY nspname, proname"""

    def _from_catalog(self):
        """Initialize the dictionary of procedures by querying the catalogs"""
        # pre-8.4 servers lack pg_get_function_arguments(); use the fallback
        if self.dbconn.version < 84000:
            self.query = self.query_83
        for proc in self.fetch():
            sch, prc, arg = proc.key()
            # NOTE(review): presumably fetch() drops falsy attributes, so
            # 'proisagg' is present only for aggregates and hasattr() is the
            # aggregate test - confirm against DbObjectDict.fetch
            if hasattr(proc, 'proisagg'):
                del proc.proisagg
                # these attributes are meaningless for aggregates
                del proc.source
                del proc.volatility
                del proc.returns
                # '-' means "no final function"
                if proc.finalfunc == '-':
                    del proc.finalfunc
                self[(sch, prc, arg)] = Aggregate(**proc.__dict__)
            else:
                self[(sch, prc, arg)] = Function(**proc.__dict__)

    def from_map(self, schema, infuncs):
        """Initialize the dictionary of functions by converting the input map

        :param schema: schema owning the functions
        :param infuncs: YAML map defining the functions
        """
        for key in infuncs.keys():
            # keys look like "function name(args)" or "aggregate name(args)"
            spc = key.find(' ')
            if spc == -1:
                raise KeyError("Unrecognized object type: %s" % key)
            objtype = key[:spc]
            if objtype not in ['function', 'aggregate']:
                raise KeyError("Unrecognized object type: %s" % key)
            fnc = key[spc + 1:]
            paren = fnc.find('(')
            if paren == -1 or fnc[-1:] != ')':
                raise KeyError("Invalid function signature: %s" % fnc)
            arguments = fnc[paren + 1:-1]
            infunc = infuncs[key]
            fnc = fnc[:paren]
            if objtype == 'function':
                self[(schema.name, fnc, arguments)] = func = Function(
                    schema=schema.name, name=fnc, arguments=arguments)
            else:
                self[(schema.name, fnc, arguments)] = func = Aggregate(
                    schema=schema.name, name=fnc, arguments=arguments)
                func.language = 'internal'
            if not infunc:
                raise ValueError("Function '%s' has no specification" % fnc)
            # copy every attribute from the input map onto the object
            for attr, val in infunc.items():
                setattr(func, attr, val)
            # normalize 'immutable'/'stable'/'volatile' back to i/s/v codes
            if hasattr(func, 'volatility'):
                func.volatility = func.volatility[:1].lower()
            if isinstance(func, Function):
                # exactly one of source / obj_file must be given
                src = hasattr(func, 'source')
                obj = hasattr(func, 'obj_file')
                if (src and obj) or not (src or obj):
                    raise ValueError("Function '%s': either source or "
                                     "obj_file must be specified" % fnc)
            if 'oldname' in infunc:
                func.oldname = infunc['oldname']
            if 'description' in infunc:
                func.description = infunc['description']

    def diff_map(self, infuncs):
        """Generate SQL to transform existing functions

        :param infuncs: a YAML map defining the new functions
        :return: list of SQL statements

        Compares the existing function definitions, as fetched from
        the catalogs, to the input map and generates SQL statements to
        transform the functions accordingly.
        """
        stmts = []
        created = False
        # check input functions (aggregates are handled in a second pass,
        # since they may depend on functions created here)
        for (sch, fnc, arg) in infuncs.keys():
            infunc = infuncs[(sch, fnc, arg)]
            if isinstance(infunc, Aggregate):
                continue
            # does it exist in the database?
            if (sch, fnc, arg) not in self:
                if not hasattr(infunc, 'oldname'):
                    # create new function
                    stmts.append(infunc.create())
                    created = True
                else:
                    stmts.append(self[(sch, fnc, arg)].rename(infunc))
            else:
                # check function objects
                diff_stmts = self[(sch, fnc, arg)].diff_map(infunc)
                # detect whether any CREATE was emitted so that
                # check_function_bodies can be disabled below
                for stmt in diff_stmts:
                    if isinstance(stmt, list) and stmt:
                        stmt = stmt[0]
                    # NOTE(review): basestring is Python 2 only
                    if isinstance(stmt, basestring) and \
                            stmt.startswith("CREATE "):
                        created = True
                        break
                stmts.append(diff_stmts)
        # check input aggregates
        for (sch, fnc, arg) in infuncs.keys():
            infunc = infuncs[(sch, fnc, arg)]
            if not isinstance(infunc, Aggregate):
                continue
            # does it exist in the database?
            if (sch, fnc, arg) not in self:
                if not hasattr(infunc, 'oldname'):
                    # create new function
                    stmts.append(infunc.create())
                else:
                    stmts.append(self[(sch, fnc, arg)].rename(infunc))
            else:
                # check function objects
                stmts.append(self[(sch, fnc, arg)].diff_map(infunc))
        # check existing functions
        for (sch, fnc, arg) in self.keys():
            func = self[(sch, fnc, arg)]
            # if missing, mark it for dropping
            # NOTE(review): _drop() tests hasattr, so the False value still
            # marks the function as dropped
            if (sch, fnc, arg) not in infuncs:
                func.dropped = False
        if created:
            # new bodies may reference objects created later in the script
            stmts.insert(0, "SET check_function_bodies = false")
        return stmts

    def _drop(self):
        """Actually drop the functions

        :return: SQL statements
        """
        stmts = []
        for (sch, fnc, arg) in self.keys():
            func = self[(sch, fnc, arg)]
            # only functions flagged by diff_map() are dropped
            if hasattr(func, 'dropped'):
                stmts.append(func.drop())
        return stmts
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.utils import AnalysisException
from pyspark.sql.functions import array, explode, col, lit, mean, min, max, rank, \
udf, pandas_udf, PandasUDFType
from pyspark.sql.window import Window
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
from pandas.util.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class WindowPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
return udf(lambda v: v + 1, 'double')
@property
def pandas_scalar_time_two(self):
return pandas_udf(lambda v: v * 2, 'double')
@property
def pandas_agg_count_udf(self):
@pandas_udf('long', PandasUDFType.GROUPED_AGG)
def count(v):
return len(v)
return count
@property
def pandas_agg_mean_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_max_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max(v):
return v.max()
return max
@property
def pandas_agg_min_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def min(v):
return v.min()
return min
@property
def unbounded_window(self):
return Window.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing).orderBy('v')
@property
def ordered_window(self):
return Window.partitionBy('id').orderBy('v')
@property
def unpartitioned_window(self):
return Window.partitionBy()
@property
def sliding_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(-2, 1)
@property
def sliding_range_window(self):
return Window.partitionBy('id').orderBy('v').rangeBetween(-2, 4)
@property
def growing_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(Window.unboundedPreceding, 3)
@property
def growing_range_window(self):
return Window.partitionBy('id').orderBy('v') \
.rangeBetween(Window.unboundedPreceding, 4)
@property
def shrinking_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(-2, Window.unboundedFollowing)
@property
def shrinking_range_window(self):
return Window.partitionBy('id').orderBy('v') \
.rangeBetween(-3, Window.unboundedFollowing)
def test_simple(self):
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
def test_multiple_udfs(self):
df = self.data
w = self.unbounded_window
result1 = df.withColumn('mean_v', self.pandas_agg_mean_udf(df['v']).over(w)) \
.withColumn('max_v', self.pandas_agg_max_udf(df['v']).over(w)) \
.withColumn('min_w', self.pandas_agg_min_udf(df['w']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w)) \
.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('min_w', min(df['w']).over(w))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_replace_existing(self):
df = self.data
w = self.unbounded_window
result1 = df.withColumn('v', self.pandas_agg_mean_udf(df['v']).over(w))
expected1 = df.withColumn('v', mean(df['v']).over(w))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v', mean_udf(df['v'] * 2).over(w) + 1)
expected1 = df.withColumn('v', mean(df['v'] * 2).over(w) + 1)
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_mixed_udf(self):
df = self.data
w = self.unbounded_window
plus_one = self.python_plus_one
time_two = self.pandas_scalar_time_two
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn(
'v2',
plus_one(mean_udf(plus_one(df['v'])).over(w)))
expected1 = df.withColumn(
'v2',
plus_one(mean(plus_one(df['v'])).over(w)))
result2 = df.withColumn(
'v2',
time_two(mean_udf(time_two(df['v'])).over(w)))
expected2 = df.withColumn(
'v2',
time_two(mean(time_two(df['v'])).over(w)))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
def test_without_partitionBy(self):
df = self.data
w = self.unpartitioned_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v2', mean_udf(df['v']).over(w))
expected1 = df.withColumn('v2', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
def test_mixed_sql_and_udf(self):
df = self.data
w = self.unbounded_window
ow = self.ordered_window
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min_udf(df['v']).over(w))
expected1 = df.withColumn('v_diff', max(df['v']).over(w) - min(df['v']).over(w))
# Test mixing sql window function and window udf in the same expression
result2 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min(df['v']).over(w))
expected2 = expected1
# Test chaining sql aggregate function and udf
result3 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('min_v', min(df['v']).over(w)) \
.withColumn('v_diff', col('max_v') - col('min_v')) \
.drop('max_v', 'min_v')
expected3 = expected1
# Test mixing sql window function and udf
result4 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
expected4 = df.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
def test_array_type(self):
df = self.data
w = self.unbounded_window
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.withColumn('v2', array_udf(df['v']).over(w))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
df = self.data
w = self.unbounded_window
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*not supported within a window function'):
foo_udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
df.withColumn('v2', foo_udf(df['v']).over(w))
    def test_bounded_simple(self):
        """Pandas aggregate UDFs over bounded (sliding row / shrinking range)
        windows match the built-in SQL aggregates.
        """
        from pyspark.sql.functions import mean, max, min, count
        df = self.data
        w1 = self.sliding_row_window
        w2 = self.shrinking_range_window
        plus_one = self.python_plus_one
        count_udf = self.pandas_agg_count_udf
        mean_udf = self.pandas_agg_mean_udf
        max_udf = self.pandas_agg_max_udf
        min_udf = self.pandas_agg_min_udf
        result1 = df.withColumn('mean_v', mean_udf(plus_one(df['v'])).over(w1)) \
            .withColumn('count_v', count_udf(df['v']).over(w2)) \
            .withColumn('max_v', max_udf(df['v']).over(w2)) \
            .withColumn('min_v', min_udf(df['v']).over(w1))
        expected1 = df.withColumn('mean_v', mean(plus_one(df['v'])).over(w1)) \
            .withColumn('count_v', count(df['v']).over(w2)) \
            .withColumn('max_v', max(df['v']).over(w2)) \
            .withColumn('min_v', min(df['v']).over(w1))
        assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_growing_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.growing_row_window
w2 = self.growing_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_sliding_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.sliding_row_window
w2 = self.sliding_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_shrinking_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.shrinking_row_window
w2 = self.shrinking_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
    def test_bounded_mixed(self):
        """Bounded (sliding) and unbounded windows can be mixed with pandas
        UDAFs inside one projection.
        """
        from pyspark.sql.functions import mean, max
        df = self.data
        w1 = self.sliding_row_window
        w2 = self.unbounded_window
        mean_udf = self.pandas_agg_mean_udf
        max_udf = self.pandas_agg_max_udf
        # NOTE(review): 'mean_unbounded_v' is computed over w1 (the sliding
        # window) in both result and expected, so the assertion holds, but
        # the column name suggests w2 was intended — confirm.
        result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w1)) \
            .withColumn('max_v', max_udf(df['v']).over(w2)) \
            .withColumn('mean_unbounded_v', mean_udf(df['v']).over(w1))
        expected1 = df.withColumn('mean_v', mean(df['v']).over(w1)) \
            .withColumn('max_v', max(df['v']).over(w2)) \
            .withColumn('mean_unbounded_v', mean(df['v']).over(w1))
        assert_frame_equal(expected1.toPandas(), result1.toPandas())
if __name__ == "__main__":
    # Re-export the tests so unittest discovers them under __main__.
    from pyspark.sql.tests.test_pandas_udf_window import *  # noqa: F401
    try:
        # Prefer xmlrunner (JUnit-style XML reports for CI) when installed;
        # otherwise fall back to the default text test runner.
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| |
# Copyright (c) 2012 OpenStack Foundation.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import mock
from oslo.config import cfg
import webob.exc
from quantum.api.v2 import attributes as attr
from quantum.common.test_lib import test_config
from quantum import context
from quantum.db import db_base_plugin_v2
from quantum.db import securitygroups_db
from quantum.extensions import securitygroup as ext_sg
from quantum.tests.unit import test_db_plugin
# Dotted import path of the test plugin defined later in this module,
# handed to the plugin loader via test_config.
DB_PLUGIN_KLASS = ('quantum.tests.unit.test_extension_security_group.'
                   'SecurityGroupTestPlugin')
# Location of the unit tests' etc/ configuration directory (one level up).
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
    """Join the given path components onto the tests' etc/ directory."""
    pieces = (ETCDIR,) + p
    return os.path.join(*pieces)
class SecurityGroupTestExtensionManager(object):
    """Minimal extension manager exposing only the security group resources.

    No extra actions or request extensions are provided.
    """

    def get_resources(self):
        # Delegate to the securitygroup extension's resource definitions.
        return ext_sg.Securitygroup.get_resources()

    def get_actions(self):
        # This extension contributes no custom actions.
        return []

    def get_request_extensions(self):
        # This extension contributes no request extensions.
        return []
class SecurityGroupsTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
    """Base test case with helpers for creating security groups and rules
    through the extension API, plus self-cleaning context managers.
    """

    def _create_security_group(self, fmt, name, description, **kwargs):
        """POST a security group and return the raw webob response."""
        data = {'security_group': {'name': name,
                                   'tenant_id': kwargs.get('tenant_id',
                                                           'test_tenant'),
                                   'description': description}}
        security_group_req = self.new_create_request('security-groups', data,
                                                     fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            security_group_req.environ['quantum.context'] = (
                context.Context('', kwargs['tenant_id']))
        return security_group_req.get_response(self.ext_api)

    def _build_security_group_rule(self, security_group_id, direction,
                                   protocol, port_range_min, port_range_max,
                                   remote_ip_prefix=None, remote_group_id=None,
                                   tenant_id='test_tenant',
                                   ethertype='IPv4'):
        """Return a request body dict describing a security group rule.

        remote_ip_prefix / remote_group_id are only included when given.
        """
        # Fix: the original dict literal listed 'ethertype' twice; the
        # duplicate key is removed (the last occurrence won anyway, so
        # behaviour is unchanged).
        data = {'security_group_rule': {'security_group_id': security_group_id,
                                        'direction': direction,
                                        'protocol': protocol,
                                        'port_range_min': port_range_min,
                                        'port_range_max': port_range_max,
                                        'tenant_id': tenant_id,
                                        'ethertype': ethertype}}
        if remote_ip_prefix:
            data['security_group_rule']['remote_ip_prefix'] = remote_ip_prefix
        if remote_group_id:
            data['security_group_rule']['remote_group_id'] = remote_group_id
        return data

    def _create_security_group_rule(self, fmt, rules, **kwargs):
        """POST one rule (or a bulk payload) and return the raw response."""
        security_group_rule_req = self.new_create_request(
            'security-group-rules', rules, fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            security_group_rule_req.environ['quantum.context'] = (
                context.Context('', kwargs['tenant_id']))
        return security_group_rule_req.get_response(self.ext_api)

    def _make_security_group(self, fmt, name, description, **kwargs):
        """Create a group and return the deserialized body.

        Raises HTTPClientError on any 4xx/5xx status.
        """
        res = self._create_security_group(fmt, name, description, **kwargs)
        if res.status_int >= 400:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    def _make_security_group_rule(self, fmt, rules, **kwargs):
        """Create a rule and return the deserialized body.

        Raises HTTPClientError on any 4xx/5xx status.
        """
        # Fix: the fmt argument and kwargs were previously ignored
        # (self.fmt was always used and kwargs silently dropped).
        res = self._create_security_group_rule(fmt, rules, **kwargs)
        if res.status_int >= 400:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    @contextlib.contextmanager
    def security_group(self, name='webservers', description='webservers',
                       fmt=None, no_delete=False):
        """Yield a freshly created security group; delete it on exit
        unless no_delete is set.
        """
        if not fmt:
            fmt = self.fmt
        security_group = self._make_security_group(fmt, name, description)
        try:
            yield security_group
        finally:
            if not no_delete:
                self._delete('security-groups',
                             security_group['security_group']['id'])

    @contextlib.contextmanager
    def security_group_rule(self, security_group_id='4cd70774-cc67-4a87-9b39-7'
                                                    'd1db38eb087',
                            direction='ingress', protocol='tcp',
                            port_range_min='22', port_range_max='22',
                            remote_ip_prefix=None, remote_group_id=None,
                            fmt=None, no_delete=False, ethertype='IPv4'):
        """Yield a freshly created security group rule; delete it on exit
        unless no_delete is set.
        """
        if not fmt:
            fmt = self.fmt
        rule = self._build_security_group_rule(security_group_id,
                                               direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix,
                                               remote_group_id,
                                               ethertype=ethertype)
        # Fix: honour the fmt argument instead of always using self.fmt
        # (fmt defaults to self.fmt above, so default behaviour is the same).
        security_group_rule = self._make_security_group_rule(fmt, rule)
        try:
            yield security_group_rule
        finally:
            if not no_delete:
                self._delete('security-group-rules',
                             security_group_rule['security_group_rule']['id'])

    def _delete_default_security_group_egress_rules(self, security_group_id):
        """Deletes default egress rules given a security group ID"""
        res = self._list(
            'security-group-rules',
            query_params='security_group_id=%s' % security_group_id)
        for r in res['security_group_rules']:
            # A default egress rule has no ports, protocol or remote prefix.
            if (r['direction'] == 'egress' and not r['port_range_max'] and
                    not r['port_range_min'] and not r['protocol']
                    and not r['remote_ip_prefix']):
                self._delete('security-group-rules', r['id'])

    def _assert_sg_rule_has_kvs(self, security_group_rule, expected_kvs):
        """Asserts that the sg rule has expected key/value pairs passed
        in as expected_kvs dictionary
        """
        for k, v in expected_kvs.iteritems():
            # Fix: assertEquals is a deprecated alias of assertEqual.
            self.assertEqual(security_group_rule[k], v)
class SecurityGroupsTestCaseXML(SecurityGroupsTestCase):
    # Re-run the whole suite using XML serialization instead of JSON.
    fmt = 'xml'
class SecurityGroupTestPlugin(db_base_plugin_v2.QuantumDbPluginV2,
                              securitygroups_db.SecurityGroupDbMixin):
    """Test plugin that implements necessary calls on create/delete port for
    associating ports with security groups.
    """

    __native_pagination_support = True
    __native_sorting_support = True

    supported_extension_aliases = ["security-group"]

    def create_port(self, context, port):
        """Create a port, attaching the tenant's default security group
        when the request does not specify any.
        """
        tenant_id = self._get_tenant_id_for_create(context, port['port'])
        default_sg = self._ensure_default_security_group(context, tenant_id)
        if not attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)):
            port['port'][ext_sg.SECURITYGROUPS] = [default_sg]
        session = context.session
        with session.begin(subtransactions=True):
            sgids = self._get_security_groups_on_port(context, port)
            port = super(SecurityGroupTestPlugin, self).create_port(context,
                                                                    port)
            self._process_port_create_security_group(context, port['id'],
                                                     sgids)
            self._extend_port_dict_security_group(context, port)
        return port

    def update_port(self, context, id, port):
        """Update a port, rebuilding its security group bindings when the
        request includes a security_groups value.
        """
        session = context.session
        with session.begin(subtransactions=True):
            if ext_sg.SECURITYGROUPS in port['port']:
                port['port'][ext_sg.SECURITYGROUPS] = (
                    self._get_security_groups_on_port(context, port))
                # delete the port binding and read it with the new rules
                self._delete_port_security_group_bindings(context, id)
                self._process_port_create_security_group(
                    context, id, port['port'].get(ext_sg.SECURITYGROUPS))
            port = super(SecurityGroupTestPlugin, self).update_port(
                context, id, port)
            self._extend_port_dict_security_group(context, port)
        return port

    def create_network(self, context, network):
        """Create a network, first ensuring the tenant's default security
        group exists.
        """
        tenant_id = self._get_tenant_id_for_create(context, network['network'])
        self._ensure_default_security_group(context, tenant_id)
        return super(SecurityGroupTestPlugin, self).create_network(context,
                                                                   network)

    def get_ports(self, context, filters=None, fields=None,
                  sorts=None, limit=None, marker=None,
                  page_reverse=False):
        """List ports, extending each with its security group info.

        Fix: 'sorts' previously defaulted to a shared mutable list ([]);
        it now defaults to None and is normalized here.
        """
        quantum_lports = super(SecurityGroupTestPlugin, self).get_ports(
            context, filters, sorts=sorts or [], limit=limit, marker=marker,
            page_reverse=page_reverse)
        for quantum_lport in quantum_lports:
            self._extend_port_dict_security_group(context, quantum_lport)
        return quantum_lports
class SecurityGroupDBTestCase(SecurityGroupsTestCase):
    """Wires the security group test plugin and extension manager into the
    shared test configuration.
    """

    def setUp(self, plugin=None):
        test_config['plugin_name_v2'] = DB_PLUGIN_KLASS
        ext_mgr = SecurityGroupTestExtensionManager()
        test_config['extension_manager'] = ext_mgr
        super(SecurityGroupDBTestCase, self).setUp(plugin)

    def tearDown(self):
        del test_config['plugin_name_v2']
        # Fix: the extension manager installed in setUp was never removed,
        # leaking state into subsequent test cases.
        test_config.pop('extension_manager', None)
        super(SecurityGroupDBTestCase, self).tearDown()
class TestSecurityGroups(SecurityGroupDBTestCase):
def test_create_security_group(self):
name = 'webservers'
description = 'my webservers'
keys = [('name', name,), ('description', description)]
with self.security_group(name, description) as security_group:
for k, v, in keys:
self.assertEqual(security_group['security_group'][k], v)
# Verify that default egress rules have been created
sg_rules = security_group['security_group']['security_group_rules']
self.assertEquals(len(sg_rules), 2)
v4_rules = filter(lambda x: x['ethertype'] == 'IPv4', sg_rules)
self.assertEquals(len(v4_rules), 1)
v4_rule = v4_rules[0]
expected = {'direction': 'egress',
'ethertype': 'IPv4',
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_rule, expected)
v6_rules = filter(lambda x: x['ethertype'] == 'IPv6', sg_rules)
self.assertEquals(len(v6_rules), 1)
v6_rule = v6_rules[0]
expected = {'direction': 'egress',
'ethertype': 'IPv6',
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_rule, expected)
def test_default_security_group(self):
with self.network():
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(groups['security_groups']), 1)
def test_create_default_security_group_fail(self):
name = 'default'
description = 'my webservers'
res = self._create_security_group(self.fmt, name, description)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
    def test_list_security_groups(self):
        """Security groups can be listed, filtered by description."""
        with contextlib.nested(self.security_group(name='sg1',
                                                   description='sg'),
                               self.security_group(name='sg2',
                                                   description='sg'),
                               self.security_group(name='sg3',
                                                   description='sg')
                               ) as security_groups:
            self._test_list_resources('security-group',
                                      security_groups,
                                      query_params='description=sg')
    def test_list_security_groups_with_sort(self):
        """Listing supports descending sort on name."""
        with contextlib.nested(self.security_group(name='sg1',
                                                   description='sg'),
                               self.security_group(name='sg2',
                                                   description='sg'),
                               self.security_group(name='sg3',
                                                   description='sg')
                               ) as (sg1, sg2, sg3):
            self._test_list_with_sort('security-group',
                                      (sg3, sg2, sg1),
                                      [('name', 'desc')],
                                      query_params='description=sg')
    def test_list_security_groups_with_pagination(self):
        """Listing supports forward pagination (page size 2)."""
        with contextlib.nested(self.security_group(name='sg1',
                                                   description='sg'),
                               self.security_group(name='sg2',
                                                   description='sg'),
                               self.security_group(name='sg3',
                                                   description='sg')
                               ) as (sg1, sg2, sg3):
            self._test_list_with_pagination('security-group',
                                            (sg1, sg2, sg3),
                                            ('name', 'asc'), 2, 2,
                                            query_params='description=sg')
    def test_list_security_groups_with_pagination_reverse(self):
        """Listing supports reverse pagination (page size 2)."""
        with contextlib.nested(self.security_group(name='sg1',
                                                   description='sg'),
                               self.security_group(name='sg2',
                                                   description='sg'),
                               self.security_group(name='sg3',
                                                   description='sg')
                               ) as (sg1, sg2, sg3):
            self._test_list_with_pagination_reverse(
                'security-group', (sg1, sg2, sg3), ('name', 'asc'), 2, 2,
                query_params='description=sg')
def test_create_security_group_rule_ethertype_invalid_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
ethertype = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', 'tcp', '22', '22', None, None,
ethertype=ethertype)
res = self._create_security_group_rule('json', rule)
self.deserialize('json', res)
self.assertEqual(res.status_int, 400)
def test_create_security_group_rule_protocol_invalid_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
protocol = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', protocol, '22', '22',
None, None)
res = self._create_security_group_rule('json', rule)
self.deserialize('json', res)
self.assertEqual(res.status_int, 400)
    def test_create_security_group_rule_case_insensitive(self):
        """Protocol and ethertype values are accepted in any case and
        returned in canonical form.
        """
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            remote_ip_prefix = "10.0.0.0/24"
            protocol = 'TCP'
            port_range_min = 22
            port_range_max = 22
            ethertype = 'ipV4'
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max,
                                          remote_ip_prefix,
                                          ethertype=ethertype) as rule:
                # the lower-cased / canonical values are returned
                self.assertEqual(rule['security_group_rule']['protocol'],
                                 protocol.lower())
                self.assertEqual(rule['security_group_rule']['ethertype'],
                                 'IPv4')
    def test_get_security_group(self):
        """Showing a group returns its rules (2 default egress + 1 added)."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            remote_group_id = sg['security_group']['id']
            # NOTE: the show request is built here but its response is not
            # fetched until after the ingress rule below has been created.
            res = self.new_show_request('security-groups', remote_group_id)
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            remote_ip_prefix = "10.0.0.0/24"
            protocol = 'tcp'
            port_range_min = 22
            port_range_max = 22
            keys = [('remote_ip_prefix', remote_ip_prefix),
                    ('security_group_id', security_group_id),
                    ('direction', direction),
                    ('protocol', protocol),
                    ('port_range_min', port_range_min),
                    ('port_range_max', port_range_max)]
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max,
                                          remote_ip_prefix):
                group = self.deserialize(
                    self.fmt, res.get_response(self.ext_api))
                sg_rule = group['security_group']['security_group_rules']
                self.assertEqual(group['security_group']['id'],
                                 remote_group_id)
                self.assertEqual(len(sg_rule), 3)
                sg_rule = filter(lambda x: x['direction'] == 'ingress',
                                 sg_rule)
                for k, v, in keys:
                    self.assertEqual(sg_rule[0][k], v)
def test_delete_security_group(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description, no_delete=True) as sg:
remote_group_id = sg['security_group']['id']
self._delete('security-groups', remote_group_id, 204)
def test_delete_default_security_group_fail(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
self._delete('security-groups', sg['security_groups'][0]['id'],
409)
    def test_default_security_group_rules(self):
        """The default group ships four rules: open egress and
        group-restricted ingress, each for IPv4 and IPv6.
        """
        with self.network():
            res = self.new_list_request('security-groups')
            groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
            self.assertEqual(len(groups['security_groups']), 1)
            security_group_id = groups['security_groups'][0]['id']
            res = self.new_list_request('security-group-rules')
            rules = self.deserialize(self.fmt, res.get_response(self.ext_api))
            self.assertEqual(len(rules['security_group_rules']), 4)
            # Verify default rule for v4 egress
            sg_rules = rules['security_group_rules']
            rules = filter(
                lambda x: (
                    x['direction'] == 'egress' and x['ethertype'] == 'IPv4'),
                sg_rules)
            self.assertEqual(len(rules), 1)
            v4_egress = rules[0]
            expected = {'direction': 'egress',
                        'ethertype': 'IPv4',
                        'remote_group_id': None,
                        'remote_ip_prefix': None,
                        'protocol': None,
                        'port_range_max': None,
                        'port_range_min': None}
            self._assert_sg_rule_has_kvs(v4_egress, expected)
            # Verify default rule for v6 egress
            rules = filter(
                lambda x: (
                    x['direction'] == 'egress' and x['ethertype'] == 'IPv6'),
                sg_rules)
            self.assertEqual(len(rules), 1)
            v6_egress = rules[0]
            expected = {'direction': 'egress',
                        'ethertype': 'IPv6',
                        'remote_group_id': None,
                        'remote_ip_prefix': None,
                        'protocol': None,
                        'port_range_max': None,
                        'port_range_min': None}
            self._assert_sg_rule_has_kvs(v6_egress, expected)
            # Verify default rule for v4 ingress
            rules = filter(
                lambda x: (
                    x['direction'] == 'ingress' and x['ethertype'] == 'IPv4'),
                sg_rules)
            self.assertEqual(len(rules), 1)
            v4_ingress = rules[0]
            expected = {'direction': 'ingress',
                        'ethertype': 'IPv4',
                        'remote_group_id': security_group_id,
                        'remote_ip_prefix': None,
                        'protocol': None,
                        'port_range_max': None,
                        'port_range_min': None}
            self._assert_sg_rule_has_kvs(v4_ingress, expected)
            # Verify default rule for v6 ingress
            rules = filter(
                lambda x: (
                    x['direction'] == 'ingress' and x['ethertype'] == 'IPv6'),
                sg_rules)
            self.assertEqual(len(rules), 1)
            v6_ingress = rules[0]
            expected = {'direction': 'ingress',
                        'ethertype': 'IPv6',
                        'remote_group_id': security_group_id,
                        'remote_ip_prefix': None,
                        'protocol': None,
                        'port_range_max': None,
                        'port_range_min': None}
            self._assert_sg_rule_has_kvs(v6_ingress, expected)
    def test_create_security_group_rule_remote_ip_prefix(self):
        """A rule created with a remote_ip_prefix echoes all fields back."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            remote_ip_prefix = "10.0.0.0/24"
            protocol = 'tcp'
            port_range_min = 22
            port_range_max = 22
            keys = [('remote_ip_prefix', remote_ip_prefix),
                    ('security_group_id', security_group_id),
                    ('direction', direction),
                    ('protocol', protocol),
                    ('port_range_min', port_range_min),
                    ('port_range_max', port_range_max)]
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max,
                                          remote_ip_prefix) as rule:
                for k, v, in keys:
                    self.assertEqual(rule['security_group_rule'][k], v)
    def test_create_security_group_rule_group_id(self):
        """A rule created with a remote_group_id echoes all fields back."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            with self.security_group(name, description) as sg2:
                security_group_id = sg['security_group']['id']
                direction = "ingress"
                remote_group_id = sg2['security_group']['id']
                protocol = 'tcp'
                port_range_min = 22
                port_range_max = 22
                keys = [('remote_group_id', remote_group_id),
                        ('security_group_id', security_group_id),
                        ('direction', direction),
                        ('protocol', protocol),
                        ('port_range_min', port_range_min),
                        ('port_range_max', port_range_max)]
                with self.security_group_rule(security_group_id, direction,
                                              protocol, port_range_min,
                                              port_range_max,
                                              remote_group_id=remote_group_id
                                              ) as rule:
                    for k, v, in keys:
                        self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_source_group_ip_and_ip_prefix(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = 'tcp'
port_range_min = 22
port_range_max = 22
remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 400)
def test_create_security_group_rule_bad_security_group_id(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = 'tcp'
port_range_min = 22
port_range_max = 22
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 404)
    def test_create_security_group_rule_bad_tenant(self):
        """A rule whose tenant differs from its group's tenant returns 404."""
        with self.security_group() as sg:
            rule = {'security_group_rule':
                    {'security_group_id': sg['security_group']['id'],
                     'direction': 'ingress',
                     'protocol': 'tcp',
                     'port_range_min': '22',
                     'port_range_max': '22',
                     'tenant_id': "bad_tenant"}}
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 404)
    def test_create_security_group_rule_bad_tenant_remote_group_id(self):
        """A remote_group_id owned by another tenant yields 404 when the
        request runs under that other tenant's context.
        """
        with self.security_group() as sg:
            res = self._create_security_group(self.fmt, 'webservers',
                                              'webservers',
                                              tenant_id='bad_tenant')
            sg2 = self.deserialize(self.fmt, res)
            rule = {'security_group_rule':
                    {'security_group_id': sg2['security_group']['id'],
                     'direction': 'ingress',
                     'protocol': 'tcp',
                     'port_range_min': '22',
                     'port_range_max': '22',
                     'tenant_id': 'bad_tenant',
                     'remote_group_id': sg['security_group']['id']}}
            res = self._create_security_group_rule(self.fmt, rule,
                                                   tenant_id='bad_tenant',
                                                   set_context=True)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 404)
    def test_create_security_group_rule_bad_tenant_security_group_rule(self):
        """A rule targeting another tenant's group yields 404 when the
        request runs under the other tenant's context.
        """
        with self.security_group() as sg:
            res = self._create_security_group(self.fmt, 'webservers',
                                              'webservers',
                                              tenant_id='bad_tenant')
            self.deserialize(self.fmt, res)
            rule = {'security_group_rule':
                    {'security_group_id': sg['security_group']['id'],
                     'direction': 'ingress',
                     'protocol': 'tcp',
                     'port_range_min': '22',
                     'port_range_max': '22',
                     'tenant_id': 'bad_tenant'}}
            res = self._create_security_group_rule(self.fmt, rule,
                                                   tenant_id='bad_tenant',
                                                   set_context=True)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 404)
    def test_create_security_group_rule_bad_remote_group_id(self):
        """A rule referencing a nonexistent remote group returns 404."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            remote_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
            direction = "ingress"
            protocol = 'tcp'
            port_range_min = 22
            port_range_max = 22
        # NOTE(review): the rule is created after the 'with' block exits,
        # i.e. after the security group itself has been deleted — either the
        # missing group or the missing remote group produces the expected
        # 404; confirm this dedent is intentional.
        rule = self._build_security_group_rule(security_group_id, direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_group_id=remote_group_id)
        res = self._create_security_group_rule(self.fmt, rule)
        self.deserialize(self.fmt, res)
        self.assertEqual(res.status_int, 404)
    def test_create_security_group_rule_duplicate_rules(self):
        """Creating an identical rule twice returns 409."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress', 'tcp', '22', '22')
                self._create_security_group_rule(self.fmt, rule)
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, 409)
    def test_create_security_group_rule_min_port_greater_max(self):
        """port_range_min greater than port_range_max is rejected (400)."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress', 'tcp', '50', '22')
                self._create_security_group_rule(self.fmt, rule)
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, 400)
    def test_create_security_group_rule_ports_but_no_protocol(self):
        """Port ranges without a protocol are rejected (400)."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress', None, '22', '22')
                self._create_security_group_rule(self.fmt, rule)
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, 400)
    def test_list_ports_security_group(self):
        """A port created without groups gets the tenant default attached."""
        with self.network() as n:
            with self.subnet(n):
                res = self._create_port(self.fmt, n['network']['id'])
                self.deserialize(self.fmt, res)
                res = self.new_list_request('ports')
                ports = self.deserialize(self.fmt,
                                         res.get_response(self.api))
                port = ports['ports'][0]
                self.assertEqual(len(port[ext_sg.SECURITYGROUPS]), 1)
                self._delete('ports', port['id'])
    def test_list_security_group_rules(self):
        """A group's egress rules can be listed with query filters."""
        with self.security_group(name='sg') as sg:
            security_group_id = sg['security_group']['id']
            with contextlib.nested(self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=22,
                                                            port_range_max=22),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=23,
                                                            port_range_max=23),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=24,
                                                            port_range_max=24)
                                   ) as (sgr1, sgr2, sgr3):
                # Delete default rules as they would fail the following
                # assertion at the end.
                self._delete_default_security_group_egress_rules(
                    security_group_id)
                q = 'direction=egress&security_group_id=' + security_group_id
                self._test_list_resources('security-group-rule',
                                          [sgr1, sgr2, sgr3],
                                          query_params=q)
    def test_list_security_group_rules_with_sort(self):
        """Rule listing supports descending sort on port_range_max."""
        with self.security_group(name='sg') as sg:
            security_group_id = sg['security_group']['id']
            with contextlib.nested(self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=22,
                                                            port_range_max=22),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=23,
                                                            port_range_max=23),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=24,
                                                            port_range_max=24)
                                   ) as (sgr1, sgr2, sgr3):
                # Delete default rules as they would fail the following
                # assertion at the end.
                self._delete_default_security_group_egress_rules(
                    security_group_id)
                q = 'direction=egress&security_group_id=' + security_group_id
                self._test_list_with_sort('security-group-rule',
                                          (sgr3, sgr2, sgr1),
                                          [('port_range_max', 'desc')],
                                          query_params=q)
    def test_list_security_group_rules_with_pagination(self):
        """Rule listing supports forward pagination (page size 2)."""
        with self.security_group(name='sg') as sg:
            security_group_id = sg['security_group']['id']
            with contextlib.nested(self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=22,
                                                            port_range_max=22),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=23,
                                                            port_range_max=23),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=24,
                                                            port_range_max=24)
                                   ) as (sgr1, sgr2, sgr3):
                # Delete default rules as they would fail the following
                # assertion at the end.
                self._delete_default_security_group_egress_rules(
                    security_group_id)
                q = 'direction=egress&security_group_id=' + security_group_id
                self._test_list_with_pagination(
                    'security-group-rule', (sgr3, sgr2, sgr1),
                    ('port_range_max', 'desc'), 2, 2,
                    query_params=q)
    def test_list_security_group_rules_with_pagination_reverse(self):
        """Rule listing supports reverse pagination (page size 2)."""
        with self.security_group(name='sg') as sg:
            security_group_id = sg['security_group']['id']
            with contextlib.nested(self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=22,
                                                            port_range_max=22),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=23,
                                                            port_range_max=23),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=24,
                                                            port_range_max=24)
                                   ) as (sgr1, sgr2, sgr3):
                # NOTE(review): unlike the sibling listing tests, the default
                # egress rules are not deleted here and the query filters only
                # on direction — presumably the pagination helper tolerates
                # the extra default rules; confirm if this test flakes.
                self._test_list_with_pagination_reverse(
                    'security-group-rule', (sgr3, sgr2, sgr1),
                    ('port_range_max', 'desc'), 2, 2,
                    query_params='direction=egress')
    def test_update_port_with_security_group(self):
        """A port update can attach a group; a later update that omits the
        security_groups field leaves the attachment unchanged.
        """
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    res = self._create_port(self.fmt, n['network']['id'])
                    port = self.deserialize(self.fmt, res)
                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name'],
                                     ext_sg.SECURITYGROUPS:
                                     [sg['security_group']['id']]}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
                                     sg['security_group']['id'])
                    # Test update port without security group
                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name']}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
                                     sg['security_group']['id'])
                    self._delete('ports', port['port']['id'])
    def test_update_port_with_multiple_security_groups(self):
        """A port may be created with several security groups attached."""
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg1:
                    with self.security_group() as sg2:
                        res = self._create_port(
                            self.fmt, n['network']['id'],
                            security_groups=[sg1['security_group']['id'],
                                             sg2['security_group']['id']])
                        port = self.deserialize(self.fmt, res)
                        self.assertEqual(len(
                            port['port'][ext_sg.SECURITYGROUPS]), 2)
                        self._delete('ports', port['port']['id'])
    def test_update_port_remove_security_group_empty_list(self):
        """Updating with security_groups=[] detaches all groups."""
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    res = self._create_port(self.fmt, n['network']['id'],
                                            security_groups=(
                                            [sg['security_group']['id']]))
                    port = self.deserialize(self.fmt, res)
                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name'],
                                     'security_groups': []}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(res['port'].get(ext_sg.SECURITYGROUPS),
                                     [])
                    self._delete('ports', port['port']['id'])
    def test_update_port_remove_security_group_none(self):
        """Updating with security_groups=None also detaches all groups."""
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    res = self._create_port(self.fmt, n['network']['id'],
                                            security_groups=(
                                            [sg['security_group']['id']]))
                    port = self.deserialize(self.fmt, res)
                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name'],
                                     'security_groups': None}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(res['port'].get(ext_sg.SECURITYGROUPS),
                                     [])
                    self._delete('ports', port['port']['id'])
def test_create_port_with_bad_security_group(self):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
security_groups=['bad_id'])
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 400)
    def test_create_delete_security_group_port_in_use(self):
        """Deleting a security group still bound to a port returns 409."""
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    res = self._create_port(self.fmt, n['network']['id'],
                                            security_groups=(
                                                [sg['security_group']['id']]))
                    port = self.deserialize(self.fmt, res)
                    self.assertEqual(port['port'][ext_sg.SECURITYGROUPS][0],
                                     sg['security_group']['id'])
                    # try to delete security group that's in use
                    res = self._delete('security-groups',
                                       sg['security_group']['id'], 409)
                    # delete the blocking port
                    self._delete('ports', port['port']['id'])
    def test_create_security_group_rule_bulk_native(self):
        """Bulk rule creation via the plugin's native bulk support."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg:
            rule1 = self._build_security_group_rule(sg['security_group']['id'],
                                                    'ingress', 'tcp', '22',
                                                    '22', '10.0.0.1/24')
            rule2 = self._build_security_group_rule(sg['security_group']['id'],
                                                    'ingress', 'tcp', '23',
                                                    '23', '10.0.0.1/24')
            # Two distinct rules posted in a single bulk request.
            rules = {'security_group_rules': [rule1['security_group_rule'],
                                              rule2['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 201)
    def test_create_security_group_rule_bulk_emulated(self):
        """Bulk rule creation when the API layer emulates bulk support."""
        real_has_attr = hasattr
        #ensures the API choose the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)
        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            with self.security_group() as sg:
                rule1 = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress', 'tcp', '22', '22',
                    '10.0.0.1/24')
                rule2 = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress', 'tcp', '23', '23',
                    '10.0.0.1/24')
                rules = {'security_group_rules': [rule1['security_group_rule'],
                                                  rule2['security_group_rule']]
                         }
                res = self._create_security_group_rule(self.fmt, rules)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, 201)
    def test_create_security_group_rule_duplicate_rule_in_post(self):
        """Posting the same rule twice in one bulk request returns 409."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg:
            rule = self._build_security_group_rule(sg['security_group']['id'],
                                                   'ingress', 'tcp', '22',
                                                   '22', '10.0.0.1/24')
            # The same rule appears twice in a single bulk payload.
            rules = {'security_group_rules': [rule['security_group_rule'],
                                              rule['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            rule = self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 409)
    def test_create_security_group_rule_duplicate_rule_in_post_emulated(self):
        """Duplicate rule in one bulk request returns 409 (emulated bulk)."""
        real_has_attr = hasattr
        #ensures the API choose the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)
        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            with self.security_group() as sg:
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress', 'tcp', '22', '22',
                    '10.0.0.1/24')
                rules = {'security_group_rules': [rule['security_group_rule'],
                                                  rule['security_group_rule']]}
                res = self._create_security_group_rule(self.fmt, rules)
                rule = self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, 409)
    def test_create_security_group_rule_duplicate_rule_db(self):
        """Re-posting an already-stored rule returns 409."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg:
            rule = self._build_security_group_rule(sg['security_group']['id'],
                                                   'ingress', 'tcp', '22',
                                                   '22', '10.0.0.1/24')
            rules = {'security_group_rules': [rule]}
            # First create succeeds; the identical second one conflicts.
            self._create_security_group_rule(self.fmt, rules)
            res = self._create_security_group_rule(self.fmt, rules)
            rule = self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 409)
def test_create_security_group_rule_duplicate_rule_db_emulated(self):
real_has_attr = hasattr
#ensures the API choose the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', 'tcp', '22', '22',
'10.0.0.1/24')
rules = {'security_group_rules': [rule]}
self._create_security_group_rule(self.fmt, rules)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
    def test_create_security_group_rule_differnt_security_group_ids(self):
        """Bulk rule creation spanning two security groups returns 400."""
        # NOTE: "differnt" is a typo for "different"; the name is kept
        # as-is so external references to this test keep working.
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg1:
            with self.security_group() as sg2:
                rule1 = self._build_security_group_rule(
                    sg1['security_group']['id'], 'ingress', 'tcp', '22', '22',
                    '10.0.0.1/24')
                rule2 = self._build_security_group_rule(
                    sg2['security_group']['id'], 'ingress', 'tcp', '23', '23',
                    '10.0.0.1/24')
                rules = {'security_group_rules': [rule1['security_group_rule'],
                                                  rule2['security_group_rule']]
                         }
                res = self._create_security_group_rule(self.fmt, rules)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, 400)
def test_create_security_group_rule_with_invalid_ethertype(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = 'tcp'
port_range_min = 22
port_range_max = 22
remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id,
ethertype='IPv5')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 400)
    def test_create_security_group_rule_with_invalid_protocol(self):
        """Rule creation with a malformed protocol value returns 400."""
        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
        direction = "ingress"
        remote_ip_prefix = "10.0.0.0/24"
        # 'tcp/ip' is not a valid protocol identifier
        protocol = 'tcp/ip'
        port_range_min = 22
        port_range_max = 22
        remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
        rule = self._build_security_group_rule(security_group_id, direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix,
                                               remote_group_id)
        res = self._create_security_group_rule(self.fmt, rule)
        self.deserialize(self.fmt, res)
        self.assertEqual(res.status_int, 400)
def test_create_port_with_non_uuid(self):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
security_groups=['not_valid'])
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 400)
class TestSecurityGroupsXML(TestSecurityGroups):
    """Re-runs the security group API tests with the XML wire format."""
    fmt = 'xml'
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import logging
import re
import urllib
import urllib2
from quantum.openstack.common.gettextutils import _
from quantum.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
# Module-level rule store; populated by set_rules() and cleared by reset().
_rules = None
# Registry of check implementations, keyed by kind (e.g. "role", "rule");
# the None key holds the default/generic handler.  Filled by register().
_checks = {}
class Rules(dict):
    """
    A store for rules.  Handles the default_rule setting directly.
    """

    @classmethod
    def load_json(cls, data, default_rule=None):
        """
        Build a Rules store from a JSON document mapping rule names to
        policy strings (or legacy list-of-lists rules).
        """
        parsed = jsonutils.loads(data)
        rules = dict((name, parse_rule(text))
                     for name, text in parsed.items())
        return cls(rules, default_rule)

    def __init__(self, rules=None, default_rule=None):
        """Initialize the Rules store."""
        super(Rules, self).__init__(rules or {})
        self.default_rule = default_rule

    def __missing__(self, key):
        """Implements the default rule handling."""
        # Without a usable default rule, behave like a plain dict.
        if not self.default_rule or self.default_rule not in self:
            raise KeyError(key)
        return self[self.default_rule]

    def __str__(self):
        """Dumps a string representation of the rules."""
        # TrueCheck serializes to the empty string; every other check
        # serializes to its canonical policy-language form.
        out_rules = dict(
            (name, '' if isinstance(rule, TrueCheck) else str(rule))
            for name, rule in self.items())
        return jsonutils.dumps(out_rules, indent=4)
# Really have to figure out a way to deprecate this
def set_rules(rules):
    """Set the rules in use for policy checks."""
    # Rebinds the module-level store consulted by check().
    global _rules
    _rules = rules
# Ditto
def reset():
    """Clear the rules used for policy checks."""
    # After this, check() fails closed until set_rules() is called again.
    global _rules
    _rules = None
def check(rule, target, creds, exc=None, *args, **kwargs):
    """
    Checks authorization of a rule against the target and credentials.

    :param rule: The rule to evaluate; either a registered rule name or
                 a BaseCheck tree.
    :param target: As much information about the object being operated
                   on as possible, as a dictionary.
    :param creds: As much information about the user performing the
                  action as possible, as a dictionary.
    :param exc: Class of the exception to raise if the check fails.
                Any remaining positional and keyword arguments are
                passed to the exception constructor.  If exc is not
                provided, a failed check simply returns False.
    :return: Returns False if the policy does not allow the action and
             exc is not provided; otherwise, returns a value that
             evaluates to True.  Note: for rules using the "case"
             expression, this True value will be the specified string
             from the expression.
    """

    if isinstance(rule, BaseCheck):
        # Already a Check tree; evaluate it directly.
        result = rule(target, creds)
    else:
        # Resolve the rule by name; any failure to resolve (no rules
        # loaded, or unknown rule name) means we fail closed.
        result = False
        if _rules:
            try:
                result = _rules[rule](target, creds)
            except KeyError:
                pass

    # Optionally translate a denial into an exception.
    if exc and result is False:
        raise exc(*args, **kwargs)

    return result
class BaseCheck(object):
    """
    Abstract base class for Check classes.
    """
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def __str__(self):
        """
        Retrieve a string representation of the Check tree rooted at
        this node.
        """
        pass
    @abc.abstractmethod
    def __call__(self, target, cred):
        """
        Perform the check.  Returns False to reject the access or a
        true value (not necessarily True) to accept the access.
        """
        pass
class FalseCheck(BaseCheck):
    """
    A policy check that unconditionally denies access.
    """

    def __str__(self):
        """Return the policy-language spelling of "always deny"."""
        return "!"

    def __call__(self, target, cred):
        """Evaluate the check; always rejects."""
        return False
class TrueCheck(BaseCheck):
    """
    A policy check that unconditionally allows access.
    """

    def __str__(self):
        """Return the policy-language spelling of "always allow"."""
        return "@"

    def __call__(self, target, cred):
        """Evaluate the check; always accepts."""
        return True
class Check(BaseCheck):
    """
    Base class for user-defined policy checks of the "kind:match" form.
    """

    def __init__(self, kind, match):
        """
        Store the two halves of the check string.

        :param kind: The kind of the check, i.e., the field before the
                     ':'.
        :param match: The match of the check, i.e., the field after
                      the ':'.
        """
        self.kind = kind
        self.match = match

    def __str__(self):
        """Reassemble the canonical "kind:match" form."""
        return ":".join([self.kind, self.match])
class NotCheck(BaseCheck):
    """
    Implements the "not" operator by inverting the result of a wrapped
    policy check.
    """

    def __init__(self, rule):
        """
        Initialize the 'not' check.

        :param rule: The rule to negate.  Must be a Check.
        """
        self.rule = rule

    def __str__(self):
        """Render the negation in policy-language form."""
        return "not %s" % self.rule

    def __call__(self, target, cred):
        """Evaluate the wrapped check and invert its truth value."""
        result = self.rule(target, cred)
        return not result
class AndCheck(BaseCheck):
    """
    Implements the "and" operator: the check passes only when every
    sub-rule passes.
    """

    def __init__(self, rules):
        """
        Initialize the 'and' check.

        :param rules: A list of rules that will be tested.
        """
        self.rules = rules

    def __str__(self):
        """Render as a parenthesized "and"-joined expression."""
        parts = [str(rule) for rule in self.rules]
        return "(%s)" % " and ".join(parts)

    def __call__(self, target, cred):
        """Return True only when no sub-rule rejects the access."""
        return all(rule(target, cred) for rule in self.rules)

    def add_check(self, rule):
        """
        Append one more rule to the list that will be tested.  Returns
        the AndCheck object for convenience.
        """
        self.rules.append(rule)
        return self
class OrCheck(BaseCheck):
    """
    Implements the "or" operator: the check passes when at least one
    sub-rule passes.
    """

    def __init__(self, rules):
        """
        Initialize the 'or' check.

        :param rules: A list of rules that will be tested.
        """
        self.rules = rules

    def __str__(self):
        """Render as a parenthesized "or"-joined expression."""
        parts = [str(rule) for rule in self.rules]
        return "(%s)" % " or ".join(parts)

    def __call__(self, target, cred):
        """Return True as soon as any sub-rule accepts the access."""
        return any(rule(target, cred) for rule in self.rules)

    def add_check(self, rule):
        """
        Append one more rule to the list that will be tested.  Returns
        the OrCheck object for convenience.
        """
        self.rules.append(rule)
        return self
def _parse_check(rule):
    """
    Parse a single base check rule into an appropriate Check object.
    Anything that cannot be understood parses to FalseCheck (fail
    closed).
    """
    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()
    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_("Failed to understand rule %(rule)s") % locals())
        # If the rule is invalid, we'll fail closed
        return FalseCheck()
    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        # Fall back on the registered default/generic check handler
        return _checks[None](kind, match)
    else:
        LOG.error(_("No handler for matches of kind %s") % kind)
        return FalseCheck()
def _parse_list_rule(rule):
    """
    Provided for backwards compatibility.  Translates the old
    list-of-lists syntax into a tree of Check objects.
    """

    # An empty rule always accepts
    if not rule:
        return TrueCheck()

    # Each inner list is "and"-ed together; the outer list is "or"-ed
    or_list = []
    for inner_rule in rule:
        # Skip empty inner lists entirely
        if not inner_rule:
            continue

        # A bare string behaves like a one-element list
        if isinstance(inner_rule, basestring):
            inner_rule = [inner_rule]

        and_list = [_parse_check(check) for check in inner_rule]

        # A single check needs no AndCheck wrapper
        if len(and_list) == 1:
            or_list.append(and_list[0])
        else:
            or_list.append(AndCheck(and_list))

    # Collapse degenerate outer lists
    if not or_list:
        return FalseCheck()
    if len(or_list) == 1:
        return or_list[0]
    return OrCheck(or_list)
# Used for tokenizing the policy language: tokens are separated by
# whitespace; parentheses get special treatment in _parse_tokenize().
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
    """
    Tokenizer for the policy language.  Yields (token_type, value)
    pairs, where token_type is one of '(', ')', 'and', 'or', 'not',
    'string' or 'check'.
    Most of the single-character tokens are specified in the
    _tokenize_re; however, parentheses need to be handled specially,
    because they can appear inside a check string.  Thankfully, those
    parentheses that appear inside a check string can never occur at
    the very beginning or end ("%(variable)s" is the correct syntax).
    """
    for tok in _tokenize_re.split(rule):
        # Skip empty tokens
        if not tok or tok.isspace():
            continue
        # Handle leading parens on the token
        clean = tok.lstrip('(')
        for i in range(len(tok) - len(clean)):
            yield '(', '('
        # If it was only parentheses, continue
        if not clean:
            continue
        else:
            tok = clean
        # Handle trailing parens on the token
        clean = tok.rstrip(')')
        trail = len(tok) - len(clean)
        # Yield the cleaned token
        lowered = clean.lower()
        if lowered in ('and', 'or', 'not'):
            # Special tokens
            yield lowered, clean
        elif clean:
            # Not a special token, but not composed solely of ')'
            if len(tok) >= 2 and ((tok[0], tok[-1]) in
                                  [('"', '"'), ("'", "'")]):
                # It's a quoted string
                yield 'string', tok[1:-1]
            else:
                yield 'check', _parse_check(clean)
        # Yield the trailing parens
        for i in range(trail):
            yield ')', ')'
class ParseStateMeta(type):
    """
    Metaclass for the ParseState class.  Facilitates identifying
    reduction methods.
    """
    def __new__(mcs, name, bases, cls_dict):
        """
        Create the class.  Injects the 'reducers' list, a list of
        tuples matching token sequences to the names of the
        corresponding reduction methods.
        """
        reducers = []
        for key, value in cls_dict.items():
            # Only methods tagged by the @reducer decorator participate
            if not hasattr(value, 'reducers'):
                continue
            for reduction in value.reducers:
                reducers.append((reduction, key))
        cls_dict['reducers'] = reducers
        return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
    """
    Decorator for reduction methods.  Arguments are a sequence of
    tokens, in order, which should trigger running this reduction
    method.
    """

    def decorator(func):
        # Ensure the function carries a list of reducer sequences,
        # then record this token sequence on it.
        func.reducers = getattr(func, 'reducers', [])
        func.reducers.append(list(tokens))
        return func

    return decorator
class ParseState(object):
    """
    Implement the core of parsing the policy language.  Uses a greedy
    reduction algorithm to reduce a sequence of tokens into a single
    terminal, the value of which will be the root of the Check tree.
    Note: error reporting is rather lacking.  The best we can get with
    this parser formulation is an overall "parse failed" error.
    Fortunately, the policy language is simple enough that this
    shouldn't be that big a problem.
    """
    # Collected by ParseStateMeta from the @reducer-tagged methods below.
    __metaclass__ = ParseStateMeta
    def __init__(self):
        """Initialize the ParseState."""
        # Parallel stacks of token types and their associated values.
        self.tokens = []
        self.values = []
    def reduce(self):
        """
        Perform a greedy reduction of the token stream.  If a reducer
        method matches, it will be executed, then the reduce() method
        will be called recursively to search for any more possible
        reductions.
        """
        for reduction, methname in self.reducers:
            # Match when the top of the token stack equals the pattern.
            if (len(self.tokens) >= len(reduction) and
                    self.tokens[-len(reduction):] == reduction):
                # Get the reduction method
                meth = getattr(self, methname)
                # Reduce the token stream
                results = meth(*self.values[-len(reduction):])
                # Update the tokens and values
                self.tokens[-len(reduction):] = [r[0] for r in results]
                self.values[-len(reduction):] = [r[1] for r in results]
                # Check for any more reductions
                return self.reduce()
    def shift(self, tok, value):
        """Adds one more token to the state.  Calls reduce()."""
        self.tokens.append(tok)
        self.values.append(value)
        # Do a greedy reduce...
        self.reduce()
    @property
    def result(self):
        """
        Obtain the final result of the parse.  Raises ValueError if
        the parse failed to reduce to a single result.
        """
        if len(self.values) != 1:
            raise ValueError("Could not parse rule")
        return self.values[0]
    @reducer('(', 'check', ')')
    @reducer('(', 'and_expr', ')')
    @reducer('(', 'or_expr', ')')
    def _wrap_check(self, _p1, check, _p2):
        """Turn parenthesized expressions into a 'check' token."""
        return [('check', check)]
    @reducer('check', 'and', 'check')
    def _make_and_expr(self, check1, _and, check2):
        """
        Create an 'and_expr' from two checks joined by the 'and'
        operator.
        """
        return [('and_expr', AndCheck([check1, check2]))]
    @reducer('and_expr', 'and', 'check')
    def _extend_and_expr(self, and_expr, _and, check):
        """
        Extend an 'and_expr' by adding one more check.
        """
        return [('and_expr', and_expr.add_check(check))]
    @reducer('check', 'or', 'check')
    def _make_or_expr(self, check1, _or, check2):
        """
        Create an 'or_expr' from two checks joined by the 'or'
        operator.
        """
        return [('or_expr', OrCheck([check1, check2]))]
    @reducer('or_expr', 'or', 'check')
    def _extend_or_expr(self, or_expr, _or, check):
        """
        Extend an 'or_expr' by adding one more check.
        """
        return [('or_expr', or_expr.add_check(check))]
    @reducer('not', 'check')
    def _make_not_expr(self, _not, check):
        """Invert the result of another check."""
        return [('check', NotCheck(check))]
def _parse_text_rule(rule):
    """
    Translates a policy written in the policy language into a tree of
    Check objects.  An unparsable rule yields FalseCheck (fail closed).
    """
    # Empty rule means always accept
    if not rule:
        return TrueCheck()
    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)
    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_("Failed to understand rule %(rule)r") % locals())
        # Fail closed
        return FalseCheck()
def parse_rule(rule):
    """
    Parses a policy rule into a tree of Check objects.
    Strings are interpreted as the policy language; anything else is
    treated as the legacy list-of-lists form.
    """
    if not isinstance(rule, basestring):
        return _parse_list_rule(rule)
    return _parse_text_rule(rule)
def register(name, func=None):
    """
    Register a function or Check class as a policy check.

    :param name: Gives the name of the check type, e.g., 'rule',
                 'role', etc.  If name is None, a default check type
                 will be registered.
    :param func: If given, provides the function or class to register.
                 If not given, returns a function taking one argument
                 to specify the function or class to register,
                 allowing use as a decorator.
    """

    def decorator(func):
        # Record the implementation under its check-type name and hand
        # it back unchanged, per the decorator protocol.
        _checks[name] = func
        return func

    # Support both direct calls and decorator usage.
    return decorator(func) if func else decorator
@register("rule")
class RuleCheck(Check):
    """Check that delegates to another named rule ("rule:<name>")."""
    def __call__(self, target, creds):
        """
        Recursively checks credentials based on the defined rules.
        """
        try:
            return _rules[self.match](target, creds)
        except KeyError:
            # We don't have any matching rule; fail closed
            return False
@register("role")
class RoleCheck(Check):
    """Check that the credentials carry a given role ("role:<name>")."""
    def __call__(self, target, creds):
        """Check that there is a matching role in the cred dict."""
        # Role comparison is case-insensitive.
        wanted = self.match.lower()
        return wanted in (role.lower() for role in creds['roles'])
@register('http')
class HttpCheck(Check):
    """Check that delegates the decision to a remote HTTP server."""
    def __call__(self, target, creds):
        """
        Check http: rules by calling to a remote server.
        This example implementation simply verifies that the response
        is exactly 'True'.
        """
        # The match is a URL template filled in from the target dict.
        url = ('http:' + self.match) % target
        data = {'target': jsonutils.dumps(target),
                'credentials': jsonutils.dumps(creds)}
        post_data = urllib.urlencode(data)
        # NOTE(review): no timeout is set, so a hung server would block
        # the caller indefinitely -- confirm this is acceptable.
        f = urllib2.urlopen(url, post_data)
        return f.read() == "True"
@register(None)
class GenericCheck(Check):
    """Default check: compares a credential entry against the match."""
    def __call__(self, target, creds):
        """
        Check an individual match.
        Matches look like:
            tenant:%(tenant_id)s
            role:compute:admin
        """
        # TODO(termie): do dict inspection via dot syntax
        match = self.match % target
        if self.kind in creds:
            # Compare as unicode so non-string credential values match
            # their string spellings.
            return match == unicode(creds[self.kind])
        return False
| |
from __future__ import unicode_literals
from django.contrib.admin import ModelAdmin, TabularInline
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.test import RequestFactory, TestCase, override_settings
from django.urls import reverse
from .admin import InnerInline, site as admin_site
from .models import (
Author, BinaryTree, Book, Chapter, Child, ChildModel1, ChildModel2,
Fashionista, FootNote, Holder, Holder2, Holder3, Holder4, Inner, Inner2,
Inner3, Inner4Stacked, Inner4Tabular, Novel, OutfitItem, Parent,
ParentModelWithCustomPk, Person, Poll, Profile, ProfileCollection,
Question, Sighting, SomeChildModel, SomeParentModel, Teacher,
)
# HTML fragment identifying an inline object's "Change" link in rendered
# admin pages.
INLINE_CHANGELINK_HTML = 'class="inlinechangelink">Change</a>'
class TestDataMixin(object):
    """Mixin providing a shared superuser for the admin inline tests."""
    @classmethod
    def setUpTestData(cls):
        # Created once per TestCase class, per Django's setUpTestData.
        cls.superuser = User.objects.create_superuser(username='super', email='super@example.com', password='secret')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInline(TestDataMixin, TestCase):
    def setUp(self):
        # A Holder with one related Inner gives every test a change view
        # containing an inline formset to inspect.
        holder = Holder(dummy=13)
        holder.save()
        Inner(dummy=42, holder=holder).save()
        self.client.force_login(self.superuser)
        self.factory = RequestFactory()
    def test_can_delete(self):
        """
        can_delete should be passed to inlineformset factory.
        """
        # The holder created in setUp().
        holder = Holder.objects.get(dummy=13)
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        )
        inner_formset = response.context['inline_admin_formsets'][0].formset
        expected = InnerInline.can_delete
        actual = inner_formset.can_delete
        self.assertEqual(expected, actual, 'can_delete must be equal')
    def test_readonly_stacked_inline_label(self):
        """Bug #13174 -- a readonly stacked-inline field gets a label."""
        holder = Holder.objects.create(dummy=42)
        Inner.objects.create(holder=holder, dummy=42, readonly='')
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        )
        self.assertContains(response, '<label>Inner readonly label:</label>')
    def test_many_to_many_inlines(self):
        "Autogenerated many-to-many inlines are displayed correctly (#13407)"
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # The heading for the m2m inline block uses the right text
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        # The "add another" label is correct
        self.assertContains(response, 'Add another Author-book relationship')
        # The '+' is dropped from the autogenerated form prefix (Author_books+)
        self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
    def test_inline_primary(self):
        """An inline whose model uses a primary-key relation saves correctly."""
        person = Person.objects.create(firstname='Imelda')
        item = OutfitItem.objects.create(name='Shoes')
        # Imelda likes shoes, but can't carry her own bags.
        data = {
            'shoppingweakness_set-TOTAL_FORMS': 1,
            'shoppingweakness_set-INITIAL_FORMS': 0,
            'shoppingweakness_set-MAX_NUM_FORMS': 0,
            '_save': 'Save',
            'person': person.id,
            'max_weight': 0,
            'shoppingweakness_set-0-item': item.id,
        }
        response = self.client.post(reverse('admin:admin_inlines_fashionista_add'), data)
        # A successful save redirects (302) and persists the inline row.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)
    def test_custom_form_tabular_inline_label(self):
        """
        A model form with a form field specified (TitleForm.title1) should have
        its label rendered in the tabular inline.
        """
        response = self.client.get(reverse('admin:admin_inlines_titlecollection_add'))
        self.assertContains(response, '<th class="required">Title1</th>', html=True)
    def test_custom_form_tabular_inline_overridden_label(self):
        """
        SomeChildModelForm.__init__() overrides the label of a form field.
        That label is displayed in the TabularInline.
        """
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_add'))
        # The overridden label shows up both in the form field metadata
        # and in the rendered table header.
        field = list(response.context['inline_admin_formset'].fields())[0]
        self.assertEqual(field['label'], 'new label')
        self.assertContains(response, '<th class="required">New label</th>', html=True)
    def test_tabular_non_field_errors(self):
        """
        Ensure that non_field_errors are displayed correctly, including the
        right value for colspan. Refs #13510.
        """
        data = {
            'title_set-TOTAL_FORMS': 1,
            'title_set-INITIAL_FORMS': 0,
            'title_set-MAX_NUM_FORMS': 0,
            '_save': 'Save',
            # Mismatched titles trigger the formset's non-field error.
            'title_set-0-title1': 'a title',
            'title_set-0-title2': 'a different title',
        }
        response = self.client.post(reverse('admin:admin_inlines_titlecollection_add'), data)
        # Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox.
        self.assertContains(
            response,
            '<tr><td colspan="4"><ul class="errorlist nonfield">'
            '<li>The two titles must be the same</li></ul></td></tr>'
        )
    def test_no_parent_callable_lookup(self):
        """Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
        # Identically named callable isn't present in the parent ModelAdmin,
        # rendering of the add view shouldn't explode
        response = self.client.get(reverse('admin:admin_inlines_novel_add'))
        self.assertEqual(response.status_code, 200)
        # View should have the child inlines section
        self.assertContains(
            response,
            '<div class="js-inline-admin-formset inline-group" id="chapter_set-group"'
        )
    def test_callable_lookup(self):
        """Admin inline should invoke local callable when its name is listed in readonly_fields"""
        response = self.client.get(reverse('admin:admin_inlines_poll_add'))
        self.assertEqual(response.status_code, 200)
        # Add parent object view should have the child inlines section
        self.assertContains(
            response,
            '<div class="js-inline-admin-formset inline-group" id="question_set-group"'
        )
        # The right callable should be used for the inline readonly_fields
        # column cells
        self.assertContains(response, '<p>Callable in QuestionInline</p>')
    def test_help_text(self):
        """
        Ensure that the inlines' model field help texts are displayed when
        using both the stacked and tabular layouts.
        Ref #8190.
        """
        # Stacked inlines render help text as a plain div; tabular
        # inlines render it as an icon with alt/title tooltips.
        response = self.client.get(reverse('admin:admin_inlines_holder4_add'))
        self.assertContains(response, '<div class="help">Awesome stacked help text is awesome.</div>', 4)
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Awesome tabular help text is awesome.)" '
            'title="Awesome tabular help text is awesome." />',
            1
        )
        # ReadOnly fields
        response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Help text for ReadOnlyInline)" '
            'title="Help text for ReadOnlyInline" />',
            1
        )
    def test_inline_hidden_field_no_column(self):
        """#18263 -- Make sure hidden fields don't get a column in tabular inlines"""
        parent = SomeParentModel.objects.create(name='a')
        SomeChildModel.objects.create(name='b', position='0', parent=parent)
        SomeChildModel.objects.create(name='c', position='1', parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_change', args=(parent.pk,)))
        # No visible column for the hidden field, but the value is still
        # submitted via a hidden input.
        self.assertNotContains(response, '<td class="field-position">')
        self.assertContains(response, (
            '<input id="id_somechildmodel_set-1-position" '
            'name="somechildmodel_set-1-position" type="hidden" value="1" />'))
def test_non_related_name_inline(self):
    """
    Ensure that multiple inlines with related_name='+' have correct form
    prefixes. Bug #16838.
    """
    response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
    # The two '+' inlines get the auto-generated prefixes -1 and -2; every
    # expected input for both formsets must be present.
    expected_inputs = (
        '<input type="hidden" name="-1-0-id" id="id_-1-0-id" />',
        '<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia" />',
        '<input id="id_-1-0-name" type="text" class="vTextField" name="-1-0-name" maxlength="100" />',
        '<input type="hidden" name="-2-0-id" id="id_-2-0-id" />',
        '<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia" />',
        '<input id="id_-2-0-name" type="text" class="vTextField" name="-2-0-name" maxlength="100" />',
    )
    for fragment in expected_inputs:
        self.assertContains(response, fragment, html=True)
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_localize_pk_shortcut(self):
    """
    Ensure that the "View on Site" link is correct for locales that use
    thousand separators
    """
    # Large PKs would be mangled if thousand separators leaked into URLs.
    parent = Holder.objects.create(pk=123456789, dummy=42)
    child = Inner.objects.create(pk=987654321, holder=parent, dummy=42, readonly='')
    change_url = reverse('admin:admin_inlines_holder_change', args=(parent.id,))
    response = self.client.get(change_url)
    expected_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child).pk, child.pk)
    self.assertContains(response, expected_shortcut)
def test_custom_pk_shortcut(self):
    """
    Ensure that the "View on Site" link is correct for models with a
    custom primary key field. Bug #18433.
    """
    parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
    children = [
        ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent),
        ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent),
    ]
    response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
    # Each child's redirect shortcut must appear on the change page.
    for child in children:
        shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child).pk, child.pk)
        self.assertContains(response, shortcut)
def test_create_inlines_on_inherited_model(self):
    """
    Ensure that an object can be created with inlines when it inherits
    another class. Bug #19524.
    """
    add_url = reverse('admin:admin_inlines_extraterrestrial_add')
    post_data = {
        'name': 'Martian',
        'sighting_set-TOTAL_FORMS': 1,
        'sighting_set-INITIAL_FORMS': 0,
        'sighting_set-MAX_NUM_FORMS': 0,
        'sighting_set-0-place': 'Zone 51',
        '_save': 'Save',
    }
    response = self.client.post(add_url, post_data)
    # A successful save responds with a redirect.
    self.assertEqual(response.status_code, 302)
    # Both parent and the inline Sighting must have been persisted.
    self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1)
def test_custom_get_extra_form(self):
    """MAX_NUM_FORMS should honor the inline's get_max_num() while the
    total number of rendered forms stays the same on add and change views."""
    bt_head = BinaryTree.objects.create(name="Tree Head")
    BinaryTree.objects.create(name="First Child", parent=bt_head)
    # The maximum number of forms should respect 'get_max_num' on the
    # ModelAdmin
    max_forms_input = (
        '<input id="id_binarytree_set-MAX_NUM_FORMS" '
        'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d" />'
    )
    # The total number of forms will remain the same in either case
    total_forms_hidden = (
        '<input id="id_binarytree_set-TOTAL_FORMS" '
        'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2" />'
    )
    # Add view: max_num is expected to be 3.
    response = self.client.get(reverse('admin:admin_inlines_binarytree_add'))
    self.assertContains(response, max_forms_input % 3)
    self.assertContains(response, total_forms_hidden)
    # Change view: max_num is expected to drop to 2.
    response = self.client.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
    self.assertContains(response, max_forms_input % 2)
    self.assertContains(response, total_forms_hidden)
def test_min_num(self):
    """
    Ensure that min_num and extra determine number of forms.
    """
    # Inline defined locally so min_num/extra values are explicit here.
    class MinNumInline(TabularInline):
        model = BinaryTree
        min_num = 2
        extra = 3

    modeladmin = ModelAdmin(BinaryTree, admin_site)
    modeladmin.inlines = [MinNumInline]
    min_forms = (
        '<input id="id_binarytree_set-MIN_NUM_FORMS" '
        'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2" />'
    )
    # min_num (2) + extra (3) = 5 total forms.
    total_forms = (
        '<input id="id_binarytree_set-TOTAL_FORMS" '
        'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5" />'
    )
    request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
    # Unsaved in-memory superuser is enough to pass the permission checks.
    request.user = User(username='super', is_superuser=True)
    response = modeladmin.changeform_view(request)
    self.assertContains(response, min_forms)
    self.assertContains(response, total_forms)
def test_custom_min_num(self):
    """
    Ensure that get_min_num is called and used correctly.
    """
    bt_head = BinaryTree.objects.create(name="Tree Head")
    BinaryTree.objects.create(name="First Child", parent=bt_head)

    # min_num depends on whether an existing object is being edited:
    # 2 on the add view (obj is None), 5 on the change view.
    class MinNumInline(TabularInline):
        model = BinaryTree
        extra = 3

        def get_min_num(self, request, obj=None, **kwargs):
            if obj:
                return 5
            return 2

    modeladmin = ModelAdmin(BinaryTree, admin_site)
    modeladmin.inlines = [MinNumInline]
    min_forms = (
        '<input id="id_binarytree_set-MIN_NUM_FORMS" '
        'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d" />'
    )
    total_forms = (
        '<input id="id_binarytree_set-TOTAL_FORMS" '
        'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d" />'
    )
    # Add view: min_num 2 + extra 3 = 5 total forms.
    request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
    request.user = User(username='super', is_superuser=True)
    response = modeladmin.changeform_view(request)
    self.assertContains(response, min_forms % 2)
    self.assertContains(response, total_forms % 5)
    # Change view: min_num 5 + extra 3 = 8 total forms.
    request = self.factory.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
    request.user = User(username='super', is_superuser=True)
    response = modeladmin.changeform_view(request, object_id=str(bt_head.id))
    self.assertContains(response, min_forms % 5)
    self.assertContains(response, total_forms % 8)
def test_inline_nonauto_noneditable_pk(self):
    """Non-editable non-auto PKs must be rendered as hidden inputs."""
    response = self.client.get(reverse('admin:admin_inlines_author_add'))
    expected_hidden_inputs = (
        '<input id="id_nonautopkbook_set-0-rand_pk" '
        'name="nonautopkbook_set-0-rand_pk" type="hidden" />',
        '<input id="id_nonautopkbook_set-2-0-rand_pk" '
        'name="nonautopkbook_set-2-0-rand_pk" type="hidden" />',
    )
    for fragment in expected_hidden_inputs:
        self.assertContains(response, fragment, html=True)
def test_inline_editable_pk(self):
    """Editable PKs must be rendered exactly once as visible number inputs."""
    response = self.client.get(reverse('admin:admin_inlines_author_add'))
    expected_pk_inputs = (
        '<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" '
        'name="editablepkbook_set-0-manual_pk" type="number" />',
        '<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" '
        'name="editablepkbook_set-2-0-manual_pk" type="number" />',
    )
    for fragment in expected_pk_inputs:
        self.assertContains(response, fragment, html=True, count=1)
def test_stacked_inline_edit_form_contains_has_original_class(self):
    """Only the form bound to an existing object carries `has_original`."""
    parent = Holder.objects.create(dummy=1)
    parent.inner_set.create(dummy=1)
    response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(parent.pk,)))
    for expected_div in (
        # Existing instance form.
        '<div class="inline-related has_original" id="inner_set-0">',
        # Extra (empty) form.
        '<div class="inline-related" id="inner_set-1">',
    ):
        self.assertContains(response, expected_div, count=1)
def test_inlines_show_change_link_registered(self):
    "Inlines `show_change_link` for registered models when enabled."
    holder = Holder4.objects.create(dummy=1)
    item1 = Inner4Stacked.objects.create(dummy=1, holder=holder)
    item2 = Inner4Tabular.objects.create(dummy=1, holder=holder)
    # (admin url model name, pk) pairs for both inline flavours.
    items = (
        ('inner4stacked', item1.pk),
        ('inner4tabular', item2.pk),
    )
    response = self.client.get(reverse('admin:admin_inlines_holder4_change', args=(holder.pk,)))
    self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
    # Every inline row should link to its model's change view.
    for model, pk in items:
        url = reverse('admin:admin_inlines_%s_change' % model, args=(pk,))
        self.assertContains(response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML))
def test_inlines_show_change_link_unregistered(self):
    "Inlines `show_change_link` disabled for unregistered models."
    parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
    ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
    ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
    change_url = reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',))
    response = self.client.get(change_url)
    self.assertFalse(response.context['inline_admin_formset'].opts.has_registered_model)
    self.assertNotContains(response, INLINE_CHANGELINK_HTML)
def test_tabular_inline_show_change_link_false_registered(self):
    "Inlines `show_change_link` disabled by default."
    poll = Poll.objects.create(name="New poll")
    Question.objects.create(poll=poll)
    change_url = reverse('admin:admin_inlines_poll_change', args=(poll.pk,))
    response = self.client.get(change_url)
    # Registered model, but no change link unless explicitly enabled.
    self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
    self.assertNotContains(response, INLINE_CHANGELINK_HTML)
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineMedia(TestDataMixin, TestCase):
    """Scripts declared as media for the base admin and for inlines must
    both be included on the change page."""

    def setUp(self):
        self.client.force_login(self.superuser)

    def test_inline_media_only_base(self):
        holder = Holder(dummy=13)
        holder.save()
        Inner(dummy=42, holder=holder).save()
        url = reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        self.assertContains(self.client.get(url), 'my_awesome_admin_scripts.js')

    def test_inline_media_only_inline(self):
        holder = Holder3(dummy=13)
        holder.save()
        Inner3(dummy=42, holder=holder).save()
        url = reverse('admin:admin_inlines_holder3_change', args=(holder.id,))
        self.assertContains(self.client.get(url), 'my_awesome_inline_scripts.js')

    def test_all_inline_media(self):
        holder = Holder2(dummy=13)
        holder.save()
        Inner2(dummy=42, holder=holder).save()
        url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,))
        response = self.client.get(url)
        self.assertContains(response, 'my_awesome_admin_scripts.js')
        self.assertContains(response, 'my_awesome_inline_scripts.js')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineAdminForm(TestCase):

    def test_immutable_content_type(self):
        """Regression for #9362
        The problem depends only on InlineAdminForm and its "original"
        argument, so we can safely set the other arguments to None/{}. We just
        need to check that the content_type argument of Child isn't altered by
        the internals of the inline form."""
        teacher = Teacher.objects.create(name='Sally')
        parent = Parent.objects.create(name='John')
        child = Child.objects.create(name='Joe', teacher=teacher, parent=parent)
        inline_form = InlineAdminForm(None, None, {}, {}, child)
        expected_ct = ContentType.objects.get_for_model(Parent)
        self.assertEqual(inline_form.original.content_type, expected_ct)
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineProtectedOnDelete(TestDataMixin, TestCase):
    """Deleting an inline that has protected related objects must fail
    validation and surface an error on the form instead of deleting."""

    def setUp(self):
        self.client.force_login(self.superuser)

    def test_deleting_inline_with_protected_delete_does_not_validate(self):
        lotr = Novel.objects.create(name='Lord of the rings')
        chapter = Chapter.objects.create(novel=lotr, name='Many Meetings')
        foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda')
        change_url = reverse('admin:admin_inlines_novel_change', args=(lotr.id,))
        # NOTE(review): this GET's response is immediately overwritten by the
        # POST below -- looks redundant; confirm whether it can be removed.
        response = self.client.get(change_url)
        data = {
            'name': lotr.name,
            'chapter_set-TOTAL_FORMS': 1,
            'chapter_set-INITIAL_FORMS': 1,
            'chapter_set-MAX_NUM_FORMS': 1000,
            '_save': 'Save',
            'chapter_set-0-id': chapter.id,
            'chapter_set-0-name': chapter.name,
            'chapter_set-0-novel': lotr.id,
            # Request deletion of the chapter that FootNote protects.
            'chapter_set-0-DELETE': 'on'
        }
        response = self.client.post(change_url, data)
        # The save must be rejected with an explanatory message (a 302
        # redirect here would mean the deletion went through).
        self.assertContains(response, "Deleting chapter %s would require deleting "
            "the following protected related objects: foot note %s"
            % (chapter, foot_note))
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlinePermissions(TestCase):
    """
    Make sure the admin respects permissions for objects that are edited
    inline. Refs #8060.
    """

    def _grant(self, codename, content_type):
        """Give ``self.user`` the permission named ``codename`` on
        ``content_type``.

        Extracted to remove the Permission.objects.get()/add() pair that was
        repeated 13 times throughout this class.
        """
        permission = Permission.objects.get(codename=codename, content_type=content_type)
        self.user.user_permissions.add(permission)

    def setUp(self):
        self.user = User(username='admin')
        self.user.is_staff = True
        self.user.is_active = True
        self.user.set_password('secret')
        self.user.save()
        self.author_ct = ContentType.objects.get_for_model(Author)
        self.holder_ct = ContentType.objects.get_for_model(Holder2)
        self.book_ct = ContentType.objects.get_for_model(Book)
        self.inner_ct = ContentType.objects.get_for_model(Inner2)
        # User always has permissions to add and change Authors, and Holders,
        # the main (parent) models of the inlines. Permissions on the inlines
        # vary per test.
        self._grant('add_author', self.author_ct)
        self._grant('change_author', self.author_ct)
        self._grant('add_holder2', self.holder_ct)
        self._grant('change_holder2', self.holder_ct)
        author = Author.objects.create(pk=1, name='The Author')
        book = author.books.create(name='The inline Book')
        self.author_change_url = reverse('admin:admin_inlines_author_change', args=(author.id,))
        # Get the ID of the automatically created intermediate model for the Author-Book m2m
        author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book)
        self.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
        holder = Holder2.objects.create(dummy=13)
        inner2 = Inner2.objects.create(dummy=42, holder=holder)
        self.holder_change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,))
        self.inner2_id = inner2.id
        self.client.force_login(self.user)

    def test_inline_add_m2m_noperm(self):
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_add_fk_noperm(self):
        response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
        # No permissions on Inner2s, so no inline
        self.assertNotContains(response, '<h2>Inner2s</h2>')
        self.assertNotContains(response, 'Add another Inner2')
        self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')

    def test_inline_change_m2m_noperm(self):
        response = self.client.get(self.author_change_url)
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_change_fk_noperm(self):
        response = self.client.get(self.holder_change_url)
        # No permissions on Inner2s, so no inline
        self.assertNotContains(response, '<h2>Inner2s</h2>')
        self.assertNotContains(response, 'Add another Inner2')
        self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')

    def test_inline_add_m2m_add_perm(self):
        self._grant('add_book', self.book_ct)
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # No change permission on Books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_add_fk_add_perm(self):
        self._grant('add_inner2', self.inner_ct)
        response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
        # Add permission on inner2s, so we get the inline
        self.assertContains(response, '<h2>Inner2s</h2>')
        self.assertContains(response, 'Add another Inner2')
        self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
            'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)

    def test_inline_change_m2m_add_perm(self):
        self._grant('add_book', self.book_ct)
        response = self.client.get(self.author_change_url)
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
        self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')

    def test_inline_change_m2m_change_perm(self):
        self._grant('change_book', self.book_ct)
        response = self.client.get(self.author_change_url)
        # We have change perm on books, so we can add/change/delete inlines
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        self.assertContains(response, 'Add another Author-book relationship')
        self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
            'value="4" name="Author_books-TOTAL_FORMS" />', html=True)
        self.assertContains(
            response,
            '<input type="hidden" id="id_Author_books-0-id" value="%i" '
            'name="Author_books-0-id" />' % self.author_book_auto_m2m_intermediate_id,
            html=True
        )
        self.assertContains(response, 'id="id_Author_books-0-DELETE"')

    def test_inline_change_fk_add_perm(self):
        self._grant('add_inner2', self.inner_ct)
        response = self.client.get(self.holder_change_url)
        # Add permission on inner2s, so we can add but not modify existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        self.assertContains(response, 'Add another Inner2')
        # 3 extra forms only, not the existing instance form
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" '
            'name="inner2_set-TOTAL_FORMS" />',
            html=True
        )
        self.assertNotContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
            html=True
        )

    def test_inline_change_fk_change_perm(self):
        self._grant('change_inner2', self.inner_ct)
        response = self.client.get(self.holder_change_url)
        # Change permission on inner2s, so we can change existing but not add new
        self.assertContains(response, '<h2>Inner2s</h2>')
        # Just the one form for existing instances
        self.assertContains(
            response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS" />',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
            html=True
        )
        # max-num 0 means we can't add new ones
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" value="0" name="inner2_set-MAX_NUM_FORMS" />',
            html=True
        )

    def test_inline_change_fk_add_change_perm(self):
        self._grant('add_inner2', self.inner_ct)
        self._grant('change_inner2', self.inner_ct)
        response = self.client.get(self.holder_change_url)
        # Add/change perm, so we can add new and change existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance and three extra for new
        self.assertContains(
            response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS" />',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
            html=True
        )

    def test_inline_change_fk_change_del_perm(self):
        self._grant('change_inner2', self.inner_ct)
        self._grant('delete_inner2', self.inner_ct)
        response = self.client.get(self.holder_change_url)
        # Change/delete perm on inner2s, so we can change/delete existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance only, no new
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS" />',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
            html=True
        )
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')

    def test_inline_change_fk_all_perms(self):
        self._grant('add_inner2', self.inner_ct)
        self._grant('change_inner2', self.inner_ct)
        self._grant('delete_inner2', self.inner_ct)
        response = self.client.get(self.holder_change_url)
        # All perms on inner2s, so we can add/change/delete
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance only, three for new
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS" />',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
            html=True
        )
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class SeleniumTests(AdminSeleniumTestCase):
    """Browser-driven tests for dynamically adding/removing inline forms.

    The interactions here are strictly order-dependent (each click mutates
    the DOM the next selector relies on), so statements must not be
    reordered.
    """

    available_apps = ['admin_inlines'] + AdminSeleniumTestCase.available_apps

    def setUp(self):
        User.objects.create_superuser(username='super', password='secret', email='super@example.com')

    def test_add_stackeds(self):
        """
        Ensure that the "Add another XXX" link correctly adds items to the
        stacked formset.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))

        inline_id = '#inner4stacked_set-group'

        def rows_length():
            # Count the currently rendered stacked inline rows.
            return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id))
        self.assertEqual(rows_length(), 3)

        add_button = self.selenium.find_element_by_link_text(
            'Add another Inner4 stacked')
        add_button.click()

        self.assertEqual(rows_length(), 4)

    def test_delete_stackeds(self):
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))

        inline_id = '#inner4stacked_set-group'

        def rows_length():
            return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id))
        self.assertEqual(rows_length(), 3)

        # Grow to 5 rows, then delete every deletable row.
        add_button = self.selenium.find_element_by_link_text(
            'Add another Inner4 stacked')
        add_button.click()
        add_button.click()

        self.assertEqual(rows_length(), 5, msg="sanity check")
        for delete_link in self.selenium.find_elements_by_css_selector('%s .inline-deletelink' % inline_id):
            delete_link.click()
        self.assertEqual(rows_length(), 3)

    def test_add_inlines(self):
        """
        Ensure that the "Add another XXX" link correctly adds items to the
        inline form.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))

        # Check that there's only one inline to start with and that it has the
        # correct ID.
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')), 1)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[0].get_attribute('id'),
            'profile_set-0')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1)

        # Add an inline
        self.selenium.find_element_by_link_text('Add another Profile').click()

        # Check that the inline has been added, that it has the right id, and
        # that it contains the right fields.
        self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 2)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1)

        # Let's add another one to be sure
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 3)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1)

        # Enter some data and click 'Save'
        self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1')
        self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2')
        self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1')
        self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2')
        self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1')
        self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2')
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        self.wait_page_loaded()

        # Check that the objects have been created in the database
        self.assertEqual(ProfileCollection.objects.all().count(), 1)
        self.assertEqual(Profile.objects.all().count(), 3)

    def test_delete_inlines(self):
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))

        # Add a few inlines
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '#profile_set-group table tr.dynamic-profile_set')), 5)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1)

        # Click on a few delete buttons
        self.selenium.find_element_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click()
        self.selenium.find_element_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete a').click()

        # Verify that they're gone and that the IDs have been re-sequenced
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '#profile_set-group table tr.dynamic-profile_set')), 3)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)

    def test_alternating_rows(self):
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))

        # Add a few inlines
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()

        # Newly added rows must keep the alternating row1/row2 striping.
        row_selector = 'form#profilecollection_form tr.dynamic-profile_set'
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            "%s.row1" % row_selector)), 2, msg="Expect two row1 styled rows")
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            "%s.row2" % row_selector)), 1, msg="Expect one row2 styled row")

    def test_collapsed_inlines(self):
        # Collapsed inlines have SHOW/HIDE links.
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_author_add'))
        # One field is in a stacked inline, other in a tabular one.
        test_fields = ['#id_nonautopkbook_set-0-title', '#id_nonautopkbook_set-2-0-title']
        show_links = self.selenium.find_elements_by_link_text('SHOW')
        self.assertEqual(len(show_links), 2)
        # Each SHOW link reveals its (initially hidden) field...
        for show_index, field_name in enumerate(test_fields, 0):
            self.wait_until_invisible(field_name)
            show_links[show_index].click()
            self.wait_until_visible(field_name)
        hide_links = self.selenium.find_elements_by_link_text('HIDE')
        self.assertEqual(len(hide_links), 2)
        # ...and each HIDE link conceals it again.
        for hide_index, field_name in enumerate(test_fields, 0):
            self.wait_until_visible(field_name)
            hide_links[hide_index].click()
            self.wait_until_invisible(field_name)
| |
from typing import Optional, List, Sequence, Tuple
import os
import re
import subprocess
import sys
import tarfile
import tempfile
import threading
import yaml
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
import ray # noqa: F401
from ray.autoscaler._private.cli_logger import cli_logger
from ray.autoscaler._private.providers import _get_node_provider
from ray.autoscaler.tags import TAG_RAY_NODE_KIND, NODE_KIND_HEAD, NODE_KIND_WORKER
# Import psutil after ray so the packaged version is used.
import psutil
# Cap on concurrent workers when collecting from multiple nodes
# (presumably consumed by a ThreadPoolExecutor later in this module -- the
# usage is outside this chunk).
MAX_PARALLEL_SSH_WORKERS = 8
# Fallback SSH credentials; assumed to be used when the caller does not
# supply explicit user/key -- confirm against the CLI entry points.
DEFAULT_SSH_USER = "ubuntu"
DEFAULT_SSH_KEYS = ["~/ray_bootstrap_key.pem", "~/.ssh/ray-autoscaler_2_us-west-2.pem"]
class CommandFailed(RuntimeError):
    """Base error for failed commands during log/state collection."""
class LocalCommandFailed(CommandFailed):
    """A command run on the local machine failed."""
class RemoteCommandFailed(CommandFailed):
    """A command run on a remote node failed."""
class GetParameters:
    """Bundle of flags selecting which data to collect from a node.

    Instances are plain value holders; each attribute mirrors the
    constructor argument of the same name and is read by the collection
    functions elsewhere in this module.
    """

    def __init__(
        self,
        logs: bool = True,
        debug_state: bool = True,
        pip: bool = True,
        processes: bool = True,
        processes_verbose: bool = True,
        processes_list: Optional[List[Tuple[str, bool]]] = None,
    ):
        # Whether to collect Ray log files (see get_local_ray_logs below).
        self.logs = logs
        # Whether to collect the debug state -- presumably the autoscaler's
        # debug_state dump; confirm against the consumer.
        self.debug_state = debug_state
        # Whether to capture the installed pip packages.
        self.pip = pip
        # Whether to capture process information (psutil is imported above).
        self.processes = processes
        # Verbose process output toggle.
        self.processes_verbose = processes_verbose
        # Optional explicit list of (process name, flag) pairs -- the flag's
        # meaning is not visible in this chunk; confirm before relying on it.
        self.processes_list = processes_list
class Node:
    """Node (as in "machine")

    Holds the connection details needed to reach one cluster machine.
    """

    def __init__(
        self,
        host: str,
        ssh_user: str = "ubuntu",
        ssh_key: str = "~/ray_bootstrap_key.pem",
        docker_container: Optional[str] = None,
        is_head: bool = False,
    ):
        # NOTE(review): the defaults above duplicate DEFAULT_SSH_USER and the
        # first entry of DEFAULT_SSH_KEYS defined at module level; consider
        # referencing the constants so the values cannot drift apart.
        self.host = host
        self.ssh_user = ssh_user
        self.ssh_key = ssh_key
        # Name of the docker container to exec into, if any.
        self.docker_container = docker_container
        # True when this node is the cluster head.
        self.is_head = is_head
class Archive:
    """Archive object to collect and compress files into a single file.

    Objects of this class can be passed around to different data collection
    functions. These functions can use the :meth:`subdir` method to add
    files to a sub directory of the archive.

    Writes are serialized with an internal lock, so multiple collection
    functions may add files concurrently.
    """

    def __init__(self, file: Optional[str] = None):
        # Default to a fresh temp file so each collection run is isolated.
        self.file = file or tempfile.mkstemp(prefix="ray_logs_", suffix=".tar.gz")[1]
        self.tar = None
        # Serializes tar writes across threads.
        self._lock = threading.Lock()

    @property
    def is_open(self) -> bool:
        """Whether the underlying tarfile is currently open for writing."""
        return bool(self.tar)

    def open(self):
        """Open the archive file for gzip-compressed writing."""
        self.tar = tarfile.open(self.file, "w:gz")

    def close(self):
        """Close the archive. Further ``add`` calls will fail until reopened."""
        self.tar.close()
        self.tar = None

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    @contextmanager
    def subdir(self, subdir: str, root: str = "/"):
        """Open a context to add files to the archive.

        Example:

        .. code-block:: python

            with Archive("file.tar.gz") as archive:
                with archive.subdir("logfiles", root="/tmp/logs") as sd:
                    # Will be added as `logfiles/nested/file.txt`
                    sd.add("/tmp/logs/nested/file.txt")

        Args:
            subdir (str): Subdir to which to add files to. Calling the
                ``add(path)`` command will place files into the ``subdir``
                directory of the archive.
            root (str): Root path. Files without an explicit ``arcname``
                will be named relatively to this path.

        Yields:
            A context object that can be used to add files to the archive.
        """
        root = os.path.abspath(root)

        class _Context:
            @staticmethod
            def add(path: str, arcname: Optional[str] = None):
                path = os.path.abspath(path)
                arcname = arcname or os.path.join(subdir, os.path.relpath(path, root))
                # Hold the lock only for the write, via a ``with`` block so it
                # is released even when ``tar.add`` raises. The previous bare
                # acquire()/release() pair leaked the lock on error, which
                # would deadlock every subsequent add() from any thread.
                with self._lock:
                    self.tar.add(path, arcname=arcname)

        yield _Context()
###
# Functions to gather logs and information on the local node
###
def get_local_ray_logs(
    archive: Archive,
    exclude: Optional[Sequence[str]] = None,
    session_log_dir: str = "/tmp/ray/session_latest",
) -> Archive:
    """Copy local Ray session log files into an archive.

    Args:
        archive (Archive): Archive object to add log files to.
        exclude (Sequence[str]): Sequence of regex patterns. Files whose
            path relative to the logs directory matches any of these
            patterns will not be included in the archive.
        session_log_dir (str): Path to the Ray session files; the ``logs``
            subdirectory of this path is archived. Defaults to
            ``/tmp/ray/session_latest``.

    Returns:
        Open archive object.
    """
    if not archive.is_open:
        archive.open()
    exclude = exclude or []
    # Only the `logs` subdirectory of the session dir is collected.
    session_log_dir = os.path.join(os.path.expanduser(session_log_dir), "logs")
    with archive.subdir("logs", root=session_log_dir) as sd:
        for root, dirs, files in os.walk(session_log_dir):
            for file in files:
                file_path = os.path.join(root, file)
                rel_path = os.path.relpath(file_path, start=session_log_dir)
                # Skip file if it matches any pattern in `exclude`
                if any(re.match(pattern, rel_path) for pattern in exclude):
                    continue
                sd.add(file_path)
    return archive
def get_local_debug_state(
    archive: Archive, session_dir: str = "/tmp/ray/session_latest"
) -> Archive:
    """Copy the local ``debug_state.txt`` file into an archive.

    Args:
        archive (Archive): Archive object to add the debug state file to.
        session_dir (str): Path to the Ray session files. Defaults to
            ``/tmp/ray/session_latest``

    Returns:
        Open archive object.

    Raises:
        LocalCommandFailed: If ``logs/debug_state.txt`` does not exist
            under the session directory.
    """
    if not archive.is_open:
        archive.open()
    session_dir = os.path.expanduser(session_dir)
    debug_state_file = os.path.join(session_dir, "logs/debug_state.txt")
    if not os.path.exists(debug_state_file):
        raise LocalCommandFailed("No `debug_state.txt` file found.")
    # Added at the archive root (empty subdir), named relative to session_dir.
    with archive.subdir("", root=session_dir) as sd:
        sd.add(debug_state_file)
    return archive
def get_local_pip_packages(archive: Archive):
    """Get currently installed pip packages and write into an archive.

    Args:
        archive (Archive): Archive object to add meta files to.

    Returns:
        Open archive object.
    """
    if not archive.is_open:
        archive.open()
    try:
        from pip._internal.operations import freeze
    except ImportError:  # pip < 10.0
        from pip.operations import freeze
    # Write the frozen requirement list to a temp file, then store it at the
    # archive root as `pip_packages.txt`. The temp file is deleted on exit
    # from the `with` block, after it has been added to the archive.
    with tempfile.NamedTemporaryFile("wt") as fp:
        for line in freeze.freeze():
            fp.writelines([line, "\n"])
        fp.flush()
        with archive.subdir("") as sd:
            sd.add(fp.name, "pip_packages.txt")
    return archive
def get_local_ray_processes(
    archive: Archive,
    processes: Optional[List[Tuple[str, bool]]] = None,
    verbose: bool = False,
):
    """Get the status of all the relevant ray processes.
    Args:
        archive (Archive): Archive object to add process info files to.
        processes (list): List of processes to get information on. The first
            element of the tuple is a string to filter by, and the second
            element is a boolean indicating if we should filter by command
            name (True) or command line including parameters (False)
        verbose (bool): If True, show entire executable command line.
            If False, show just the first term.
    Returns:
        Open archive object.
    """
    if not processes:
        # local import to avoid circular dependencies
        from ray.autoscaler._private.constants import RAY_PROCESSES
        processes = RAY_PROCESSES
    process_infos = []
    for process in psutil.process_iter(["pid", "name", "cmdline", "status"]):
        try:
            # oneshot() batches the underlying /proc reads for efficiency.
            with process.oneshot():
                cmdline = " ".join(process.cmdline())
                process_infos.append(
                    (
                        {
                            # Non-verbose: strip everything from the first
                            # "--" flag onwards (and the trailing space).
                            "executable": cmdline
                            if verbose
                            else cmdline.split("--", 1)[0][:-1],
                            "name": process.name(),
                            "pid": process.pid,
                            "status": process.status(),
                        },
                        process.cmdline(),
                    )
                )
        except Exception as exc:
            # NOTE(review): any inaccessible process (e.g. permission denied
            # or a process exiting mid-iteration) aborts the whole
            # collection here — confirm this is intended rather than
            # skipping that process and continuing.
            raise LocalCommandFailed(exc) from exc
    # Keep each PID at most once: first matching keyword wins.
    relevant_processes = {}
    for process_dict, cmdline in process_infos:
        for keyword, filter_by_cmd in processes:
            if filter_by_cmd:
                corpus = process_dict["name"]
            else:
                corpus = subprocess.list2cmdline(cmdline)
            if keyword in corpus and process_dict["pid"] not in relevant_processes:
                relevant_processes[process_dict["pid"]] = process_dict
    # Dump one YAML document per process into meta/process_info.txt.
    with tempfile.NamedTemporaryFile("wt") as fp:
        for line in relevant_processes.values():
            fp.writelines([yaml.dump(line), "\n"])
        fp.flush()
        with archive.subdir("meta") as sd:
            sd.add(fp.name, "process_info.txt")
    return archive
def get_all_local_data(archive: Archive, parameters: GetParameters):
    """Get all local data.

    Gets:
        - The Ray logs of the latest session
        - The currently installed pip packages

    Args:
        archive (Archive): Archive object to add meta files to.
        parameters (GetParameters): Parameters (settings) for getting data.

    Returns:
        Open archive object.
    """
    if not archive.is_open:
        archive.open()

    def _collect(fn, **kwargs):
        # Run one collector; local failures are logged, never propagated,
        # so one failing step does not prevent the others from running.
        try:
            fn(archive=archive, **kwargs)
        except LocalCommandFailed as exc:
            cli_logger.error(exc)

    if parameters.logs:
        _collect(get_local_ray_logs)
    if parameters.debug_state:
        _collect(get_local_debug_state)
    if parameters.pip:
        _collect(get_local_pip_packages)
    if parameters.processes:
        _collect(
            get_local_ray_processes,
            processes=parameters.processes_list,
            verbose=parameters.processes_verbose,
        )
    return archive
###
# Functions to invoke remote scripts and gather data from remote nodes
###
def _wrap(items: List[str], quotes="'"):
return f"{quotes}{' '.join(items)}{quotes}"
def create_and_get_archive_from_remote_node(
    remote_node: Node, parameters: GetParameters, script_path: str = "ray"
) -> Optional[str]:
    """Create an archive containing logs on a remote node and transfer.

    This will call ``ray local-dump --stream`` on the remote
    node. The resulting file will be saved locally in a temporary file and
    returned.

    Args:
        remote_node (Node): Remote node to gather archive from.
        parameters (GetParameters): Parameters (settings) for getting data.
        script_path (str): Path to the ``ray`` script on the remote node.

    Returns:
        Path to a temporary file containing the node's collected data.

    Raises:
        RemoteCommandFailed: If the remote dump command exits non-zero.
    """
    # Base SSH command. Host key checking is disabled since autoscaler
    # nodes are ephemeral and their keys are generally not known locally.
    cmd = [
        "ssh",
        "-o StrictHostKeyChecking=no",
        "-o UserKnownHostsFile=/dev/null",
        "-o LogLevel=ERROR",
        "-i",
        remote_node.ssh_key,
        f"{remote_node.ssh_user}@{remote_node.host}",
    ]
    if remote_node.docker_container:
        # Run the dump inside the container rather than on the host.
        cmd += [
            "docker",
            "exec",
            remote_node.docker_container,
        ]
    collect_cmd = [script_path, "local-dump", "--stream"]
    collect_cmd += ["--logs"] if parameters.logs else ["--no-logs"]
    collect_cmd += ["--debug-state"] if parameters.debug_state else ["--no-debug-state"]
    collect_cmd += ["--pip"] if parameters.pip else ["--no-pip"]
    collect_cmd += ["--processes"] if parameters.processes else ["--no-processes"]
    if parameters.processes:
        # BUGFIX: the negative flag was misspelled `--no-proccesses-verbose`,
        # which the remote `ray local-dump` CLI would reject.
        collect_cmd += (
            ["--processes-verbose"]
            if parameters.processes_verbose
            else ["--no-processes-verbose"]
        )
    cmd += ["/bin/bash", "-c", _wrap(collect_cmd, quotes='"')]
    cat = "node" if not remote_node.is_head else "head"
    cli_logger.print(f"Collecting data from remote node: {remote_node.host}")
    tmp = tempfile.mkstemp(prefix=f"ray_{cat}_{remote_node.host}_", suffix=".tar.gz")[1]
    # Stream the remote tarball's bytes directly into the local temp file.
    with open(tmp, "wb") as fp:
        try:
            subprocess.check_call(cmd, stdout=fp, stderr=sys.stderr)
        except subprocess.CalledProcessError as exc:
            raise RemoteCommandFailed(
                f"Gathering logs from remote node failed: {' '.join(cmd)}"
            ) from exc
    return tmp
def create_and_add_remote_data_to_local_archive(
    archive: Archive, remote_node: Node, parameters: GetParameters
):
    """Create and get data from remote node and add to local archive.

    Args:
        archive (Archive): Archive object to add remote data to.
        remote_node (Node): Remote node to gather archive from.
        parameters (GetParameters): Parameters (settings) for getting data.

    Returns:
        Open archive object.
    """
    # Fetch the remote node's tarball into a local temp file first.
    tmp = create_and_get_archive_from_remote_node(remote_node, parameters)
    if not archive.is_open:
        archive.open()
    cat = "node" if not remote_node.is_head else "head"
    # Store the nested tarball at the archive root under a stable name.
    with archive.subdir("", root=os.path.dirname(tmp)) as sd:
        sd.add(tmp, arcname=f"ray_{cat}_{remote_node.host}.tar.gz")
    return archive
def create_and_add_local_data_to_local_archive(
    archive: Archive, parameters: GetParameters
):
    """Create and get data from this node and add to archive.

    Args:
        archive (Archive): Archive object to add remote data to.
        parameters (GetParameters): Parameters (settings) for getting data.

    Returns:
        Open archive object.
    """
    # Collect local data into its own temporary archive, then nest that
    # archive inside the target archive as `local_node.tar.gz`.
    with Archive() as local_data_archive:
        get_all_local_data(local_data_archive, parameters)
    if not archive.is_open:
        archive.open()
    with archive.subdir("", root=os.path.dirname(local_data_archive.file)) as sd:
        sd.add(local_data_archive.file, arcname="local_node.tar.gz")
    # The temporary nested archive is no longer needed once copied in.
    os.remove(local_data_archive.file)
    return archive
def create_archive_for_remote_nodes(
    archive: Archive, remote_nodes: Sequence[Node], parameters: GetParameters
):
    """Create an archive combining data from the remote nodes.

    This will parallelize calls to get data from remote nodes.

    Args:
        archive (Archive): Archive object to add remote data to.
        remote_nodes (Sequence[Node]): Sequence of remote nodes.
        parameters (GetParameters): Parameters (settings) for getting data.

    Returns:
        Open archive object.
    """
    if not archive.is_open:
        archive.open()
    with ThreadPoolExecutor(max_workers=MAX_PARALLEL_SSH_WORKERS) as executor:
        futures = [
            executor.submit(
                create_and_add_remote_data_to_local_archive,
                archive=archive,
                remote_node=remote_node,
                parameters=parameters,
            )
            for remote_node in remote_nodes
        ]
    # Surface per-node failures instead of silently dropping them: an
    # exception raised inside a submitted task is otherwise swallowed by
    # the executor when the future's result is never inspected.
    for future in futures:
        exc = future.exception()
        if exc is not None:
            cli_logger.error(exc)
    return archive
def create_archive_for_local_and_remote_nodes(
    archive: Archive, remote_nodes: Sequence[Node], parameters: GetParameters
):
    """Create an archive combining data from the local and remote nodes.

    This will parallelize calls to get data from remote nodes.

    Args:
        archive (Archive): Archive object to add data to.
        remote_nodes (Sequence[Node]): Sequence of remote nodes.
        parameters (GetParameters): Parameters (settings) for getting data.

    Returns:
        Open archive object.
    """
    if not archive.is_open:
        archive.open()
    # Local collection failures are logged but do not stop remote collection.
    try:
        create_and_add_local_data_to_local_archive(archive, parameters)
    except CommandFailed as exc:
        cli_logger.error(exc)
    create_archive_for_remote_nodes(archive, remote_nodes, parameters)
    cli_logger.print(
        f"Collected data from local node and {len(remote_nodes)} " f"remote nodes."
    )
    return archive
###
# Ray cluster info
###
def get_info_from_ray_cluster_config(
    cluster_config: str,
) -> Tuple[List[str], str, str, Optional[str], Optional[str]]:
    """Get information from Ray cluster config.

    Return list of host IPs, ssh user, ssh key file, and optional docker
    container.

    Args:
        cluster_config (str): Path to ray cluster config.

    Returns:
        Tuple of list of host IPs, ssh user name, ssh key file path,
        optional docker container name, optional cluster name.
    """
    from ray.autoscaler._private.commands import _bootstrap_config
    cli_logger.print(
        f"Retrieving cluster information from ray cluster file: " f"{cluster_config}"
    )
    cluster_config = os.path.expanduser(cluster_config)
    # Use a context manager so the config file handle is always closed
    # (the previous `open(...).read()` leaked the file object).
    with open(cluster_config) as f:
        config = yaml.safe_load(f)
    config = _bootstrap_config(config, no_config_cache=True)
    provider = _get_node_provider(config["provider"], config["cluster_name"])
    # Head node IPs come first, then workers.
    head_nodes = provider.non_terminated_nodes({TAG_RAY_NODE_KIND: NODE_KIND_HEAD})
    worker_nodes = provider.non_terminated_nodes({TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
    hosts = [provider.external_ip(node) for node in head_nodes + worker_nodes]
    ssh_user = config["auth"]["ssh_user"]
    ssh_key = config["auth"]["ssh_private_key"]
    docker = None
    docker_config = config.get("docker", None)
    if docker_config:
        docker = docker_config.get("container_name", None)
    cluster_name = config.get("cluster_name", None)
    return hosts, ssh_user, ssh_key, docker, cluster_name
def _info_from_params(
    cluster: Optional[str] = None,
    host: Optional[str] = None,
    ssh_user: Optional[str] = None,
    ssh_key: Optional[str] = None,
    docker: Optional[str] = None,
):
    """Resolve connection info from CLI parameters and/or a cluster config.

    Explicitly passed values take precedence over values read from the
    cluster config file; missing ssh user/key fall back to module defaults.

    Note: This returns a list of hosts, not a comma separated string!

    Returns:
        Tuple ``(cluster, hosts, ssh_user, ssh_key, docker, cluster_name)``.

    Raises:
        LocalCommandFailed: If neither a usable cluster config nor ``host``
            was provided, or the cluster has no running nodes.
    """
    if not host and not cluster:
        # Neither given: try the bootstrap config that Ray drops on head nodes.
        bootstrap_config = os.path.expanduser("~/ray_bootstrap_config.yaml")
        if os.path.exists(bootstrap_config):
            cluster = bootstrap_config
            cli_logger.warning(
                f"Detected cluster config file at {cluster}. "
                f"If this is incorrect, specify with "
                f"`ray cluster-dump <config>`"
            )
    elif cluster:
        cluster = os.path.expanduser(cluster)
    cluster_name = None
    if cluster:
        # Values from the config only fill in parameters not given explicitly.
        h, u, k, d, cluster_name = get_info_from_ray_cluster_config(cluster)
        ssh_user = ssh_user or u
        ssh_key = ssh_key or k
        docker = docker or d
        hosts = host.split(",") if host else h
        if not hosts:
            raise LocalCommandFailed(
                f"Invalid cluster file or cluster has no running nodes: " f"{cluster}"
            )
    elif host:
        hosts = host.split(",")
    else:
        raise LocalCommandFailed(
            "You need to either specify a `<cluster_config>` or `--host`."
        )
    if not ssh_user:
        ssh_user = DEFAULT_SSH_USER
        cli_logger.warning(
            f"Using default SSH user `{ssh_user}`. "
            f"If this is incorrect, specify with `--ssh-user <user>`"
        )
    if not ssh_key:
        # Fall back to the first default key that exists on disk.
        for cand_key in DEFAULT_SSH_KEYS:
            cand_key_file = os.path.expanduser(cand_key)
            if os.path.exists(cand_key_file):
                ssh_key = cand_key_file
                cli_logger.warning(
                    f"Auto detected SSH key file: {ssh_key}. "
                    f"If this is incorrect, specify with `--ssh-key <key>`"
                )
                break
    return cluster, hosts, ssh_user, ssh_key, docker, cluster_name
| |
import copy
import pickle
import warnings
import sys
from sympy.utilities.pytest import XFAIL
from sympy.core.basic import Atom, Basic
from sympy.core.core import BasicMeta, BasicType, ClassRegistry
from sympy.core.singleton import SingletonRegistry
from sympy.core.symbol import Dummy, Symbol, Wild
from sympy.core.numbers import (E, I, pi, oo, zoo, nan, Integer, Number,
NumberSymbol, Rational, Float)
from sympy.core.relational import (Equality, GreaterThan, LessThan, Relational,
StrictGreaterThan, StrictLessThan, Unequality)
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.power import Pow
from sympy.core.function import Derivative, Function, FunctionClass, Lambda, \
WildFunction
from sympy.sets.sets import Interval
from sympy.core.multidimensional import vectorize
from sympy.functions import exp
#from sympy.core.ast_parser import SymPyParser, SymPyTransformer
from sympy.core.compatibility import HAS_GMPY, PY3
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy import symbols, S
# Attributes that may legitimately differ between an object and its
# pickled/copied twin, so `check` skips comparing them.
excluded_attrs = set(['_assumptions', '_mhash'])
def check(a, exclude=[], check_attr=True):
    """ Check that pickling and copying round-trips.

    Runs `a` through pickle protocols 0-2 (plus 3 on Python 3), `copy.copy`
    and `copy.deepcopy`, and asserts the result exposes the same attributes
    with equal values (except `excluded_attrs`).

    Note: `exclude=[]` is a mutable default; it is safe here because the
    list is only read, never mutated.
    """
    # Python 2.6+ warns about BasicException.message, for example.
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    protocols = [0, 1, 2, copy.copy, copy.deepcopy]
    # Python 2.x doesn't support the third pickling protocol
    if PY3:
        protocols.extend([3])
    for protocol in protocols:
        if protocol in exclude:
            continue
        if callable(protocol):
            if isinstance(a, BasicType):
                # Classes can't be copied, but that's okay.
                return
            b = protocol(a)
        else:
            b = pickle.loads(pickle.dumps(a, protocol))
        d1 = dir(a)
        d2 = dir(b)
        assert set(d1) == set(d2)
        if not check_attr:
            continue
        # Compare every non-callable attribute in both directions.
        def c(a, b, d):
            for i in d:
                if not hasattr(a, i) or i in excluded_attrs:
                    continue
                attr = getattr(a, i)
                if not hasattr(attr, "__call__"):
                    assert hasattr(b, i), i
                    assert getattr(b, i) == attr, "%s != %s" % (getattr(b, i), attr)
        c(a, b, d1)
        c(b, a, d2)
    # reset filters
    warnings.simplefilter("default", category=DeprecationWarning)
    warnings.simplefilter("error", category=SymPyDeprecationWarning)
#================== core =========================
def test_core_basic():
    """Round-trip core base classes and registries through pickle/copy."""
    for c in (Atom, Atom(),
              Basic, Basic(),
              # XXX: dynamically created types are not picklable
              # BasicMeta, BasicMeta("test", (), {}),
              # BasicType, BasicType("test", (), {}),
              ClassRegistry, ClassRegistry(),
              SingletonRegistry, SingletonRegistry()):
        check(c)
def test_core_symbol():
    """Round-trip Symbol-like classes and instances through pickle/copy."""
    # make the Symbol a unique name that doesn't class with any other
    # testing variable in this file since after this test the symbol
    # having the same name will be cached as noncommutative
    for c in (Dummy, Dummy("x", commutative=False), Symbol,
              Symbol("_issue_3130", commutative=False), Wild, Wild("x")):
        check(c)
def test_core_numbers():
    """Round-trip basic number instances through pickle/copy."""
    candidates = [Integer(2), Rational(2, 3), Float("1.2")]
    for candidate in candidates:
        check(candidate)
def test_core_relational():
    """Round-trip relational classes and instances through pickle/copy."""
    x = Symbol("x")
    y = Symbol("y")
    for c in (Equality, Equality(x, y), GreaterThan, GreaterThan(x, y),
              LessThan, LessThan(x, y), Relational, Relational(x, y),
              StrictGreaterThan, StrictGreaterThan(x, y), StrictLessThan,
              StrictLessThan(x, y), Unequality, Unequality(x, y)):
        check(c)
def test_core_add():
    """Round-trip the Add class and an Add instance through pickle/copy."""
    x = Symbol("x")
    for candidate in [Add, Add(x, 4)]:
        check(candidate)
def test_core_mul():
    """Round-trip the Mul class and a Mul instance through pickle/copy."""
    x = Symbol("x")
    for candidate in [Mul, Mul(x, 4)]:
        check(candidate)
def test_core_power():
    """Round-trip the Pow class and a Pow instance through pickle/copy."""
    x = Symbol("x")
    for candidate in [Pow, Pow(x, 4)]:
        check(candidate)
def test_core_function():
    """Round-trip function-related classes through pickle/copy."""
    x = Symbol("x")
    for f in (Derivative, Derivative(x), Function, FunctionClass, Lambda,
              WildFunction):
        check(f)
@XFAIL
def test_core_dynamicfunctions():
    # This fails because f is assumed to be a class at sympy.basic.function.f
    f = Function("f")
    check(f)
def test_core_interval():
    """Round-trip the Interval class and an instance through pickle/copy."""
    for candidate in [Interval, Interval(0, 2)]:
        check(candidate)
def test_core_multidimensional():
    """Round-trip the vectorize decorator class through pickle/copy."""
    for candidate in [vectorize, vectorize(0)]:
        check(candidate)
def test_Singletons():
    """Pickling/copying a singleton must return the identical object."""
    protocols = [0, 1, 2]
    if PY3:
        protocols.extend([3])
    copiers = [copy.copy, copy.deepcopy]
    # BUGFIX: bind `proto` as a default argument. The previous
    # `lambda x: pickle.loads(pickle.dumps(x, proto))` closed over the
    # loop variable late, so every copier used the LAST protocol and
    # protocols 0..n-1 were never actually exercised.
    copiers += [lambda x, proto=proto: pickle.loads(pickle.dumps(x, proto))
                for proto in protocols]
    for obj in (Integer(-1), Integer(0), Integer(1), Rational(1, 2), pi, E, I,
            oo, -oo, zoo, nan, S.GoldenRatio, S.EulerGamma, S.Catalan,
            S.EmptySet, S.IdentityFunction):
        for func in copiers:
            # `is`, not `==`: singletons must round-trip to the same object.
            assert func(obj) is obj
#================== functions ===================
from sympy.functions import (Piecewise, lowergamma, acosh,
chebyshevu, chebyshevt, ln, chebyshevt_root, binomial, legendre,
Heaviside, factorial, bernoulli, coth, tanh, assoc_legendre, sign,
arg, asin, DiracDelta, re, rf, Abs, uppergamma, binomial, sinh, Ynm,
cos, cot, acos, acot, gamma, bell, hermite, harmonic,
LambertW, zeta, log, factorial, asinh, acoth, Znm,
cosh, dirichlet_eta, Eijk, loggamma, erf, ceiling, im, fibonacci,
conjugate, tan, chebyshevu_root, floor, atanh, sqrt,
RisingFactorial, sin, atan, ff, FallingFactorial, lucas, atan2,
polygamma, exp)
def test_functions():
    """Round-trip a broad set of sympy functions (classes and applied)."""
    # One-argument functions: check both the class and an applied instance.
    one_var = (acosh, ln, Heaviside, factorial, bernoulli, coth, tanh,
        sign, arg, asin, DiracDelta, re, rf, Abs, sinh, cos, cot, acos, acot,
        gamma, bell, harmonic, LambertW, zeta, log, factorial, asinh,
        acoth, cosh, dirichlet_eta, loggamma, erf, ceiling, im, fibonacci,
        conjugate, tan, floor, atanh, sin, atan, lucas, exp)
    two_var = (rf, ff, lowergamma, chebyshevu, chebyshevt, binomial,
        atan2, polygamma, hermite, legendre, uppergamma)
    x, y, z = symbols("x,y,z")
    # These either need special arguments or are checked unapplied only.
    others = (chebyshevt_root, chebyshevu_root, Eijk(x, y, z),
        Piecewise( (0, x < -1), (x**2, x <= 1), (x**3, True)),
        assoc_legendre)
    for cls in one_var:
        check(cls)
        c = cls(x)
        check(c)
    for cls in two_var:
        check(cls)
        c = cls(x, y)
        check(c)
    for cls in others:
        check(cls)
#================== geometry ====================
from sympy.geometry.entity import GeometryEntity
from sympy.geometry.point import Point
from sympy.geometry.ellipse import Circle, Ellipse
from sympy.geometry.line import Line, LinearEntity, Ray, Segment
from sympy.geometry.polygon import Polygon, RegularPolygon, Triangle
def test_geometry():
    """Round-trip geometry entities through pickle/copy.

    check_attr=False: attribute-by-attribute comparison is skipped here —
    presumably some geometry attributes do not compare equal after a
    round trip; confirm before tightening.
    """
    p1 = Point(1, 2)
    p2 = Point(2, 3)
    p3 = Point(0, 0)
    p4 = Point(0, 1)
    for c in (
            GeometryEntity, GeometryEntity(), Point, p1, Circle, Circle(p1, 2),
            Ellipse, Ellipse(p1, 3, 4), Line, Line(p1, p2), LinearEntity,
            LinearEntity(p1, p2), Ray, Ray(p1, p2), Segment, Segment(p1, p2),
            Polygon, Polygon(p1, p2, p3, p4), RegularPolygon,
            RegularPolygon(p1, 4, 5), Triangle, Triangle(p1, p2, p3)):
        check(c, check_attr=False)
#================== integrals ====================
from sympy.integrals.integrals import Integral
def test_integrals():
    """Round-trip the Integral class and an instance through pickle/copy."""
    x = Symbol("x")
    for candidate in [Integral, Integral(x)]:
        check(candidate)
#==================== logic =====================
from sympy.core.logic import Logic
def test_logic():
    """Round-trip the Logic class and an instance through pickle/copy."""
    for candidate in [Logic, Logic(1)]:
        check(candidate)
#================== matrices ====================
from sympy.matrices import Matrix, SparseMatrix
def test_matrices():
    """Round-trip dense and sparse matrices through pickle/copy."""
    candidates = [Matrix, Matrix([1, 2, 3]),
                  SparseMatrix, SparseMatrix([[1, 2], [3, 4]])]
    for candidate in candidates:
        check(candidate)
#================== ntheory =====================
from sympy.ntheory.generate import Sieve
def test_ntheory():
    """Round-trip the Sieve class and an instance through pickle/copy."""
    for candidate in [Sieve, Sieve()]:
        check(candidate)
#================== physics =====================
from sympy.physics.paulialgebra import Pauli
from sympy.physics.units import Unit
def test_physics():
    """Round-trip physics objects (Unit, Pauli) through pickle/copy."""
    candidates = [Unit, Unit("meter", "m"), Pauli, Pauli(1)]
    for candidate in candidates:
        check(candidate)
#================== plotting ====================
# XXX: These tests are not complete, so XFAIL them
@XFAIL
def test_plotting():
    """Round-trip plotting classes (unapplied). XFAIL: incomplete tests."""
    from sympy.plotting.color_scheme import ColorGradient, ColorScheme
    from sympy.plotting.managed_window import ManagedWindow
    from sympy.plotting.plot import Plot, ScreenShot
    from sympy.plotting.plot_axes import PlotAxes, PlotAxesBase, PlotAxesFrame, PlotAxesOrdinate
    from sympy.plotting.plot_camera import PlotCamera
    from sympy.plotting.plot_controller import PlotController
    from sympy.plotting.plot_curve import PlotCurve
    from sympy.plotting.plot_interval import PlotInterval
    from sympy.plotting.plot_mode import PlotMode
    from sympy.plotting.plot_modes import Cartesian2D, Cartesian3D, Cylindrical, \
        ParametricCurve2D, ParametricCurve3D, ParametricSurface, Polar, Spherical
    from sympy.plotting.plot_object import PlotObject
    from sympy.plotting.plot_surface import PlotSurface
    from sympy.plotting.plot_window import PlotWindow
    for c in (
            ColorGradient, ColorGradient(0.2, 0.4), ColorScheme, ManagedWindow,
            ManagedWindow, Plot, ScreenShot, PlotAxes, PlotAxesBase,
            PlotAxesFrame, PlotAxesOrdinate, PlotCamera, PlotController,
            PlotCurve, PlotInterval, PlotMode, Cartesian2D, Cartesian3D,
            Cylindrical, ParametricCurve2D, ParametricCurve3D,
            ParametricSurface, Polar, Spherical, PlotObject, PlotSurface,
            PlotWindow):
        check(c)
@XFAIL
def test_plotting2():
    """Round-trip instantiated plotting objects. XFAIL: incomplete tests."""
    from sympy.plotting.color_scheme import ColorGradient, ColorScheme
    from sympy.plotting.managed_window import ManagedWindow
    from sympy.plotting.plot import Plot, ScreenShot
    from sympy.plotting.plot_axes import PlotAxes, PlotAxesBase, PlotAxesFrame, PlotAxesOrdinate
    from sympy.plotting.plot_camera import PlotCamera
    from sympy.plotting.plot_controller import PlotController
    from sympy.plotting.plot_curve import PlotCurve
    from sympy.plotting.plot_interval import PlotInterval
    from sympy.plotting.plot_mode import PlotMode
    from sympy.plotting.plot_modes import Cartesian2D, Cartesian3D, Cylindrical, \
        ParametricCurve2D, ParametricCurve3D, ParametricSurface, Polar, Spherical
    from sympy.plotting.plot_object import PlotObject
    from sympy.plotting.plot_surface import PlotSurface
    from sympy.plotting.plot_window import PlotWindow
    check(ColorScheme("rainbow"))
    check(Plot(1, visible=False))
    check(PlotAxes())
#================== polys =======================
from sympy import Poly, ZZ, QQ, lex
def test_pickling_polys_polytools():
    """Round-trip Poly and PurePoly through pickle/copy."""
    from sympy.polys.polytools import Poly, PurePoly, GroebnerBasis
    x = Symbol('x')
    for c in (Poly, Poly(x, x)):
        check(c)
    for c in (PurePoly, PurePoly(x)):
        check(c)
    # TODO: fix pickling of Options class (see GroebnerBasis._options)
    # for c in (GroebnerBasis, GroebnerBasis([x**2 - 1], x, order=lex)):
    #     check(c)
def test_pickling_polys_polyclasses():
    """Round-trip the low-level dense poly classes DMP, DMF and ANP."""
    from sympy.polys.polyclasses import DMP, DMF, ANP
    for c in (DMP, DMP([[ZZ(1)], [ZZ(2)], [ZZ(3)]], ZZ)):
        check(c)
    for c in (DMF, DMF(([ZZ(1), ZZ(2)], [ZZ(1), ZZ(3)]), ZZ)):
        check(c)
    for c in (ANP, ANP([QQ(1), QQ(2)], [QQ(1), QQ(2), QQ(3)], QQ)):
        check(c)
@XFAIL
def test_pickling_polys_rings():
    """Round-trip polynomial rings; currently expected to fail."""
    # NOTE: can't use protocols < 2 because we have to execute __new__ to
    # make sure caching of rings works properly.
    from sympy.polys.rings import PolyRing
    ring = PolyRing("x,y,z", ZZ, lex)
    for c in (PolyRing, ring):
        check(c, exclude=[0, 1])
    for c in (ring.dtype, ring.one):
        check(c, exclude=[0, 1], check_attr=False)  # TODO: Py3k
def test_pickling_polys_fields():
    """Round-trip fraction fields; checks disabled pending fixes."""
    # NOTE: can't use protocols < 2 because we have to execute __new__ to
    # make sure caching of fields works properly.
    from sympy.polys.fields import FracField
    field = FracField("x,y,z", ZZ, lex)
    # TODO: AssertionError: assert id(obj) not in self.memo
    # for c in (FracField, field):
    #     check(c, exclude=[0, 1])
    # TODO: AssertionError: assert id(obj) not in self.memo
    # for c in (field.dtype, field.one):
    #     check(c, exclude=[0, 1])
def test_pickling_polys_elements():
    """Round-trip domain elements; several checks disabled pending fixes."""
    from sympy.polys.domains.pythonrational import PythonRational
    from sympy.polys.domains.pythonfinitefield import PythonFiniteField
    from sympy.polys.domains.mpelements import MPContext
    for c in (PythonRational, PythonRational(1, 7)):
        check(c)
    gf = PythonFiniteField(17)
    # TODO: fix pickling of ModularInteger
    # for c in (gf.dtype, gf(5)):
    #     check(c)
    mp = MPContext()
    # TODO: fix pickling of RealElement
    # for c in (mp.mpf, mp.mpf(1.0)):
    #     check(c)
    # TODO: fix pickling of ComplexElement
    # for c in (mp.mpc, mp.mpc(1.0, -1.5)):
    #     check(c)
def test_pickling_polys_domains():
    """Round-trip polynomial domains (Python, GMPY and symbolic backends)."""
    from sympy.polys.domains.pythonfinitefield import PythonFiniteField
    from sympy.polys.domains.pythonintegerring import PythonIntegerRing
    from sympy.polys.domains.pythonrationalfield import PythonRationalField
    # TODO: fix pickling of ModularInteger
    # for c in (PythonFiniteField, PythonFiniteField(17)):
    #     check(c)
    for c in (PythonIntegerRing, PythonIntegerRing()):
        check(c)
    for c in (PythonRationalField, PythonRationalField()):
        check(c)
    # GMPY-backed domains are only exercised when gmpy is installed.
    if HAS_GMPY:
        from sympy.polys.domains.gmpyfinitefield import GMPYFiniteField
        from sympy.polys.domains.gmpyintegerring import GMPYIntegerRing
        from sympy.polys.domains.gmpyrationalfield import GMPYRationalField
        # TODO: fix pickling of ModularInteger
        # for c in (GMPYFiniteField, GMPYFiniteField(17)):
        #     check(c)
        for c in (GMPYIntegerRing, GMPYIntegerRing()):
            check(c)
        for c in (GMPYRationalField, GMPYRationalField()):
            check(c)
    from sympy.polys.domains.realfield import RealField
    from sympy.polys.domains.complexfield import ComplexField
    from sympy.polys.domains.algebraicfield import AlgebraicField
    from sympy.polys.domains.polynomialring import PolynomialRing
    from sympy.polys.domains.fractionfield import FractionField
    from sympy.polys.domains.expressiondomain import ExpressionDomain
    # TODO: fix pickling of RealElement
    # for c in (RealField, RealField(100)):
    #     check(c)
    # TODO: fix pickling of ComplexElement
    # for c in (ComplexField, ComplexField(100)):
    #     check(c)
    for c in (AlgebraicField, AlgebraicField(QQ, sqrt(3))):
        check(c)
    # TODO: AssertionError
    # for c in (PolynomialRing, PolynomialRing(ZZ, "x,y,z")):
    #     check(c)
    # TODO: AttributeError: 'PolyElement' object has no attribute 'ring'
    # for c in (FractionField, FractionField(ZZ, "x,y,z")):
    #     check(c)
    for c in (ExpressionDomain, ExpressionDomain()):
        check(c)
def test_pickling_polys_numberfields():
    """Round-trip AlgebraicNumber through pickle/copy."""
    from sympy.polys.numberfields import AlgebraicNumber
    for c in (AlgebraicNumber, AlgebraicNumber(sqrt(3))):
        check(c)
def test_pickling_polys_orderings():
    """Round-trip monomial ordering objects through pickle/copy."""
    from sympy.polys.orderings import (LexOrder, GradedLexOrder,
        ReversedGradedLexOrder, ProductOrder, InverseOrder)
    for c in (LexOrder, LexOrder()):
        check(c)
    for c in (GradedLexOrder, GradedLexOrder()):
        check(c)
    for c in (ReversedGradedLexOrder, ReversedGradedLexOrder()):
        check(c)
    # TODO: Argh, Python is so naive. No lambdas nor inner function support in
    # pickling module. Maybe someone could figure out what to do with this.
    #
    # for c in (ProductOrder, ProductOrder((LexOrder(), lambda m: m[:2]),
    #                                      (GradedLexOrder(), lambda m: m[2:]))):
    #     check(c)
    for c in (InverseOrder, InverseOrder(LexOrder())):
        check(c)
def test_pickling_polys_monomials():
    """Round-trip MonomialOps and Monomial through pickle/copy."""
    from sympy.polys.monomials import MonomialOps, Monomial
    x, y, z = symbols("x,y,z")
    for c in (MonomialOps, MonomialOps(3)):
        check(c)
    for c in (Monomial, Monomial((1, 2, 3), (x, y, z))):
        check(c)
def test_pickling_polys_errors():
    """Round-trip the polys exception classes through pickle/copy."""
    from sympy.polys.polyerrors import (ExactQuotientFailed, OperationNotSupported,
        HeuristicGCDFailed, HomomorphismFailed, IsomorphismFailed, ExtraneousFactors,
        EvaluationFailed, RefinementFailed, CoercionFailed, NotInvertible, NotReversible,
        NotAlgebraic, DomainError, PolynomialError, UnificationFailed, GeneratorsError,
        GeneratorsNeeded, ComputationFailed, UnivariatePolynomialError,
        MultivariatePolynomialError, PolificationFailed, OptionError, FlagError)
    x = Symbol('x')
    # TODO: TypeError: __init__() takes at least 3 arguments (1 given)
    # for c in (ExactQuotientFailed, ExactQuotientFailed(x, 3*x, ZZ)):
    #     check(c)
    # TODO: TypeError: can't pickle instancemethod objects
    # for c in (OperationNotSupported, OperationNotSupported(Poly(x), Poly.gcd)):
    #     check(c)
    for c in (HeuristicGCDFailed, HeuristicGCDFailed()):
        check(c)
    for c in (HomomorphismFailed, HomomorphismFailed()):
        check(c)
    for c in (IsomorphismFailed, IsomorphismFailed()):
        check(c)
    for c in (ExtraneousFactors, ExtraneousFactors()):
        check(c)
    for c in (EvaluationFailed, EvaluationFailed()):
        check(c)
    for c in (RefinementFailed, RefinementFailed()):
        check(c)
    for c in (CoercionFailed, CoercionFailed()):
        check(c)
    for c in (NotInvertible, NotInvertible()):
        check(c)
    for c in (NotReversible, NotReversible()):
        check(c)
    for c in (NotAlgebraic, NotAlgebraic()):
        check(c)
    for c in (DomainError, DomainError()):
        check(c)
    for c in (PolynomialError, PolynomialError()):
        check(c)
    for c in (UnificationFailed, UnificationFailed()):
        check(c)
    for c in (GeneratorsError, GeneratorsError()):
        check(c)
    for c in (GeneratorsNeeded, GeneratorsNeeded()):
        check(c)
    # TODO: PicklingError: Can't pickle <function <lambda> at 0x38578c0>: it's not found as __main__.<lambda>
    # for c in (ComputationFailed, ComputationFailed(lambda t: t, 3, None)):
    #     check(c)
    for c in (UnivariatePolynomialError, UnivariatePolynomialError()):
        check(c)
    for c in (MultivariatePolynomialError, MultivariatePolynomialError()):
        check(c)
    # TODO: TypeError: __init__() takes at least 3 arguments (1 given)
    # for c in (PolificationFailed, PolificationFailed({}, x, x, False)):
    #     check(c)
    for c in (OptionError, OptionError()):
        check(c)
    for c in (FlagError, FlagError()):
        check(c)
def test_pickling_polys_options():
    """Placeholder: Options pickling is broken (see TODO below)."""
    from sympy.polys.polyoptions import Options
    # TODO: fix pickling of `symbols' flag
    # for c in (Options, Options((), dict(domain='ZZ', polys=False))):
    #     check(c)
# TODO: def test_pickling_polys_rootisolation():
# RealInterval
# ComplexInterval
def test_pickling_polys_rootoftools():
    """Round-trip RootOf and RootSum through pickle/copy."""
    from sympy.polys.rootoftools import RootOf, RootSum
    x = Symbol('x')
    f = x**3 + x + 3
    for c in (RootOf, RootOf(f, 0)):
        check(c)
    for c in (RootSum, RootSum(f, exp)):
        check(c)
#================== printing ====================
from sympy.printing.latex import LatexPrinter
from sympy.printing.mathml import MathMLPrinter
from sympy.printing.pretty.pretty import PrettyPrinter
from sympy.printing.pretty.stringpict import prettyForm, stringPict
from sympy.printing.printer import Printer
from sympy.printing.python import PythonPrinter
def test_printing():
    """Round-trip printer classes and selected instances through pickle/copy."""
    for c in (LatexPrinter, LatexPrinter(), MathMLPrinter,
              PrettyPrinter, prettyForm, stringPict, stringPict("a"),
              Printer, Printer(), PythonPrinter, PythonPrinter()):
        check(c)
@XFAIL
def test_printing1():
    # MathMLPrinter instances currently do not round-trip.
    check(MathMLPrinter())
@XFAIL
def test_printing2():
    # PrettyPrinter instances currently do not round-trip.
    check(PrettyPrinter())
#================== series ======================
from sympy.series.limits import Limit
from sympy.series.order import Order
def test_series():
    """Round-trip pickling of Limit and Order, as classes and instances."""
    expr = Symbol("e")
    var = Symbol("x")
    for obj in (Limit, Limit(expr, var, 1), Order, Order(expr)):
        check(obj)
#================== concrete ==================
from sympy.concrete.products import Product
from sympy.concrete.summations import Sum
def test_concrete():
    """Round-trip pickling of Product and Sum, as classes and instances."""
    var = Symbol("x")
    instances = (Product, Product(var, (var, 2, 4)), Sum, Sum(var, (var, 2, 4)))
    for obj in instances:
        check(obj)
| |
"""
This module contains query handlers responsible for calculus queries:
infinitesimal, finite, etc.
"""
from __future__ import print_function, division
from sympy.logic.boolalg import conjuncts
from sympy.assumptions import Q, ask
from sympy.assumptions.handlers import CommonHandler
class AskInfinitesimalHandler(CommonHandler):
    """
    Handler for key 'infinitesimal'
    Test that a given expression is equivalent to an infinitesimal
    number
    """

    @staticmethod
    def Symbol(expr, assumptions):
        # A bare symbol is infinitesimal only when it is known to be zero.
        return expr.is_zero

    @staticmethod
    def _number(expr, assumptions):
        # helper method: a concrete number is infinitesimal iff it evaluates to 0
        return expr.evalf() == 0

    @staticmethod
    def Basic(expr, assumptions):
        # Fallback: only numeric expressions can be decided here; otherwise
        # returns None (unknown) implicitly.
        if expr.is_number:
            return AskInfinitesimalHandler._number(expr, assumptions)

    @staticmethod
    def Mul(expr, assumptions):
        """
        Infinitesimal*Bounded -> Infinitesimal
        """
        if expr.is_number:
            return AskInfinitesimalHandler._number(expr, assumptions)
        result = False
        for arg in expr.args:
            if ask(Q.infinitesimal(arg), assumptions):
                result = True
            elif ask(Q.finite(arg), assumptions):
                continue
            else:
                break
        else:
            # for/else: reached only when no factor broke out of the loop,
            # i.e. every factor is known infinitesimal or finite.  A break
            # (unknown factor) falls through and returns None.
            return result

    # Add and Pow reuse the Mul logic above.
    Add, Pow = [Mul]*2

    @staticmethod
    def Number(expr, assumptions):
        return expr == 0

    NumberSymbol = Number

    # I is a fixed non-zero constant, so never infinitesimal.
    ImaginaryUnit = staticmethod(CommonHandler.AlwaysFalse)
class AskFiniteHandler(CommonHandler):
    """
    Handler for key 'finite'.

    Test that an expression is bounded respect to all its variables.

    Examples of usage:

    >>> from sympy import Symbol, Q
    >>> from sympy.assumptions.handlers.calculus import AskFiniteHandler
    >>> from sympy.abc import x
    >>> a = AskFiniteHandler()
    >>> a.Symbol(x, Q.positive(x)) == None
    True
    >>> a.Symbol(x, Q.finite(x))
    True
    """

    @staticmethod
    def Symbol(expr, assumptions):
        """
        Handles Symbol.

        Examples
        ========

        >>> from sympy import Symbol, Q
        >>> from sympy.assumptions.handlers.calculus import AskFiniteHandler
        >>> from sympy.abc import x
        >>> a = AskFiniteHandler()
        >>> a.Symbol(x, Q.positive(x)) == None
        True
        >>> a.Symbol(x, Q.finite(x))
        True
        """
        # Prefer the symbol's own (old-style) assumption if it is decided.
        if expr.is_finite is not None:
            return expr.is_finite
        # Otherwise look for an explicit Q.finite(expr) in the assumptions.
        if Q.finite(expr) in conjuncts(assumptions):
            return True
        return None

    @staticmethod
    def Add(expr, assumptions):
        """
        Return True if expr is bounded, False if not and None if unknown.

        Truth Table:

        +-------+-----+-----------+-----------+
        |       |     |           |           |
        |       |  B  |     U     |     ?     |
        |       |     |           |           |
        +-------+-----+---+---+---+---+---+---+
        |       |     |   |   |   |   |   |   |
        |       |     |'+'|'-'|'x'|'+'|'-'|'x'|
        |       |     |   |   |   |   |   |   |
        +-------+-----+---+---+---+---+---+---+
        |       |     |           |           |
        |   B   |  B  |     U     |     ?     |
        |       |     |           |           |
        +---+---+-----+---+---+---+---+---+---+
        |   |   |     |   |   |   |   |   |   |
        |   |'+'|     | U | ? | ? | U | ? | ? |
        |   |   |     |   |   |   |   |   |   |
        |   +---+-----+---+---+---+---+---+---+
        |   |   |     |   |   |   |   |   |   |
        | U |'-'|     | ? | U | ? | ? | U | ? |
        |   |   |     |   |   |   |   |   |   |
        |   +---+-----+---+---+---+---+---+---+
        |   |   |     |           |           |
        |   |'x'|     |     ?     |     ?     |
        |   |   |     |           |           |
        +---+---+-----+---+---+---+---+---+---+
        |       |     |           |           |
        |   ?   |     |           |     ?     |
        |       |     |           |           |
        +-------+-----+-----------+---+---+---+

            * 'B' = Bounded
            * 'U' = Unbounded
            * '?' = unknown boundedness
            * '+' = positive sign
            * '-' = negative sign
            * 'x' = sign unknown

            * All Bounded -> True
            * 1 Unbounded and the rest Bounded -> False
            * >1 Unbounded, all with same known sign -> False
            * Any Unknown and unknown sign -> None
            * Else -> None

        When the signs are not the same you can have an undefined
        result as in oo - oo, hence 'bounded' is also undefined.
        """
        sign = -1  # sign of unknown or infinite; -1 is the "not seen yet" sentinel
        result = True
        for arg in expr.args:
            _bounded = ask(Q.finite(arg), assumptions)
            if _bounded:
                continue
            s = ask(Q.positive(arg), assumptions)
            # if there has been more than one sign or if the sign of this arg
            # is None and Bounded is None or there was already
            # an unknown sign, return None
            if sign != -1 and s != sign or \
                    s is None and (s == _bounded or s == sign):
                return None
            else:
                sign = s
            # once False, do not change
            if result is not False:
                result = _bounded
        return result

    @staticmethod
    def Mul(expr, assumptions):
        """
        Return True if expr is bounded, False if not and None if unknown.

        Truth Table:

        +---+---+---+--------+
        |   |   |   |        |
        |   | B | U |   ?    |
        |   |   |   |        |
        +---+---+---+---+----+
        |   |   |   |   |    |
        |   |   |   | s | /s |
        |   |   |   |   |    |
        +---+---+---+---+----+
        |   |   |   |        |
        | B | B | U |   ?    |
        |   |   |   |        |
        +---+---+---+---+----+
        |   |   |   |   |    |
        | U |   | U | U | ?  |
        |   |   |   |   |    |
        +---+---+---+---+----+
        |   |   |   |        |
        | ? |   |   |   ?    |
        |   |   |   |        |
        +---+---+---+---+----+

            * B = Bounded
            * U = Unbounded
            * ? = unknown boundedness
            * s = signed (hence nonzero)
            * /s = not signed
        """
        result = True
        for arg in expr.args:
            _bounded = ask(Q.finite(arg), assumptions)
            if _bounded:
                continue
            elif _bounded is None:
                # Unknown factor: only decidable if it is known nonzero,
                # otherwise 0*oo-style indeterminacy forces None.
                if result is None:
                    return None
                if ask(Q.nonzero(arg), assumptions) is None:
                    return None
                if result is not False:
                    result = None
            else:
                # Known-unbounded factor makes the product unbounded.
                result = False
        return result

    @staticmethod
    def Pow(expr, assumptions):
        """
        Unbounded ** NonZero -> Unbounded
        Bounded ** Bounded -> Bounded
        Abs()<=1 ** Positive -> Bounded
        Abs()>=1 ** Negative -> Bounded
        Otherwise unknown
        """
        base_bounded = ask(Q.finite(expr.base), assumptions)
        exp_bounded = ask(Q.finite(expr.exp), assumptions)
        if base_bounded is None and exp_bounded is None:  # Common Case
            return None
        if base_bounded is False and ask(Q.nonzero(expr.exp), assumptions):
            return False
        if base_bounded and exp_bounded:
            return True
        # The `== True` guards matter: the comparisons may stay symbolic
        # (neither True nor False) when the base's magnitude is unknown.
        if (abs(expr.base) <= 1) == True and ask(Q.positive(expr.exp), assumptions):
            return True
        if (abs(expr.base) >= 1) == True and ask(Q.negative(expr.exp), assumptions):
            return True
        if (abs(expr.base) >= 1) == True and exp_bounded is False:
            return False
        return None

    @staticmethod
    def log(expr, assumptions):
        # Finiteness delegated to the (single) argument.
        return ask(Q.finite(expr.args[0]), assumptions)

    exp = log

    # Bounded functions and fixed constants are always finite.
    cos, sin, Number, Pi, Exp1, GoldenRatio, ImaginaryUnit, sign = \
        [staticmethod(CommonHandler.AlwaysTrue)]*8

    Infinity, NegativeInfinity = [staticmethod(CommonHandler.AlwaysFalse)]*2
| |
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Django settings for deploy_board project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import logging

logger = logging.getLogger(__name__)

# Filesystem layout: settings live at the project root, templates beside them.
BASE_DIR = os.path.dirname(__file__)
PROJECT_PATH = BASE_DIR
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("SECRET_KEY", None)
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'

# OAuth -- any value other than the literal "OFF" enables OAuth.
OAUTH_ENABLED_STR = os.getenv("OAUTH_ENABLED", "OFF")
if OAUTH_ENABLED_STR == "OFF":
    OAUTH_ENABLED = False
else:
    OAUTH_ENABLED = True
OAUTH_CLIENT_ID = os.getenv("OAUTH_CLIENT_ID")
OAUTH_CLIENT_SECRET = os.getenv("OAUTH_CLIENT_SECRET")
OAUTH_CALLBACK = os.getenv("OAUTH_CALLBACK")
OAUTH_DOMAIN = os.getenv("OAUTH_DOMAIN")
OAUTH_CLIENT_TYPE = os.getenv("OAUTH_CLIENT_TYPE")
OAUTH_USER_INFO_URI = os.getenv("OAUTH_USER_INFO_URI")
OAUTH_USER_INFO_KEY = os.getenv("OAUTH_USER_INFO_KEY")
OAUTH_ACCESS_TOKEN_URL = os.getenv("OAUTH_ACCESS_TOKEN_URL")
OAUTH_AUTHORIZE_URL = os.getenv("OAUTH_AUTHORIZE_URL")
OAUTH_DEFAULT_SCOPE = os.getenv("OAUTH_DEFAULT_SCOPE")
OAUTH_USERNAME_INFO_KEY = os.getenv("OAUTH_USERNAME_INFO_KEY")
OAUTH_EXTRACT_USERNAME_FROM_EMAIL = os.getenv("OAUTH_EXTRACT_USERNAME_FROM_EMAIL")

# Teletraan backend service url
TELETRAAN_SERVICE_URL = os.getenv("TELETRAAN_SERVICE_URL")
TELETRAAN_SERVICE_VERSION = os.getenv("TELETRAAN_SERVICE_VERSION")
TELETRAAN_SERVICE_FIXED_OAUTH_TOKEN = os.getenv("TELETRAAN_SERVICE_FIXED_OAUTH_TOKEN", None)
TELETRAAN_HOST_INFORMATION_URL = os.getenv("HOST_INFORMATION_URL")

# CMDB vars
CMDB_API_HOST = os.getenv("CMDB_API_HOST", "http://localhost:8080/")
CMDB_INSTANCE_URL = os.getenv("CMDB_INSTANCE_URL", "api/cmdb/getinstance/")
CMDB_UI_HOST = os.getenv("CMDB_UI_HOST", "localhost")
PHOBOS_URL = os.getenv("PHOBOS_URL")

# Serviceframework add-on vars
SERVICE_RATELIMIT_CONFIG_URL = os.getenv("SERVICE_RATELIMIT_CONFIG_URL")
STATSBOARD_API_FORMAT = os.getenv("STATSBOARD_API_FORMAT", "OFF")
RATELIMIT_ENABLED_METRIC_FORMAT = os.getenv("RATELIMIT_ENABLED_METRIC_FORMAT", "OFF")
ENABLING_SERVICE_RATELIMIT_URL = os.getenv("ENABLING_SERVICE_RATELIMIT_URL", "OFF")
KAFKA_MSGS_DELIVERED_METRIC = os.getenv("KAFKA_MSGS_DELIVERED_METRIC", "OFF")
DASHBOARD_URL_ENDPOINT_FORMAT = os.getenv("DASHBOARD_URL_ENDPOINT_FORMAT","OFF")

# For rolling out new features
GUINEA_PIG_ENVS = os.getenv("GUINEA_PIG_ENVS", "").split(",")
KAFKA_LOGGING_ADD_ON_ENVS = os.getenv("KAFKA_LOGGING_ADD_ON_ENVS", "").split(",")

# NOTE(review): LOG_DIR/LOG_LEVEL have no defaults; the LOGGING dict below
# produces an invalid config if they are unset -- confirm deployment always
# provides both environment variables.
LOG_DIR = os.getenv("LOG_DIR")
LOG_LEVEL = os.getenv("LOG_LEVEL")

# Change to your domain or hosts
# DEBUG/TEMPLATE_DEBUG are only set in the DEBUG branch; Django defaults both
# to False otherwise.
if LOG_LEVEL == 'DEBUG':
    DEBUG = True
    TEMPLATE_DEBUG = True
    ALLOWED_HOSTS = []
else:
    ALLOWED_HOSTS = ['*']
# Logging: everything to a rotating service log + console; HTTP request
# logging goes to its own rotating access log.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
        },
    },
    'handlers': {
        # Rotating application log: 5 backups x 5 MB each.
        'default': {
            'level': LOG_LEVEL,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': '%s/service.log' % LOG_DIR,
            'maxBytes': 1024 * 1024 * 5,  # 5 MB
            'backupCount': 5,
            'formatter': 'standard',
        },
        # Rotating access log, fed only by the 'django.request' logger.
        'request_handler': {
            'level': LOG_LEVEL,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': '%s/access.log' % LOG_DIR,
            'maxBytes': 1024 * 1024 * 5,  # 5 MB
            'backupCount': 5,
            'formatter': 'standard',
        },
        'console': {
            'level': LOG_LEVEL,
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        # Root logger: all application logging.
        '': {
            'handlers': ['default', 'console'],
            'level': LOG_LEVEL,
            'propagate': True
        },
        # Request logging kept out of the root handlers (propagate=False).
        'django.request': {
            'handlers': ['request_handler'],
            'level': LOG_LEVEL,
            'propagate': False
        },
    }
}
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'deploy_board.webapp',
)

# Use the fixed-token OAuth middleware when a fixed token is configured
# (e.g. local development against a backend); otherwise delegate OAuth.
oauth_middleware = 'deploy_board.webapp.security.DelegatedOAuthMiddleware'
if TELETRAAN_SERVICE_FIXED_OAUTH_TOKEN:
    oauth_middleware = 'deploy_board.webapp.security.FixedOAuthMiddleware'

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    oauth_middleware,
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'deploy_board.webapp.error_views.ExceptionHandlerMiddleware',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.contrib.messages.context_processors.messages",
    "django.core.context_processors.request",
)

ROOT_URLCONF = 'deploy_board.urls'
WSGI_APPLICATION = 'deploy_board.wsgi.application'

# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# The number of days since the build publish date required to trigger an old build version warning message
OLD_BUILD_WARNING_THRESHOLD_DAYS = 10

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(PROJECT_PATH, "static"),
)
#STATIC_ROOT = os.path.join(PROJECT_PATH, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'

# Site global metrics (overridden below when IS_PINTEREST is true)
SITE_METRICS_CONFIGS = []

# Deep Teletraan backend health check url
TELETRAAN_SERVICE_HEALTHCHECK_URL = os.getenv("TELETRAAN_SERVICE_HEALTHCHECK_URL", None)

# Show hosts that are STOPPING or STOPPED in the environments page
DISPLAY_STOPPING_HOSTS = os.getenv("DISPLAY_STOPPING_HOSTS", "true")

# Pinterest specific settings -- all of these are env-var driven flags.
IS_PINTEREST = True if os.getenv("IS_PINTEREST", "false") == "true" else False
BUILD_URL = "https://jenkins.pinadmin.com/job/"
TELETRAAN_DISABLE_CREATE_ENV_PAGE = True if os.getenv("TELETRAAN_DISABLE_CREATE_ENV_PAGE", "false") == "true" else False
TELETRAAN_REDIRECT_CREATE_ENV_PAGE_URL = os.getenv("TELETRAAN_REDIRECT_CREATE_ENV_PAGE_URL", None)
IS_DURING_CODE_FREEZE = True if os.getenv("TELETRAAN_CODE_FREEZE", "false") == "true" else False
TELETRAAN_CODE_FREEZE_URL = os.getenv("TELETRAAN_CODE_FREEZE_URL", None)
TELETRAAN_JIRA_SOURCE_URL = os.getenv("TELETRAAN_JIRA_SOURCE_URL", None)

# use Rodimus if present
RODIMUS_SERVICE_URL = os.getenv("RODIMUS_SERVICE_URL", None)
RODIMUS_SERVICE_VERSION = os.getenv("RODIMUS_SERVICE_VERSION", None)
if IS_PINTEREST:
    # use knox if present -- Knox overrides the env-provided secrets.
    KNOX_SESSION_ID = os.getenv("KNOX_SESSION_ID")
    if KNOX_SESSION_ID:
        from knox import Knox
        SECRET_KEY = Knox().get_primary(KNOX_SESSION_ID)
    ADMIN_OAUTH_SECRET_KNOX_ID = os.getenv("ADMIN_OAUTH_SECRET_KNOX_ID")
    if ADMIN_OAUTH_SECRET_KNOX_ID:
        from knox import Knox
        OAUTH_CLIENT_SECRET = Knox().get_primary(ADMIN_OAUTH_SECRET_KNOX_ID)

    # Site health metrics
    REQUESTS_URL = os.getenv("REQUESTS_URL")
    SUCCESS_RATE_URL = os.getenv("SUCCESS_RATE_URL")
    LATENCY_URL = os.getenv("LATENCY_URL")
    # Red/Yellow/Green thresholds for the site-health gauges.
    SITE_METRICS_CONFIGS = [
        {"title": "Requests", "url": REQUESTS_URL,
         "specs": [{"min": 0, "max": 50000, "color": "Red"},
                   {"min": 50000, "max": 80000, "color": "Yellow"},
                   {"min": 80000, "max": 200000, "color": "Green"}]},
        {"title": "Success", "url": SUCCESS_RATE_URL,
         "specs": [{"min": 90, "max": 98, "color": "Red"},
                   {"min": 98, "max": 99, "color": "Yellow"},
                   {"min": 99, "max": 100, "color": "Green"}]},
        {"title": "Latency", "url": LATENCY_URL,
         "specs": [{"min": 800, "max": 1000, "color": "Red"},
                   {"min": 600, "max": 800, "color": "Yellow"},
                   {"min": 300, "max": 600, "color": "Green"}]}
    ]

# Pinterest ngapp2 status file
NGAPP_PRE_DEPLOY_STATUS_NODE = "varnish_pre_deploy_status"
NGAPP_POST_DEPLOY_STATUS_NODE = "varnish_post_deploy_status"
NGAPP_ROLLBACK_STATUS_NODE = "varnish_rollback_status"
NGAPP_DEPLOY_CHANNEL = "deploys"

DEFAULT_START_TIME = "-1d"

# Pinterest Default Cloud Provider
DEFAULT_PROVIDER = 'AWS'
# Pinterest Default AMI image name
DEFAULT_CMP_IMAGE = 'cmp_base'
# Pinterest Default Host Type
DEFAULT_CMP_HOST_TYPE = 'ComputeLo(new Gen)'
DEFAULT_CELL = 'aws-us-east-1'
DEFAULT_PLACEMENT = os.getenv('DEFAULT_CMP_PLACEMENT')
# Pinterest Default Puppet Environment
DEFAULT_CMP_PINFO_ENVIRON = os.getenv('DEFAULT_CMP_PINFO_ENVIRON')
DEFAULT_CMP_ACCESS_ROLE = os.getenv('DEFAULT_CMP_ACCESS_ROLE')
| |
from __future__ import unicode_literals
import json
from django.contrib.auth.models import User
from django.db import IntegrityError, transaction
from django.http import (HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden)
from django.shortcuts import get_object_or_404, render
from django.utils import six
from django.views.generic import View
from djblets.avatars.services import URLAvatarService
from djblets.db.query import get_object_or_none
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.admin.server import get_server_url
from reviewboard.avatars import avatar_services
from reviewbotext.extension import ReviewBotExtension
def _serialize_user(request, user):
"""Serialize a user into a JSON-encodable format.
Args:
request (django.http.HttpRequest):
The HTTP request.
user (django.contrib.auth.models.User):
The user to serialize.
Returns:
dict:
A dictionary of data to be encoded and sent back to the client.
"""
if user:
service = avatar_services.for_user(user)
if service:
avatar_url = service.get_avatar_urls(request, user, 48)['1x']
else:
avatar_url = None
return {
'avatar_url': avatar_url,
'id': user.id,
'fullname': user.get_full_name(),
'username': user.username,
}
else:
return None
class ConfigureView(View):
    """The basic "Configure" page for Review Bot."""

    template_name = 'reviewbot/configure.html'

    def get(self, request):
        """Render and return the admin page.

        Args:
            request (django.http.HttpRequest):
                The HTTP request.

        Returns:
            django.http.HttpResponse:
                The response.
        """
        if not request.user.is_superuser:
            # TODO: Once we move to Django 1.9+, we can switch to the new
            # access mixin methods instead of testing this ourselves. Here and
            # below in the other views in this file.
            return HttpResponseForbidden()

        extension = ReviewBotExtension.instance
        user = get_object_or_none(User, pk=extension.settings.get('user'))

        return render(request, self.template_name, {
            'extension': extension,
            'reviewbot_user': user,
        })

    def post(self, request):
        """Save the extension configuration.

        Args:
            request (django.http.HttpRequest):
                The HTTP request, including POSTed data.

        Returns:
            django.http.HttpResponse:
                The response. The body of the response is a JSON-encoded blob
                which indicates success or failure, and in the success case,
                includes the current configuration.
        """
        if not request.user.is_superuser:
            return HttpResponseForbidden()

        extension = ReviewBotExtension.instance
        should_save = False

        new_user = request.POST.get('reviewbot_user')

        if new_user:
            try:
                user = User.objects.get(pk=new_user)
            except User.DoesNotExist:
                # TODO: return which field was invalid
                return HttpResponseBadRequest(
                    json.dumps({
                        'result': 'error',
                        'field': 'user',
                        'error': 'The specified user does not exist.',
                    }),
                    content_type='application/json')

            extension.settings['user'] = user.pk
            should_save = True
        else:
            # No user was submitted; report the currently-configured one.
            user = get_object_or_none(User, pk=extension.settings.get('user'))

        if 'reviewbot_broker_url' in request.POST:
            broker_url = request.POST['reviewbot_broker_url']
            extension.settings['broker_url'] = broker_url
            should_save = True
        else:
            broker_url = extension.settings.get('broker_url', '')

        # Persist only if something actually changed.
        if should_save:
            extension.settings.save()

        return HttpResponse(
            json.dumps({
                'result': 'success',
                'broker_url': broker_url,
                'user': _serialize_user(request, user),
            }),
            content_type='application/json')
class ConfigureUserView(View):
    """An endpoint for setting the user for Review Bot."""

    def get(self, request):
        """Return the configured user.

        Args:
            request (django.http.HttpRequest):
                The HTTP request.

        Returns:
            django.http.HttpResponse:
                A response containing the currently-configured user.
        """
        if not request.user.is_superuser:
            return HttpResponseForbidden()

        extension = ReviewBotExtension.instance
        user = get_object_or_404(User, pk=extension.settings.get('user'))

        return HttpResponse(json.dumps(_serialize_user(request, user)),
                            content_type='application/json')

    def post(self, request):
        """Create a new user for Review Bot.

        Args:
            request (django.http.HttpRequest):
                The HTTP request.

        Returns:
            django.http.HttpResponse:
                A response containing the newly-configured user.
        """
        if not request.user.is_superuser:
            # Bug fix: this previously called self.get_no_access_error(),
            # which does not exist on Django's View class and would raise
            # AttributeError. Mirror the other views in this file instead.
            return HttpResponseForbidden()

        siteconfig = SiteConfiguration.objects.get_current()
        noreply_email = siteconfig.get('mail_default_from')
        extension = ReviewBotExtension.instance

        try:
            # Create the user, its profile, and its avatar atomically so a
            # failure leaves no partial 'reviewbot' account behind.
            with transaction.atomic():
                user = User.objects.create(username='reviewbot',
                                           email=noreply_email,
                                           first_name='Review',
                                           last_name='Bot')
                profile = user.get_profile()
                profile.should_send_email = False
                profile.save()

                avatar_service = avatar_services.get_avatar_service(
                    URLAvatarService.avatar_service_id)
                avatar_service.setup(
                    user,
                    {
                        '1x': extension.get_static_url(
                            'images/reviewbot.png'),
                        '2x': extension.get_static_url(
                            'images/reviewbot@2x.png'),
                    })
        except IntegrityError:
            # Most likely the 'reviewbot' username already exists.
            return HttpResponseBadRequest()

        extension.settings['user'] = user.pk
        extension.settings.save()

        return HttpResponse(json.dumps(_serialize_user(request, user)),
                            content_type='application/json')
class WorkerStatusView(View):
    """An "API" to get worker status.

    This view is an internal API to query the workers and return their status.
    """

    def get(self, request):
        """Query workers and return their status.

        Args:
            request (django.http.HttpRequest):
                The HTTP request.

        Returns:
            django.http.HttpResponse:
                The response.
        """
        extension = ReviewBotExtension.instance
        response = {}

        if extension.is_configured:
            try:
                payload = {
                    'session': extension.login_user(),
                    'url': get_server_url(),
                }
                # Broadcast to all workers and collect replies for up to 10s.
                reply = extension.celery.control.broadcast('update_tools_list',
                                                           payload=payload,
                                                           reply=True,
                                                           timeout=10)

                response = {
                    'state': 'success',
                    'hosts': [
                        {
                            # Worker names are of the form "name@host"; keep
                            # only the host part.
                            'hostname': hostname.split('@', 1)[1],
                            'tools': data['tools'],
                        }
                        for item in reply
                        for hostname, data in six.iteritems(item)
                    ],
                }
            except IOError as e:
                response = {
                    'state': 'error',
                    'error': 'Unable to connect to broker: %s.' % e,
                }
        else:
            response = {
                'state': 'error',
                'error': 'Review Bot is not yet configured.',
            }

        return HttpResponse(json.dumps(response),
                            content_type='application/json')
| |
import os
import glob
import warnings
import re
import atexit
class NoSuchSensorError(Exception):
    """Raised when no sensor matches the requested port and/or driver name."""

    def __init__(self, port, name=None):
        self.port, self.name = port, name

    def __str__(self):
        details = (self.port, self.name)
        return "No such sensor port=%d name=%s" % details
class Ev3StringType(object):
    """Identity codec: sysfs attribute values pass through as raw strings."""

    @staticmethod
    def post_read(value):
        # Values read from sysfs are already strings; no conversion needed.
        return value

    @staticmethod
    def pre_write(value):
        # Written verbatim.
        return value
class Ev3IntType(object):
    """Codec converting between sysfs attribute strings and Python ints."""

    @staticmethod
    def post_read(value):
        # sysfs reads come back as strings; expose them as ints.
        return int(value)

    @staticmethod
    def pre_write(value):
        # sysfs writes must be text.
        return str(value)
class create_ev3_property(object):
    """Class decorator that attaches sysfs-backed properties to a class.

    Each keyword argument names an attribute to create; its value is a dict of
    options (read_only, write_only, flush_on_write, property_type) for that
    attribute. The generated properties delegate to the decorated class's
    ``read_value``/``write_value`` methods.
    """

    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def __call__(self, cls):
        def make_property(attr, read_only=False, write_only=False,
                          flush_on_write=False, property_type=Ev3StringType):
            # `attr` is bound per call, avoiding the late-binding closure trap.
            def getter(instance):
                if write_only:
                    return None
                return property_type.post_read(instance.read_value(attr))

            def setter(instance, value):
                instance.write_value(
                    attr, property_type.pre_write(value), flush_on_write)

            return property(getter, None if read_only else setter)

        for attr, options in self.kwargs.items():
            setattr(cls, attr, make_property(attr, **options))
        return cls
@create_ev3_property(
    bin_data={'read_only': True},
    bin_data_format={'read_only': True},
    decimals={'read_only': True},
    #mode={ 'read_only': False},
    fw_version={'read_only': True},
    modes={'read_only': True},
    name={'read_only': True},
    port_name={'read_only': True},
    uevent={'read_only': True},
    units={'read_only': True},
    value0={'read_only': True, 'property_type': Ev3IntType},
    value1={'read_only': True, 'property_type': Ev3IntType},
    value2={'read_only': True, 'property_type': Ev3IntType},
    value3={'read_only': True, 'property_type': Ev3IntType},
    value4={'read_only': True, 'property_type': Ev3IntType},
    value5={'read_only': True, 'property_type': Ev3IntType},
    value6={'read_only': True, 'property_type': Ev3IntType},
    value7={'read_only': True, 'property_type': Ev3IntType}
)
class LegoSensor():
    """Wrapper around a /sys/class/lego-sensor device directory."""

    def __init__(self, port=-1, name=None):
        """Bind to a sensor's sysfs directory.

        Args:
            port: sensor port number; the default -1 means "search by name".
            name: LEGO driver name (e.g. 'lego-ev3-ir') to search for.
        """
        self.sys_path = ""
        sensor_existing = False
        if name is not None and port == -1:
            for p in glob.glob('/sys/class/lego-sensor/sensor*/uevent'):
                with open(p) as f:
                    port_name = None
                    for value in f:
                        if value.strip().lower().startswith('LEGO_ADDRESS=in'.lower()):
                            # NOTE(review): only the last character is kept, so
                            # this would break for port numbers >= 10 --
                            # confirm the platform never exposes those.
                            port_name = value.strip()[-1]
                            if sensor_existing:
                                break
                        if value.strip().lower() == ('LEGO_DRIVER_NAME=' + name).lower():
                            self.sys_path = os.path.dirname(p)
                            sensor_existing = True
                            if port_name is not None:
                                break
                # Both the driver name and the address were found in this
                # uevent file: bind to it. (A stray debug print of port_name
                # was removed here.)
                if sensor_existing:
                    self.port = int(port_name)
                    break
        # FOR JETSON IR_control.py ONLY! (intentionally disabled)
        # if not sensor_existing:
        #     raise NoSuchSensorError(port, name)
        self._mode = self.read_value('mode')

    def read_value(self, name):
        """Return the stripped contents of sysfs attribute *name*, or None
        if the attribute file does not exist."""
        attr_file = os.path.join(self.sys_path, name)
        if not os.path.isfile(attr_file):
            return None
        with open(attr_file) as f:
            return f.read().strip()

    def write_value(self, name, value, flush=False):
        """Write *value* to sysfs attribute *name*; silently do nothing if
        the attribute file does not exist."""
        attr_file = os.path.join(self.sys_path, name)
        if not os.path.isfile(attr_file):
            return
        with open(attr_file, 'w') as f:
            f.write(str(value))
            if flush:
                f.flush()

    @property
    def mode(self):
        """Cached sensor mode; setting it only touches sysfs on change."""
        return self._mode

    @mode.setter
    def mode(self, value):
        if self._mode != value:
            self._mode = value
            self.write_value('mode', value)

    def mode_force_flush(self, value):
        """Unconditionally write the mode, bypassing the change check."""
        self._mode = value
        self.write_value('mode', value)
class InfraredSensor(LegoSensor):
    """EV3 infrared sensor (driver 'lego-ev3-ir')."""

    def __init__(self, port=-1):
        LegoSensor.__init__(self, port, name='lego-ev3-ir')
        # Last remote-control channel/command seen; -1 means "none yet".
        self._channel = -1
        self._cmd = -1

    class REMOTE:
        """Button values for the `remote` property."""
        NONE = 0
        RED_UP = 1
        RED_DOWN = 2
        BLUE_UP = 3
        BLUE_DOWN = 4
        RED_UP_AND_BLUE_UP = 5
        RED_UP_AND_BLUE_DOWN = 6
        RED_DOWN_AND_BLUE_UP = 7
        RED_DOWN_AND_BLUE_DOWN = 8
        # NOTE(review): "BAECON" looks like a typo for BEACON, but renaming
        # the constant would break existing callers.
        BAECON_MODE_ON = 9
        RED_UP_AND_RED_DOWN = 10
        BLUE_UP_AND_BLUE_DOWN = 11

    @property
    def remote(self):
        """IR remote control mode. A tuple of received values for each of the
        4 channels.
        """
        self.mode = 'IR-REMOTE'
        return self.value0, self.value1, self.value2, self.value3

    @property
    def prox(self):
        """Proximity mode. Distance in percent (100% is about 70cm)."""
        self.mode = 'IR-PROX'
        return self.value0

    def get_channel(self):
        # Plain accessors kept because callers (e.g. get_IR_cmd) use them
        # directly in addition to the `channel`/`cmd` properties.
        return self._channel

    def set_channel(self, value):
        self._channel = value

    # Bug fix: the third positional argument of property() is fdel (the
    # deleter), not the docstring; previously the string 'channel' was
    # installed as the deleter. Pass it as doc= instead.
    channel = property(get_channel, set_channel, doc='channel')

    def get_cmd(self):
        return self._cmd

    def set_cmd(self, value):
        self._cmd = value

    # Same fdel-vs-doc fix as for `channel` above.
    cmd = property(get_cmd, set_cmd, doc='cmd')
def get_IR_cmd(ir):
    """Poll *ir*'s remote-control state.

    Args:
        ir: object exposing a ``remote`` sequence of per-channel button
            values plus ``set_channel``/``set_cmd`` mutators (InfraredSensor).

    Returns:
        tuple: ``(channel, command, 1)`` for the lowest-numbered channel
        reporting a non-zero button value, or ``(-1, -1, 0)`` when nothing
        is pressed. The channel and command are also stored on *ir*.
    """
    ir_input = list(ir.remote)
    active = [i for i, button in enumerate(ir_input) if button != 0]
    if not active:  # idiomatic emptiness test (was: active == [])
        return (-1, -1, 0)
    channel = active[0]
    command = ir_input[channel]
    ir.set_channel(int(channel))
    ir.set_cmd(int(command))
    return (channel, command, 1)
| |
"""
aRMSD plot functions
(c) 2017 by Arne Wagner
"""
# Authors: Arne Wagner
# License: MIT
from __future__ import absolute_import, division, print_function
from builtins import range
import sys
try:
import numpy as np
except ImportError:
pass
try:
from vtk import (vtkCellPicker, vtkSphereSource, vtkLineSource, vtkTubeFilter, vtkPolyDataMapper, vtkActor,
vtkRenderer, vtkRenderWindow, vtkRenderWindowInteractor, vtkRenderLargeImage, vtkPNGWriter,
vtkWindowToImageFilter, vtkCamera, vtkVectorText, vtkFollower, vtkArrowSource, vtkCubeSource,
vtkLegendBoxActor, vtkMath, vtkMatrix4x4, vtkTransformPolyDataFilter, vtkTransform, vtkLookupTable,
vtkScalarBarActor, vtkScalarBarWidget, vtkInteractorStyleTrackballCamera, vtkProperty,
vtkPropPicker, VTK_VERSION)
has_vtk, vtk_version = True, VTK_VERSION
except ImportError:
has_vtk = False
vtk_version = 'Module not available'
try:
import matplotlib as mpl
has_mpl, mpl_version = True, mpl.__version__
if sys.version_info <= (3,0):
mpl.use('QT4Agg') # Set MPL backend to QT4Agg
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
except ImportError:
has_mpl = False
mpl_version = 'Module not available'
try:
from uncertainties import unumpy as unp
from uncertainties import ufloat
has_uc = True
except ImportError:
try:
import unumpycore as unp
from ucore import ufloat, ufloat_fromstr
except ImportError:
pass
# Matplotlib/pyplot settings, Set Backend to QT4Agg
# C:\Python\Lib\site-packages\matplotlib\mpl-data\matplotlibrc
almost_black = '#262626'  # softer than pure black for axes/labels

# Bug fix: only touch rcParams when matplotlib actually imported. Previously
# these lines ran unconditionally, raising NameError on `mpl` and defeating
# the try/except import guard above.
if has_mpl:
    mpl.rcParams['savefig.dpi'] = 600
    mpl.rcParams['savefig.bbox'] = 'tight'
    mpl.rcParams['axes.edgecolor'] = almost_black
    mpl.rcParams['axes.labelcolor'] = almost_black
# Copy structural properties from core module
def geo_distance(xyz1, xyz2):
    """ Global function for distance calculation - compatible with uncertainties
        (coordinates are assumed to be uarrays, so plain element-wise power
        is used instead of np.linalg.norm) """

    delta = xyz1 - xyz2
    return np.sum(delta ** 2) ** 0.5
def geo_angle(xyz1, xyz2, xyz3):
    """ Global function for angle calculation (degrees, vertex at xyz2) -
        compatible with uncertainties; coordinates are assumed to be uarrays """

    vec_a = xyz1 - xyz2
    vec_b = xyz3 - xyz2
    norm_product = np.sum(vec_a**2)**0.5 * np.sum(vec_b**2)**0.5
    cos_angle = np.dot(vec_a, vec_b) / norm_product
    return (180.0/np.pi) * unp.arccos(cos_angle)
def geo_torsion(xyz1, xyz2, xyz3, xyz4):
    """ Global function for torsion calculation - compatible with uncertainties
        coordinates are assumed to be uarrays; the sign of the dihedral
        angle is discarded """

    b0 = -1.0 * (xyz2 - xyz1)
    b1 = xyz3 - xyz2
    b2 = xyz4 - xyz3

    # Normals of the two planes spanned by consecutive bond vectors
    plane1 = np.cross(b0, b1)
    plane2 = np.cross(b2, b1)
    normal = np.cross(plane1, plane2)

    y = np.dot(normal, b1) * (1.0 / np.sum(b1**2)**0.5)
    x = np.dot(plane1, plane2)
    return np.abs((180.0/np.pi) * unp.arctan2(y, x))  # Ignore sign of the dihedral angle
###############################################################################
# VTK ROUTINES
###############################################################################
class aRMSD_substructure_picker(vtkInteractorStyleTrackballCamera):
""" Class for the fractional coordinates / aRMSD substructure selection """
def __init__(self, settings, atoms_to_pick, align, plot_type, picker_type):
""" Initializes the picker interactor """
self.plot_type = plot_type
self.AddObserver('LeftButtonPressEvent', self.leftButtonPressEvent)
# Arrays for picked atoms and actors
self.PickedAtoms, self.PickedActors = np.array([], dtype=np.int), np.array([], dtype=np.int)
self.LastPickedActor = None
self.LastPickedProperty = vtkProperty()
self.actors_to_pick = np.asarray(atoms_to_pick)
self.picker_color = settings.picker_col_rgb
self.picker_type = picker_type
self.NewPickedActor = None
self.sym_idf = align.sym_idf
self.bnd_idx = align.bnd_idx
self.colors = align.col_glob_rgb
def full_connects(self, idx):
""" Determines the all positions ultimately attached to the given atom """
def _is_connected_to(idx):
""" Determines the connections of the given index """
ravel_bnds = np.ravel(self.bnd_idx[np.where(self.bnd_idx == idx)[0]])
pos = np.where(ravel_bnds != idx)[0]
return ravel_bnds[pos]
# Set up initial connection array and evaluate first index
connection_array = np.asarray(idx, dtype=np.int)
connection_array = np.unique(np.hstack((connection_array, _is_connected_to(idx))))
checked_pos = [idx] # This list contains all positions that have been checked
if len(connection_array) == 1: # No atoms are connected to the picked one
pass
else:
while True: # Stay in this loop until no additional indices are added
old_len = len(connection_array)
for pos in connection_array:
if pos not in checked_pos: # Evaluate only once
connection_array = np.unique(np.hstack((connection_array, _is_connected_to(pos))))
checked_pos.append(pos)
new_len = len(connection_array)
if new_len == old_len: # Exit loop if no changes occurred after all position were checked
break
return connection_array
def click_message(self, sym_idf, picker_type):
""" Message displayed to user when an atom is clicked """
if self.plot_type == 'substructure':
print("> Atom "+str(sym_idf)+" has been added to 'substructure 1' ...")
elif self.plot_type == 'fractional':
if picker_type == 'cluster':
print("> All atoms connected to "+str(sym_idf)+" will be removed ...")
else:
print("> Atom "+str(sym_idf)+" will be removed ...")
def second_click_message(self, sym_idf, picker_type):
""" Message displayed to user when a selected atom is clicked """
if self.plot_type == 'substructure':
print("> Atom "+str(sym_idf)+" has been removed from 'substructure 1' ...")
elif self.plot_type == 'fractional':
if picker_type == 'cluster':
print("> Removal of all atoms connected to "+str(sym_idf)+" was cancelled ...")
else:
print("> Removal of atom "+str(sym_idf)+" was cancelled ...")
def leftButtonPressEvent(self, obj, event):
    """ Event that will happen on left mouse click

    Toggles the selection state of the clicked atom.  With picker_type
    'cluster' the whole covalently connected fragment (via full_connects)
    is selected/deselected at once; otherwise only the single atom.
    PickedAtoms / PickedActors are kept as parallel, unique numpy arrays.
    """
    clickPos = self.GetInteractor().GetEventPosition()  # Get the clicked position
    picker = vtkPropPicker()
    picker.Pick(clickPos[0], clickPos[1], 0, self.GetDefaultRenderer())
    self.NewPickedActor = picker.GetActor()  # Get the actual actor on the clicked position
    # If an actor/atom has been selected (only selective pick events via actors_to_pick)
    if self.NewPickedActor is not None and self.NewPickedActor in self.actors_to_pick:
        # NOTE(review): int() on the np.where result assumes each actor occurs exactly
        # once in actors_to_pick (length-1 index array) - confirm; multi-element would raise
        atom_idx = int(np.where(self.NewPickedActor == self.actors_to_pick)[0])  # Index of the atom
        if atom_idx not in self.PickedAtoms:  # Select only if it wasn't selected so far
            # Highlight the atom by changing the color
            self.click_message(self.sym_idf[atom_idx], self.picker_type)
            self.NewPickedActor.GetProperty().SetColor(self.picker_color)
            if self.picker_type == 'cluster':
                # Extend the selection to every atom (transitively) bonded to the clicked one
                all_positions = self.full_connects(atom_idx)
                self.PickedActors = np.unique(np.append(self.PickedActors, self.actors_to_pick[all_positions]))
                self.PickedAtoms = np.unique(np.append(self.PickedAtoms, all_positions))
                # Change colors for all atoms
                [actor.GetProperty().SetColor(self.picker_color) for actor in self.PickedActors]
            else:
                self.PickedActors = np.unique(np.append(self.PickedActors, self.actors_to_pick[atom_idx]))
                self.PickedAtoms = np.unique(np.append(self.PickedAtoms, atom_idx))
        else:  # Remove duplicates (second click deselects)
            self.second_click_message(self.sym_idf[atom_idx], self.picker_type)
            if self.picker_type == 'cluster':  # Change all connected atoms
                all_positions = self.full_connects(atom_idx)
                pos_in_picked_atoms = np.ravel(np.asarray([np.where(self.PickedAtoms == pos)[0]
                                                           for pos in all_positions]))
                self.PickedActors = np.unique(np.asarray([np.delete(self.PickedActors, np.where(self.PickedActors == self.actors_to_pick[pos])[0]) for pos in all_positions]))  # Remove actor from array
                self.PickedAtoms = np.unique(np.delete(self.PickedAtoms, pos_in_picked_atoms, axis=0))  # Remove atomic index from index array
                # self.colors is the single global RGB color (col_glob_rgb) for this picker
                [actor.GetProperty().SetColor(self.colors) for actor in self.PickedActors]  # Change colors for all atoms
            else:
                self.PickedActors = np.unique(np.delete(self.PickedActors, np.where(self.PickedActors == self.actors_to_pick[atom_idx])[0]))  # Remove actor from array
                self.PickedAtoms = np.unique(np.delete(self.PickedAtoms, np.where(self.PickedAtoms == atom_idx)[0]))  # Remove atomic index from index array
            self.NewPickedActor.GetProperty().SetColor(self.colors)  # Reset the color to the initial value
    self.OnLeftButtonDown()
    return
# ---------------------------------------------------------------------------------
class aRMSD_plot_picker(vtkInteractorStyleTrackballCamera):
    """ Class for picking events in the aRMSD plot

    Lets the user pick up to four atoms; depending on the number of picked
    atoms the RMSD contribution (1), distance (2), angle (3) or dihedral (4)
    is calculated for both molecules and printed to the console.
    """

    def __init__(self, settings, atoms_to_pick, align):
        """ Initializes the picker interactor """
        self.AddObserver('LeftButtonPressEvent', self.leftButtonPressEvent)
        self.PickedAtoms, self.PickedActors = [], []  # Lists for picked atoms and actors
        self.LastPickedActor = None
        self.LastPickedProperty = vtkProperty()
        self.actors_to_pick = np.asarray(atoms_to_pick)  # Actors that respond to pick events
        self.picker_color = settings.picker_col_rgb  # Highlight color for picked atoms
        self.std_type = settings.std_type
        self.calc_prec = settings.calc_prec
        self.use_std = settings.use_std  # If True: propagate uncertainties through the calculations
        self.sym_idf = align.sym_idf
        self.coords = align.cor
        self.coords_mol1 = align.cor_mol1_kbs
        self.coords_mol2 = align.cor_mol2_kbs
        self.coords_std_mol1 = align.cor_mol1_kbs_std
        self.coords_std_mol2 = align.cor_mol2_kbs_std
        self.colors = align.col_at_rgb  # Per-atom base colors (used to un-highlight)
        self.name_mol1 = align.name1
        self.name_mol2 = align.name2
        self.RMSD_per_atom = align.msd_sum**0.5
        self.rmsd_perc = (align.msd_sum / np.sum(align.msd_sum)) * 100  # Contribution of individual atom types

    def calc_picker_property(self, list_of_picks):
        """ Calculates distances, angles or dihedral angles with or without uncertainties

        Returns (value_mol1, value_mol2, difference); the quantity is selected
        by the number of picked atoms (2 = distance, 3 = angle, 4 = torsion).
        """
        def _proper_std(stds, list_of_picks):
            # True if every picked atom has usable (non-zero) standard deviations
            if self.std_type == 'simple':  # Check only if stds exist
                return True
            else:  # H/and some heavy atoms may have no stds
                return 0.0 not in np.sum(stds[np.asarray(list_of_picks)], axis=1)

        def _per_mol(coords, stds):
            """ Calculate for one molecule """
            if self.use_std:  # Combine coordinates and uncertainties to array
                xyz = unp.uarray(coords[np.asarray(list_of_picks)], stds[np.asarray(list_of_picks)])
            else:
                xyz = coords[np.asarray(list_of_picks)]
            if len(list_of_picks) == 2:  # Distance
                value = geo_distance(xyz[0], xyz[1])
            elif len(list_of_picks) == 3:  # Angle
                value = geo_angle(xyz[0], xyz[1], xyz[2])
            elif len(list_of_picks) == 4:  # Torsion angle
                value = geo_torsion(xyz[0], xyz[1], xyz[2], xyz[3])
            # NOTE(review): ufloat scalars expose '.nominal_value' (singular);
            # '.nominal_values' is the unumpy array accessor - this branch looks like
            # it would raise AttributeError for scalar values; confirm reachability
            return ufloat(value.nominal_values, 0.0) if not _proper_std(stds, list_of_picks) else value

        p1, p2 = _per_mol(self.coords_mol1, self.coords_std_mol1), _per_mol(self.coords_mol2, self.coords_std_mol2)
        delta = p2 - p1
        return p1, p2, delta

    def calc_property(self, list_of_picks):
        """ Calculates different structural properties and prints them to the console """
        def apply_format(value, n_digits):
            # Formats 'value' into a fixed-width string; n_digits serves as both
            # field width and precision, n_digits == 0 selects the short '3.2f' form
            str_len = 12
            ft_str_norm = '{:3.2f}'
            if n_digits != 0:
                ft_str_norm = '{:'+str(n_digits)+'.'+str(n_digits)+'f}'
            ft_str_unce = '{:.1uS}'  # One digit for values with uncertainties
            if self.use_std:  # If standard deviations exist
                if value.std_dev == 0.0 or n_digits == 0:  # Different format for values without standard deviations
                    if n_digits == 0:
                        str_len = 5
                    add = str_len - len(ft_str_norm.format(value.nominal_value))  # Right-padding to fixed width
                    if n_digits == 0 and value.nominal_value < 10.0:
                        return '0'+ft_str_norm.format(value.nominal_value)+' '*(add-1)  # Zero-pad single digits
                    else:
                        return ft_str_norm.format(value.nominal_value)+' '*add
                else:
                    add = str_len - len(ft_str_unce.format(value))
                    return ft_str_unce.format(value)+' '*add
            else:  # No ufloat values
                return ft_str_norm.format(value)

        def print_values(values, n_digits, unit=' deg.'):
            # Prints the per-molecule values and their difference
            print('\n '+str(self.name_mol1)+': '+apply_format(values[0], n_digits)+unit+
                  '\n '+str(self.name_mol2)+': '+apply_format(values[1], n_digits)+unit+
                  '\t\tDiff. = '+apply_format(values[2], n_digits)+unit)

        if len(list_of_picks) == 1:  # Show RMSD contribution of the atom
            print('\nAtom [' +str(self.sym_idf[list_of_picks[0]])+']: RMSD = '+
                  apply_format(self.RMSD_per_atom[list_of_picks[0]], 3)+
                  ' Angstrom ('+apply_format(self.rmsd_perc[list_of_picks[0]], 0)+' % of the total RMSD)')
        elif len(list_of_picks) == 2:  # Calculate distance
            d1, d2, delta = self.calc_picker_property(list_of_picks)
            print('\nDistance between: ['+str(self.sym_idf[list_of_picks[0]])+' -- '+
                  str(self.sym_idf[list_of_picks[1]])+']')
            print_values([d1, d2, delta], n_digits=5, unit=' A')
        elif len(list_of_picks) == 3:  # Calculate angle
            a1, a2, delta = self.calc_picker_property(list_of_picks)
            print('\nAngle between: ['+str(self.sym_idf[list_of_picks[0]])+' -- '+
                  str(self.sym_idf[list_of_picks[1]])+' -- '+str(self.sym_idf[list_of_picks[2]])+']')
            print_values([a1, a2, delta], n_digits=5, unit=' deg.')
        elif len(list_of_picks) == 4:  # Calculate dihedral angle
            t1, t2, delta = self.calc_picker_property(list_of_picks)
            print('\nDihedral between: ['+str(self.sym_idf[list_of_picks[0]])+' -- '+
                  str(self.sym_idf[list_of_picks[1]])+' -- '+str(self.sym_idf[list_of_picks[2]])+' -- '+
                  str(self.sym_idf[list_of_picks[3]])+']')
            print_values([t1, t2, delta], n_digits=5, unit=' deg.')

    def leftButtonPressEvent(self, obj, event):
        """ Event that will happen on left mouse click

        A first click on an atom selects it (up to four atoms), a second click
        deselects it; with four atoms selected, clicking a further atom clears
        the whole selection.  Every change reprints the current structural
        property via calc_property().
        """
        clickPos = self.GetInteractor().GetEventPosition()  # Get the clicked position
        picker = vtkPropPicker()
        picker.Pick(clickPos[0], clickPos[1], 0, self.GetDefaultRenderer())
        self.NewPickedActor = picker.GetActor()  # Get the actual actor on the clicked position
        # If an actor/atom has been selected (only selective pick events via actors_to_pick)
        if self.NewPickedActor is not None and self.NewPickedActor in self.actors_to_pick:
            atom_idx = int(np.where(self.NewPickedActor == self.actors_to_pick)[0])  # Index of the atom
            if len(self.PickedAtoms) <= 3:  # Maximum selection will be 4 atoms
                if atom_idx not in self.PickedAtoms:  # Select only if it wasn't selected so far
                    self.PickedActors.append(self.actors_to_pick[atom_idx])
                    self.PickedAtoms.append(atom_idx)
                    self.calc_property(self.PickedAtoms)
                    # Highlight the atom by changing the color
                    self.NewPickedActor.GetProperty().SetColor(self.picker_color)
                else:  # Remove duplicates
                    self.PickedActors.remove(self.actors_to_pick[atom_idx])  # Remove actor from list
                    self.PickedAtoms.remove(atom_idx)  # Remove atomic index from indices list
                    self.calc_property(self.PickedAtoms)
                    # Reset the color to the initial value
                    self.NewPickedActor.GetProperty().SetColor(self.colors[atom_idx])
            else:  # Reset all colors
                # PickedAtoms and PickedActors are parallel lists, so positional indexing matches
                colors = [self.colors[index] for index in self.PickedAtoms]
                [self.PickedActors[index].GetProperty().SetColor(colors[index]) for
                 index in range(len(self.PickedActors))]
                self.PickedActors, self.PickedAtoms = [], []  # Empty the lists
        self.OnLeftButtonDown()
        return
class Molecular_Viewer_vtk(object):
    """ A molecular viewer object based on vtk used for 3d plots """

    def __init__(self, settings):
        """ Initializes object and creates the renderer and camera """
        self.ren = vtkRenderer()
        self.ren_win = vtkRenderWindow()
        self.ren_win.AddRenderer(self.ren)
        self.ren.SetBackground(settings.backgr_col_rgb)
        self.ren.SetUseDepthPeeling(settings.use_depth_peel)
        self.title = 'aRMSD Structure Visualizer'
        self.magnif = None  # Screenshot magnification, set in show()
        self.save_counts = 0  # Number of screenshots taken (avoids overwriting files)
        self.picker = None  # Interactor style, set by the make_* methods that support picking
        # Create the active camera
        self.camera = vtkCamera()
        self.camera.SetPosition(np.array([0.0, 0.0, 50]))
        self.ren.SetActiveCamera(self.camera)
        self.bnd_eps = 1.0E-03  # Minimum atom separation below which no bond is drawn
        self.at_actors_list = []  # List of atomic actors (for pick events)
        # Create a renderwindowinteractor
        self.iren = vtkRenderWindowInteractor()
        self.iren.SetRenderWindow(self.ren_win)

    def show(self, molecule1, molecule2, settings):
        """ Shows the results in a new window

        Blocks until the window is closed, stores the final camera orientation
        on both molecules and, for the selection plot types, returns the picked
        atom indices as an integer array (otherwise returns None).
        """
        self.magnif = settings.magnif_fact  # Copy magnification information from settings
        # Determine file names for screenshots (one per plot type)
        png_file_names = {'initial': 'VTK_initial_plot',
                          'inertia': 'VTK_inertia_plot',
                          'aRMSD': 'VTK_aRMSD_plot',
                          'superpos': 'VTK_superposition_plot',
                          'substructure': 'VTK_substructure_plot',
                          'fractional': 'VTK_fractional_plot'}
        self.png_file_name = png_file_names.get(self.plot_type, 'VTK_plot')
        if self.has_cam_vtk:  # Set camera properties (if they exist...)
            self.camera.SetPosition(self.cam_vtk_pos)
            self.camera.SetFocalPoint(self.cam_vtk_focal_pt)
            self.camera.SetViewUp(self.cam_vtk_view_up)
        self.iren.Initialize()
        self.ren_win.SetSize(settings.window_size)
        self.ren_win.SetWindowName(self.title)
        self.iren.AddObserver('KeyPressEvent', self.keypress)  # Key events for screenshots, etc.
        self.ren_win.Render()
        self.iren.Start()
        # Determine the camera properties of the final orientation and store them on both molecules
        molecule1.cam_vtk_pos, molecule2.cam_vtk_pos = self.camera.GetPosition(), self.camera.GetPosition()
        molecule1.cam_vtk_wxyz, molecule2.cam_vtk_wxyz = self.camera.GetOrientationWXYZ(), self.camera.GetOrientationWXYZ()
        molecule1.cam_vtk_focal_pt, molecule2.cam_vtk_focal_pt = self.camera.GetFocalPoint(), self.camera.GetFocalPoint()
        molecule1.cam_vtk_view_up, molecule2.cam_vtk_view_up = self.camera.GetViewUp(), self.camera.GetViewUp()
        molecule1.has_cam_vtk, molecule2.has_cam_vtk = True, True
        del self.ren_win, self.iren
        if self.picker is not None and self.plot_type in ['substructure', 'fractional']:
            # Fix: np.int was removed in NumPy 1.24 - use the builtin int instead
            return np.ravel(np.asarray(self.picker.PickedAtoms, dtype=int))

    def close_window(self):
        """ Not working, but intended to close the window """
        self.ren_win.Finalize()
        self.iren.TerminateApp()

    def keypress(self, obj, event):
        """ Function that handles key pressing events ('s' = screenshot, 'h' = help) """
        key = obj.GetKeySym()
        if key == 's':  # Screenshots
            render_large = vtkRenderLargeImage()
            render_large.SetInput(self.ren)
            render_large.SetMagnification(self.magnif)
            writer = vtkPNGWriter()
            writer.SetInputConnection(render_large.GetOutputPort())
            if self.save_counts == 0:  # Make sure that screenshots are not overwritten by default
                export_file_name = self.png_file_name+'.png'
            else:
                export_file_name = self.png_file_name+'_'+str(self.save_counts)+'.png'
            writer.SetFileName(export_file_name)
            self.ren_win.Render()
            writer.Write()
            print('\n> The image was saved as '+export_file_name+' !')
            self.save_counts += 1  # Remember save event
            del render_large
        elif key == 'b':  # Add or remove a bond (not implemented)
            pass
        elif key == 'h':  # Display help
            print("\n> Press the 's' button to save the scene as .png file")

    def _add_arrow_actor(self, startPoint, endPoint, color, shaft_radius, tip_length, tip_radius, settings):
        """ Builds an arrow actor pointing from startPoint to endPoint and adds it to the renderer

        Shared implementation for add_principal_axes() and add_arrow(); the
        arrow length is taken from the distance between the two points.
        """
        arrow = vtkArrowSource()
        arrow.SetShaftResolution(settings.res_atom)
        arrow.SetTipResolution(settings.res_atom)
        arrow.SetShaftRadius(shaft_radius)
        arrow.SetTipLength(tip_length)
        arrow.SetTipRadius(tip_radius)
        # Fix: np.float was removed in NumPy 1.24 - use the builtin float instead
        normalizedX, normalizedY, normalizedZ = np.zeros(3, dtype=float), np.zeros(3, dtype=float), \
                                                np.zeros(3, dtype=float)
        # The X axis is a vector from start to end
        math = vtkMath()
        math.Subtract(endPoint, startPoint, normalizedX)
        length = math.Norm(normalizedX)
        math.Normalize(normalizedX)
        # The Z axis is an arbitrary vector cross X
        arbitrary = np.asarray([0.2, -0.3, 1.7])
        math.Cross(normalizedX, arbitrary, normalizedZ)
        math.Normalize(normalizedZ)
        # The Y axis is Z cross X
        math.Cross(normalizedZ, normalizedX, normalizedY)
        matrix = vtkMatrix4x4()
        matrix.Identity()  # Create the direction cosine matrix
        for i in range(3):
            matrix.SetElement(i, 0, normalizedX[i])
            matrix.SetElement(i, 1, normalizedY[i])
            matrix.SetElement(i, 2, normalizedZ[i])
        # Apply the transforms
        transform = vtkTransform()
        transform.Translate(startPoint)
        transform.Concatenate(matrix)
        transform.Scale(length, length, length)
        # Transform the polydata
        transformPD = vtkTransformPolyDataFilter()
        transformPD.SetTransform(transform)
        transformPD.SetInputConnection(arrow.GetOutputPort())
        # Create a mapper and connect it to the source data, set up actor
        mapper = vtkPolyDataMapper()
        mapper.SetInputConnection(transformPD.GetOutputPort())
        arrow_actor = vtkActor()
        arrow_actor.GetProperty().SetColor(color)
        arrow_actor.GetProperty().SetLighting(settings.use_light)
        arrow_actor.GetProperty().SetOpacity(settings.alpha_arrow)
        arrow_actor.GetProperty().ShadingOn()
        arrow_actor.SetMapper(mapper)
        self.ren.AddActor(arrow_actor)

    def add_principal_axes(self, com, pa, length, col, settings):
        """ Adds one principal axis of rotation (drawn from the center of mass) to the view

        The 'length' parameter is kept for interface compatibility; as in the
        original implementation the drawn length is derived from the axis
        vector 'pa' itself.
        """
        startPoint, endPoint = com*settings.scale_glob, (pa*2 + com)*settings.scale_glob
        self._add_arrow_actor(startPoint, endPoint, col, 0.005*10, 0.4, 0.01*10, settings)

    def add_arrow(self, direction, length, settings):
        """ Adds a single coordinate-system arrow along the given reference axis ('x', 'y' or 'z') """
        # Generate start and end points based on length and reference axis
        if direction == 'x':
            startPoint, endPoint = np.asarray([-length, 0.0, 0.0]), np.asarray([length, 0.0, 0.0])
        elif direction == 'y':
            startPoint, endPoint = np.asarray([0.0, -length, 0.0]), np.asarray([0.0, length, 0.0])
        elif direction == 'z':
            startPoint, endPoint = np.asarray([0.0, 0.0, -length]), np.asarray([0.0, 0.0, length])
        self._add_arrow_actor(startPoint, endPoint, settings.arrow_col_rgb, 0.005, 0.12, 0.02, settings)

    def add_atom(self, pos, radius, color, settings):
        """ Adds a single atom as vtkSphere with defined radius and color at the given position """
        # Create new SphereSource and define its properties
        atom = vtkSphereSource()
        atom.SetCenter(pos)
        atom.SetRadius(radius*settings.scale_at)
        atom.SetPhiResolution(settings.res_atom)
        atom.SetThetaResolution(settings.res_atom)
        # Create a mapper and connect it to the source data, set up actor
        mapper = vtkPolyDataMapper()
        mapper.SetInputConnection(atom.GetOutputPort())
        at_actor = vtkActor()
        at_actor.GetProperty().SetColor(color)
        at_actor.GetProperty().SetOpacity(settings.alpha_at)
        at_actor.GetProperty().SetLighting(settings.use_light)
        at_actor.GetProperty().ShadingOn()
        at_actor.SetMapper(mapper)
        self.ren.AddActor(at_actor)
        self.at_actors_list.append(at_actor)  # Remember the actor for pick events

    def add_com(self, molecule, radius, color, settings):
        """ Adds center of mass """
        self.add_atom(molecule.com*settings.scale_glob, radius, color, settings)

    def add_all_atoms(self, molecule, settings):
        """ Wrapper for the addition of all atoms from the molecule """
        if settings.name == 'Wireframe':  # Wireframe plot style
            radii = np.repeat(0.76, molecule.n_atoms)
            color = molecule.col_at_rgb
        elif self.plot_type == 'substructure':  # Substructure selection: uniform radius and global color
            radii = np.repeat(1.5, molecule.n_atoms)
            color = np.transpose(np.repeat(molecule.col_glob_rgb,
                                           molecule.n_atoms).reshape((3, molecule.n_atoms)))
        else:
            radii = molecule.rad_plt_vtk
            color = molecule.col_at_rgb
        [self.add_atom(molecule.cor[atom]*settings.scale_glob, radii[atom],
                       color[atom], settings) for atom in range(molecule.n_atoms)]

    def add_all_atoms_superpos(self, align, settings):
        """ Wrapper for the addition of all atoms for the superposition plot """
        if settings.name == 'Wireframe':
            radii = np.repeat(0.76, align.n_atoms)
        else:
            radii = align.rad_plt_vtk
        [self.add_atom(align.cor_mol1_kbs[atom]*settings.scale_glob, radii[atom],
                       align.col_at_mol1_rgb[atom], settings) for atom in range(align.n_atoms)]
        [self.add_atom(align.cor_mol2_kbs[atom]*settings.scale_glob, radii[atom],
                       align.col_at_mol2_rgb[atom], settings) for atom in range(align.n_atoms)]

    def add_bond(self, first_loc, second_loc, color, settings):
        """ Adds a single bond as vtkLine between two locations """
        # Skip (near-)coincident positions - a zero-length tube is not drawable
        if np.linalg.norm(first_loc - second_loc) > self.bnd_eps:
            # Create LineSource and set start and end point
            bnd_source = vtkLineSource()
            bnd_source.SetPoint1(first_loc)
            bnd_source.SetPoint2(second_loc)
            # Create a TubeFilter around the line
            TubeFilter = vtkTubeFilter()
            TubeFilter.SetInputConnection(bnd_source.GetOutputPort())
            TubeFilter.SetRadius(settings.rad_bnd)
            TubeFilter.SetNumberOfSides(settings.res_bond)
            TubeFilter.CappingOn()
            # Map data, create actor and set the color
            mapper = vtkPolyDataMapper()
            mapper.SetInputConnection(TubeFilter.GetOutputPort())
            bnd_actor = vtkActor()
            bnd_actor.GetProperty().SetColor(color)
            bnd_actor.GetProperty().SetOpacity(settings.alpha_at)
            bnd_actor.GetProperty().SetLighting(settings.use_light)
            bnd_actor.GetProperty().ShadingOn()
            bnd_actor.SetMapper(mapper)
            self.ren.AddActor(bnd_actor)

    def add_kabsch_bond(self, first_loc, second_loc, color1, color2, color3, settings):
        """ Makes a single bond as a combination of three segments """
        if np.allclose(color1, color2):  # Uniform color: one plain bond is enough
            self.add_bond(first_loc, second_loc, color1, settings)
        else:
            diff = (second_loc - first_loc) / 3.0
            # Add all thirds to actor list
            self.add_bond(first_loc, first_loc+diff, color1, settings)
            self.add_bond(first_loc+diff, first_loc+2*diff, color2, settings)
            self.add_bond(first_loc+2*diff, second_loc, color3, settings)

    def add_all_bonds_regular(self, molecule, settings):
        """ Wrapper for the addition of all bonds from the molecule """
        if self.plot_type == 'substructure':
            color = np.transpose(np.repeat(molecule.col_glob_rgb,
                                           molecule.n_bonds).reshape((3, molecule.n_bonds)))
        else:
            color = molecule.col_bnd_rgb
        [self.add_bond(molecule.cor[molecule.bnd_idx[bond][0]]*settings.scale_glob,
                       molecule.cor[molecule.bnd_idx[bond][1]]*settings.scale_glob,
                       color[bond], settings) for bond in range(molecule.n_bonds)]

    def add_all_bonds_disordered(self, molecule1, molecule2, settings):
        """ Wrapper for the addition of all disordered positions between the molecules """
        if settings.n_dev > 0 and molecule1.disord_pos is not None:
            color_rgb = np.asarray(settings.col_disord_rgb)  # RGB color for disordered positions
            disord_col = np.transpose(np.repeat(color_rgb, settings.n_dev).reshape((3, settings.n_dev)))
            [self.add_bond(molecule1.cor[molecule1.disord_pos[pos]]*settings.scale_glob,
                           molecule2.cor[molecule1.disord_pos[pos]]*settings.scale_glob,
                           disord_col[pos], settings) for pos in range(settings.n_dev)]

    def add_all_bonds_kabsch(self, align, settings):
        """ Wrapper for the addition of all bonds (Kabsch) from the molecule """
        if align.chd_bnd_col_rgb is None:  # Check if changed bonds exist at all - if they don't: use normal bonds
            [self.add_bond(align.cor[align.bnd_idx[bond][0]]*settings.scale_glob,
                           align.cor[align.bnd_idx[bond][1]]*settings.scale_glob,
                           align.col_bnd_glob_rgb, settings) for bond in range(align.n_bonds)]
        [self.add_kabsch_bond(align.cor[align.bnd_idx[bond][0]]*settings.scale_glob,
                              align.cor[align.bnd_idx[bond][1]]*settings.scale_glob,
                              align.col_bnd_rgb[bond], align.chd_bnd_col_rgb[bond], align.col_bnd_rgb[bond], settings)
         for bond in range(align.n_bonds)]

    def add_all_bonds_superpos(self, align, settings):
        """ Wrapper for the addition of all bonds for the superposition plot """
        [self.add_bond(align.cor_mol1_kbs[align.bnd_idx[bond][0]]*settings.scale_glob,
                       align.cor_mol1_kbs[align.bnd_idx[bond][1]]*settings.scale_glob,
                       align.col_bnd_mol1_rgb[bond], settings) for bond in range(align.n_bonds)]
        [self.add_bond(align.cor_mol2_kbs[align.bnd_idx[bond][0]]*settings.scale_glob,
                       align.cor_mol2_kbs[align.bnd_idx[bond][1]]*settings.scale_glob,
                       align.col_bnd_mol2_rgb[bond], settings) for bond in range(align.n_bonds)]

    def add_label(self, coords, color, label):
        """ Adds a label at the given coordinate """
        source = vtkVectorText()
        source.SetText(label)
        mapper = vtkPolyDataMapper()
        mapper.SetInputConnection(source.GetOutputPort())
        follower = vtkFollower()  # A follower always faces the camera
        follower.SetMapper(mapper)
        follower.GetProperty().SetColor(color)
        follower.SetPosition(coords)
        follower.SetScale(0.4)
        self.ren.AddActor(follower)
        follower.SetCamera(self.ren.GetActiveCamera())

    def add_all_labels(self, molecule, settings):
        """ Wrapper for the addition of all labels for the molecule """
        # 'radii' holds the per-atom label offsets (labels sit on top of the spheres)
        if settings.name == 'Wireframe':
            radii = np.transpose(np.reshape(np.repeat((0.0, 0.0, 0.76), molecule.n_atoms), (3, molecule.n_atoms)))
        elif self.plot_type == 'substructure':
            radii = np.repeat(1.5, molecule.n_atoms)
        else:
            radii = np.transpose(np.vstack((np.zeros(molecule.n_atoms), np.zeros(molecule.n_atoms),
                                            molecule.rad_plt_vtk)))
        if settings.draw_labels:
            label_color = [0.0, 0.0, 0.0]
            if settings.label_type == 'full':
                labels = molecule.sym_idf
            elif settings.label_type == 'symbol_only':
                labels = molecule.sym
            [self.add_label(molecule.cor[atom]*settings.scale_glob+radii[atom]*settings.scale_at, label_color,
                            labels[atom]) for atom in range(molecule.n_atoms)]

    def add_legend(self, molecule1, molecule2, settings):
        """ Adds a legend to the VTK renderer """
        # Tiny invisible cube used as the legend's symbol placeholder
        cube_source = vtkCubeSource()
        cube_source.SetBounds(-0.001,0.001,-0.001,0.001,-0.001,0.001)
        mapper = vtkPolyDataMapper()
        mapper.SetInputConnection(cube_source.GetOutputPort())  # connect source and mapper
        cube_actor = vtkActor()
        cube_actor.SetMapper(mapper)
        cube_actor.GetProperty().SetColor(settings.backgr_col_rgb)  # Set cube color to background
        legendBox = vtkLegendBoxActor()  # Adds the actual legend box
        legendBox.SetBackgroundColor(settings.backgr_col_rgb)  # NOT WORKING - why
        legendBox.SetBorder(1)  # No border
        legendBox.SetBox(2)
        legendBox.SetNumberOfEntries(2)
        if self.plot_type == 'initial':
            legendBox.SetEntry(0, cube_source.GetOutput(), molecule1.name, settings.col_model_rgb)
            legendBox.SetEntry(1, cube_source.GetOutput(), molecule2.name, settings.col_refer_rgb)
        elif self.plot_type == 'superpos':
            legendBox.SetEntry(0, cube_source.GetOutput(), molecule2.name1, settings.col_model_fin_rgb)
            legendBox.SetEntry(1, cube_source.GetOutput(), molecule2.name2, settings.col_refer_fin_rgb)
        pos1, pos2 = legendBox.GetPositionCoordinate(), legendBox.GetPosition2Coordinate()
        pos1.SetCoordinateSystemToView(), pos2.SetCoordinateSystemToView()
        pos1.SetValue(.4, -1.0), pos2.SetValue(1.0, -0.75)
        self.ren.AddActor(cube_actor)
        self.ren.AddActor(legendBox)

    def add_color_bar(self, settings):
        """ Adds a color bar to the VTK scene """
        # Generate and customize lookuptable
        lut = vtkLookupTable()
        lut.SetHueRange(1/3.0, 0.0)  # From green to red
        lut.SetSaturationRange(1.0, 1.0)
        lut.SetValueRange(1.0, 1.0)
        lut.SetAlphaRange(1.0, 1.0)
        lut.SetNumberOfColors(settings.n_col_aRMSD)
        lut.SetRange(0.0, settings.max_RMSD_diff)  # Labels from 0.0 to max_RMSD
        lut.Build()  # Build the table
        # Create the scalar_bar
        scalar_bar = vtkScalarBarActor()
        scalar_bar.SetTitle(' ')  # Otherwise it causes a string error
        scalar_bar.GetProperty().SetColor(0.0, 0.0, 0.0)
        scalar_bar.SetLabelFormat('%-#6.2g')  # Two digits
        scalar_bar.SetNumberOfLabels(8)
        scalar_bar.SetLookupTable(lut)
        self.ren.AddActor(scalar_bar)
        # Create the scalar_bar_widget
        #scalar_bar_widget = vtkScalarBarWidget()
        #scalar_bar_widget.SetInteractor(self.iren)
        #scalar_bar_widget.SetScalarBarActor(scalar_bar)
        #scalar_bar_widget.On()

    def add_camera_setting(self, molecule):
        """ Adds a camera orientation from the molecule to the VTK object """
        self.has_cam_vtk = molecule.has_cam_vtk
        self.cam_vtk_pos = molecule.cam_vtk_pos
        self.cam_vtk_focal_pt = molecule.cam_vtk_focal_pt
        self.cam_vtk_view_up = molecule.cam_vtk_view_up

    def make_initial_plot(self, molecule1, molecule2, settings):
        """ Calls all functions needed for the initial plot """
        self.plot_type = 'initial'
        arrow_length = np.max(np.abs(molecule2.cor)) * 1.25 * settings.scale_glob
        self.add_all_atoms(molecule1, settings)
        self.add_all_bonds_regular(molecule1, settings)
        self.add_all_atoms(molecule2, settings)
        self.add_all_bonds_regular(molecule2, settings)
        self.add_all_bonds_disordered(molecule1, molecule2, settings)
        self.add_all_labels(molecule1, settings)
        self.add_all_labels(molecule2, settings)
        if settings.draw_arrows:  # Draw arrows
            self.add_arrow('x', arrow_length, settings)
            self.add_arrow('y', arrow_length, settings)
            self.add_arrow('z', arrow_length, settings)
            if settings.draw_labels:  # Draw arrow labels
                self.add_label([arrow_length, 0.0, 0.0], settings.arrow_col_rgb, 'X')
                self.add_label([0.0, arrow_length, 0.0], settings.arrow_col_rgb, 'Y')
                self.add_label([0.0, 0.0, arrow_length], settings.arrow_col_rgb, 'Z')
        if settings.draw_legend:  # Draw legend
            self.add_legend(molecule1, molecule2, settings)
        self.add_camera_setting(molecule2)

    def make_inertia_plot(self, molecule1, molecule2, pa_mol1, pa_mol2, settings):
        """ Calls all functions for the inertia tensor plot """
        radius = 1.3
        self.plot_type = 'inertia'
        arrow_length = np.max(np.abs(molecule1.cor)) * 1.25 * settings.scale_glob
        arrow_length2 = np.max(np.abs(molecule2.cor)) * 0.65 * settings.scale_glob
        self.add_all_atoms(molecule1, settings)
        self.add_all_bonds_regular(molecule1, settings)
        self.add_all_atoms(molecule2, settings)
        self.add_all_bonds_regular(molecule2, settings)
        self.add_com(molecule1, radius, settings.col_model_inertia_rgb, settings)
        self.add_com(molecule2, radius, settings.col_refer_inertia_rgb, settings)
        self.add_principal_axes(molecule1.com, pa_mol1[0], arrow_length2, settings.col_model_inertia_rgb, settings)
        self.add_principal_axes(molecule1.com, pa_mol1[1], arrow_length2, settings.col_model_inertia_rgb, settings)
        self.add_principal_axes(molecule1.com, pa_mol1[2], arrow_length2, settings.col_model_inertia_rgb, settings)
        self.add_principal_axes(molecule2.com, pa_mol2[0], arrow_length2, settings.col_refer_inertia_rgb, settings)
        self.add_principal_axes(molecule2.com, pa_mol2[1], arrow_length2, settings.col_refer_inertia_rgb, settings)
        self.add_principal_axes(molecule2.com, pa_mol2[2], arrow_length2, settings.col_refer_inertia_rgb, settings)
        if settings.draw_arrows:  # Draw arrows
            self.add_arrow('x', arrow_length, settings)
            self.add_arrow('y', arrow_length, settings)
            self.add_arrow('z', arrow_length, settings)
        self.add_camera_setting(molecule2)

    def make_kabsch_plot(self, align, settings):
        """ Calls all functions needed for the Kabsch plot """
        self.plot_type = 'aRMSD'
        self.add_all_atoms(align, settings)
        self.add_all_bonds_kabsch(align, settings)
        self.add_all_labels(align, settings)
        self.add_camera_setting(align)
        if settings.use_aRMSD_col and settings.draw_col_map:  # If aRMSD colors are requested
            self.add_color_bar(settings)
        # Connect with picker
        self.picker = aRMSD_plot_picker(settings, self.at_actors_list, align)
        self.picker.SetDefaultRenderer(self.ren)
        self.iren.SetInteractorStyle(self.picker)

    def make_substructure_plot(self, align, settings):
        """ Calls all functions needed for the substructure selection plot """
        self.plot_type = 'substructure'
        self.add_all_atoms(align, settings)
        self.add_all_bonds_regular(align, settings)
        self.add_all_labels(align, settings)
        self.add_camera_setting(align)
        # Connect with picker
        self.picker = aRMSD_substructure_picker(settings, self.at_actors_list,
                                                align, plot_type='substructure', picker_type='normal')
        self.picker.SetDefaultRenderer(self.ren)
        self.iren.SetInteractorStyle(self.picker)

    def make_superpos_plot(self, align, settings):
        """ Calls all functions needed for the superposition plot """
        self.plot_type = 'superpos'
        self.add_all_atoms_superpos(align, settings)
        self.add_all_bonds_superpos(align, settings)
        self.add_all_labels(align, settings)
        if settings.draw_legend:  # Draw legend
            self.add_legend(align, align, settings)
        self.add_camera_setting(align)

    def make_fractional_plot(self, xray, settings, picker_type):
        """ Calls all functions needed for the fractional coordinates plot """
        self.plot_type = 'fractional'
        self.add_all_atoms(xray, settings)
        self.add_all_bonds_regular(xray, settings)
        self.add_camera_setting(xray)
        # Connect with picker
        self.picker = aRMSD_substructure_picker(settings, self.at_actors_list, xray, self.plot_type, picker_type)
        self.picker.SetDefaultRenderer(self.ren)
        self.iren.SetInteractorStyle(self.picker)
class Molecular_Viewer_mpl(object):
    """ A molecular viewer object based on matplotlib used for 3d plots """

    def __init__(self):
        """ Creates the plotting window: a single subplot without grid or axis decorations """
        figure = plt.figure()
        main_axes = figure.add_subplot(111)
        main_axes.grid(False)  # Switch off grid
        main_axes.axis('off')  # Switch off axis
        self.space = figure
        self.axes = main_axes
        self.n_plots = 1

    def colorbar_plot(self, align, settings):
        """ Contains all functions for the Kabsch plot in aRMSD representation """
        # Color map built from the aRMSD colors of the alignment
        color_map = mpl.colors.ListedColormap(align.plt_col_aRMSD)
        # Colorbar tick spacing adapts to the RMSD spread (small / default / large)
        if settings.max_RMSD_diff < 0.5:
            spacing = 0.05
        elif settings.max_RMSD_diff >= 2.0:
            spacing = 0.2
        else:
            spacing = 0.1
        # Tick boundaries from 0.0 up to settings.max_RMSD_diff, rounded to two digits
        bounds = np.around(np.arange(0.0, settings.max_RMSD_diff+0.1, spacing), 2)
        norm = mpl.colors.BoundaryNorm(bounds, color_map.N)
        # Second axes hosting the colorbar (global values, do not change)
        self.axes2 = self.space.add_axes([0.88, 0.1, 0.03, 0.8])
        mpl.colorbar.ColorbarBase(self.axes2, cmap=color_map, norm=norm,
                                  spacing='proportional', ticks=bounds, boundaries=bounds)
        self.axes2.set_ylabel(r'RMSD / $\AA$', size=12)  # y label and label size
        self.space.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.0)  # Margins of the plot window
        plt.show()  # Show result
class Statistics_mpl(object):
    """ A statistics object based on matplotlib used for 2d plots """

    def __init__(self):
        """ Initializes the main window via gridspec: one wide axis on top,
        one wide axis below it and one full-height axis on the right """
        # Define subplot locations and set titles and grids
        self.gs = gridspec.GridSpec(3,3, left=0.08, bottom=0.08, right=0.96, top=0.92, wspace=0.30, hspace=0.97)
        self.ax1 = plt.subplot(self.gs[0, :-1])   # Used for 'bond_dist'
        self.ax2 = plt.subplot(self.gs[1:, :-1])  # Used for 'bond_dist_types'
        self.ax3 = plt.subplot(self.gs[0:, -1])   # Used for 'angles'/'torsions'

    def plot(self):
        """ Maximizes the figure window (backend permitting) and shows all plots """
        mng = plt.get_current_fig_manager()  # Open directly in full window
        backend = mpl.get_backend()
        if backend == 'Qt4Agg':  # 'Qt4' backend
            mng.window.showMaximized()
        elif backend == 'WxAgg':  # 'WxAgg' backend
            mng.frame.Maximize(True)
        elif backend == 'TkAgg':  # 'Tk' backend
            # NOTE(review): the original compared against 'TKAgg' (matplotlib
            # reports 'TkAgg') and reused the wx 'frame' API, so this branch
            # could never run. Tk maximizes via the window state; 'zoomed'
            # is known to work on Windows -- confirm on other platforms.
            mng.window.state('zoomed')
        # The original called plt.show(all), accidentally passing the builtin
        # 'all' as the (deprecated) positional 'block' argument.
        plt.show()  # Show all plots

    def linregress(self, x, y):
        """ Calculate a least-squares regression for two sets of measurements
        (taken from scipy)

        Returns (slope, intercept, r, sterrest): regression slope/intercept,
        correlation coefficient and standard error of the estimate """
        x, y = np.asarray(x), np.asarray(y)
        n = len(x)
        xmean, ymean = np.mean(x, None), np.mean(y, None)
        # average sum of squares:
        ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
        r_num = ssxym
        r_den = np.sqrt(ssxm * ssym)
        # Guard against a zero denominator (constant x or y data)
        if r_den == 0.0:
            r = 0.0
        else:
            r = r_num / r_den
        # Clamp r to [-1, 1] against numerical error propagation
        if r > 1.0:
            r = 1.0
        elif r < -1.0:
            r = -1.0
        df = n - 2  # Degrees of freedom (requires n > 2 for a finite sterrest)
        slope = r_num / ssxm
        intercept = ymean - slope*xmean
        sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
        return slope, intercept, r, sterrest

    def do_stats_quant(self, align, logger, settings, prop='bond_dist'):
        """ Wrapper for the calculation and plotting of individual statistic evaluations

        prop selects the quantity ('bond_dist', 'bond_dist_types', 'angles'
        or 'torsions'); the data is drawn on the axis associated with that
        quantity and the quality descriptors (RMSE, R**2) are written to the
        corresponding logger attributes """
        # Details for the handling of the different quantities
        if prop == 'bond_dist':
            data_mol1 = align.bnd_dis_mol1
            data_mol2 = align.bnd_dis_mol2
            plot_color = settings.new_red
            plt_axis = self.ax1
            title_prefix = 'All Bond Distances:'
            label_suffix = ' distances'
            label_unit = r' $\AA$'
            extra_space = 0.2
            # Do actual statistics for the two data sets
            m, b, r, sterrest = self.linregress(data_mol2, data_mol1)
            limits, x_axis, rmsd = self.prep_simulation(data_mol2, data_mol1, settings)
            logger.prop_bnd_dist_rmsd, logger.prop_bnd_dist_r_sq = rmsd, r**2  # Log quality descriptors
        elif prop == 'bond_dist_types':
            # Mean values per bond type
            data_mol1 = np.asarray([np.mean(align.bnd_dis_mol1[align.bnd_type_pos[entry]])
                                    for entry in range(align.n_bnd_types)])
            data_mol2 = np.asarray([np.mean(align.bnd_dis_mol2[align.bnd_type_pos[entry]])
                                    for entry in range(align.n_bnd_types)])
            # Calculate error bars
            # NOTE(review): error_prop1/2 stay undefined if settings.error_prop
            # is neither 'std' nor 'var' -- confirm the option is validated
            # upstream before relaxing this.
            if settings.error_prop == 'std':  # Standard deviations
                error_prop1 = np.asarray([np.std(align.bnd_dis_mol1[align.bnd_type_pos[entry]])
                                          for entry in range(align.n_bnd_types)])
                error_prop2 = np.asarray([np.std(align.bnd_dis_mol2[align.bnd_type_pos[entry]])
                                          for entry in range(align.n_bnd_types)])
            elif settings.error_prop == 'var':  # Variances
                error_prop1 = np.asarray([np.var(align.bnd_dis_mol1[align.bnd_type_pos[entry]])
                                          for entry in range(align.n_bnd_types)])
                error_prop2 = np.asarray([np.var(align.bnd_dis_mol2[align.bnd_type_pos[entry]])
                                          for entry in range(align.n_bnd_types)])
            plot_color = settings.new_red
            plt_axis = self.ax2
            title_prefix = 'Average Distances per Bond Type:'
            label_suffix = ' distance types'
            label_unit = r' $\AA$'
            extra_space = 0.1 + np.max(np.hstack((error_prop1, error_prop2)))  # Additional extra space for markers
            # Do actual statistics for the two data sets
            m, b, r, sterrest = self.linregress(data_mol2, data_mol1)
            limits, x_axis, rmsd = self.prep_simulation(data_mol2, data_mol1, settings)
            logger.prop_bnd_dist_type_rmsd, logger.prop_bnd_dist_type_r_sq = rmsd, r**2  # Log quality descriptors
            if align.n_bnd_types <= 2:
                logger.pt_warning_bond_types()  # Warn user if 2 or less bond types were found
        elif prop == 'angles':
            data_mol1 = align.ang_deg_mol1
            data_mol2 = align.ang_deg_mol2
            plot_color = settings.new_green
            plt_axis = self.ax3
            title_prefix = 'All Angles:'
            label_suffix = ' angles'
            label_unit = r' $^\circ$'
            extra_space = 3.5
            # Do actual statistics for the two data sets
            m, b, r, sterrest = self.linregress(data_mol2, data_mol1)
            limits, x_axis, rmsd = self.prep_simulation(data_mol2, data_mol1, settings)
            logger.prop_ang_rmsd, logger.prop_ang_r_sq = rmsd, r**2  # Log quality descriptors
        elif prop == 'torsions':
            data_mol1 = align.tor_deg_mol1
            data_mol2 = align.tor_deg_mol2
            plot_color = settings.new_blue
            plt_axis = self.ax3
            title_prefix = 'All Angles / Dihedrals:'
            label_suffix = ' dihedrals'
            label_unit = r' $^\circ$'
            extra_space = 3.5
            # Do actual statistics for the two data sets
            m, b, r, sterrest = self.linregress(data_mol2, data_mol1)
            limits, x_axis, rmsd = self.prep_simulation(data_mol2, data_mol1, settings)
            logger.prop_tor_rmsd, logger.prop_tor_r_sq = rmsd, r**2  # Log quality descriptors
        # Generate all titles and labels
        ax_title = title_prefix + ' RMSE = ' + str(np.around(rmsd, settings.calc_prec_stats)) + label_unit
        xlabel = align.name2+' /' + label_unit
        ylabel = align.name1+' /' + label_unit
        plt_axis.set_title(ax_title, fontsize=settings.title_pt)
        plt_axis.set_xlabel(xlabel, fontsize=settings.ax_pt, style='italic')
        plt_axis.set_ylabel(ylabel, fontsize=settings.ax_pt, style='italic')
        plt_axis.grid(False)
        label_data = str(len(data_mol2)) + label_suffix
        label_fit = r'R$^2$ = '+str(np.around(r**2, settings.calc_prec_stats))
        # Plot linear correlation and fit / adjust axes limits
        if prop == 'bond_dist_types':
            plt_axis.errorbar(data_mol2, data_mol1, xerr=error_prop2, yerr=error_prop1, fmt="o",
                              ms=8.5, mfc=plot_color, mew=0.75, zorder=2, mec=plot_color, label=label_data)
            # Annotate each marker with its bond type label (plain loop: the
            # original abused a list comprehension for its side effects)
            for pos in range(align.n_bnd_types):
                plt_axis.text(data_mol2[pos], data_mol1[pos] - 0.1,
                              align.bnd_label[pos], zorder=3, fontsize=13)
            # np.float was removed in NumPy 1.20/1.24; the builtin float is
            # the documented replacement
            add_lim = np.asarray([-0.1, 0.1], dtype=float)
            limits += add_lim
        else:
            plt_axis.plot(data_mol2, data_mol1, "o", ms=8.5, mfc=plot_color, mew=0.75,
                          zorder=1, mec=plot_color, label=label_data)
        plt_axis.plot(x_axis, m*x_axis+b, lw=2, zorder=1, color=plot_color, label=label_fit)
        plt_axis.set_xlim([limits[0] - extra_space, limits[1] + extra_space])
        plt_axis.set_ylim([limits[0] - extra_space, limits[1] + extra_space])
        # Draw legend and add grid upon request
        if settings.stats_draw_legend:
            plt_axis.legend(loc=settings.legend_pos, frameon=False)
        if settings.stats_draw_grid:
            plt_axis.grid()

    def prep_simulation(self, data1, data2, settings):
        """ Calculates the RMSE of two data sets and generates axis for linear regression

        Returns ([min, max] of the combined data, x axis padded on both ends
        by settings.splitter, RMSE rounded to 4 digits) """
        # Determine lowest and highest values of the combined data
        stack = np.hstack((data1, data2))
        limits = [np.min(stack), np.max(stack)]
        # Calculate RMSD of the data sets
        rmsd = np.around(np.sqrt(np.sum(np.abs(data2 - data1)**2 / len(data2))), 4)
        # Determine step size and axis
        step_size_all = ((limits[1] + settings.splitter) - (limits[0] - settings.splitter)) / len(data2)
        axis = np.arange(limits[0] - settings.splitter, limits[1] + settings.splitter, step_size_all)
        return limits, axis, rmsd
| |
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from threading import Thread
from django import forms
from django.core.urlresolvers import reverse
from django.http import HttpRequest, QueryDict
from django.test import TestCase, override_settings
from django.utils.six.moves import queue
from test_haystack.core.models import AnotherMockModel, MockModel
from haystack import connections, indexes
from haystack.forms import FacetedSearchForm, ModelSearchForm, SearchForm
from haystack.query import EmptySearchQuerySet
from haystack.utils.loading import UnifiedIndex
from haystack.views import FacetedSearchView, SearchView, search_view_factory
class InitialedSearchForm(SearchForm):
    """Search form whose ``q`` field carries pre-populated placeholder text."""

    q = forms.CharField(
        label='Search',
        required=False,
        initial='Search for...',
    )
class BasicMockModelSearchIndex(indexes.BasicSearchIndex, indexes.Indexable):
    """Minimal search index registering ``MockModel`` with the test backend."""
    def get_model(self):
        """Return the model class this index is responsible for."""
        return MockModel
class BasicAnotherMockModelSearchIndex(indexes.BasicSearchIndex, indexes.Indexable):
    """Minimal search index registering ``AnotherMockModel`` with the test backend."""
    def get_model(self):
        """Return the model class this index is responsible for."""
        return AnotherMockModel
class SearchViewTestCase(TestCase):
    """Exercises ``haystack.views.SearchView`` through the test client."""

    fixtures = ['base_data']

    def setUp(self):
        super(SearchViewTestCase, self).setUp()
        # Stow the real unified index and install one that contains only the
        # two basic indexes used by these tests.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections['default']._index = self.ui
        # Update the "index".
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

    def tearDown(self):
        connections['default']._index = self.old_unified_index
        super(SearchViewTestCase, self).tearDown()

    def test_search_no_query(self):
        response = self.client.get(reverse('haystack_search'))
        self.assertEqual(response.status_code, 200)

    def test_search_query(self):
        response = self.client.get(reverse('haystack_search'), {'q': 'haystack'})
        self.assertEqual(response.status_code, 200)
        self.assertIn('page', response.context)
        self.assertNotIn('page_obj', response.context)
        self.assertEqual(len(response.context[-1]['page'].object_list), 3)
        self.assertEqual(response.context[-1]['page'].object_list[0].content_type(), u'core.mockmodel')
        self.assertEqual(response.context[-1]['page'].object_list[0].pk, '1')

    def test_invalid_page(self):
        response = self.client.get(reverse('haystack_search'), {'q': 'haystack', 'page': '165233'})
        self.assertEqual(response.status_code, 404)

    def test_empty_results(self):
        sv = SearchView()
        sv.request = HttpRequest()
        sv.form = sv.build_form()
        self.assertTrue(isinstance(sv.get_results(), EmptySearchQuerySet))

    def test_initial_data(self):
        sv = SearchView(form_class=InitialedSearchForm)
        sv.request = HttpRequest()
        form = sv.build_form()
        self.assertTrue(isinstance(form, InitialedSearchForm))
        self.assertEqual(form.fields['q'].initial, 'Search for...')
        para = form.as_p()
        self.assertTrue(u'<label for="id_q">Search:</label>' in para)
        self.assertTrue(u'value="Search for..."' in para)

    def test_pagination(self):
        # Page numbers are 1-based: 0 and pages past the end are 404s.
        response = self.client.get(reverse('haystack_search'), {'q': 'haystack', 'page': 0})
        self.assertEqual(response.status_code, 404)
        response = self.client.get(reverse('haystack_search'), {'q': 'haystack', 'page': 1})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context[-1]['page'].object_list), 3)
        response = self.client.get(reverse('haystack_search'), {'q': 'haystack', 'page': 2})
        self.assertEqual(response.status_code, 404)

    def test_thread_safety(self):
        exceptions = []

        def threaded_view(resp_queue, view, request):
            time.sleep(2)  # Ensure both threads overlap.
            try:
                view(request)
                resp_queue.put(request.GET['name'])
            except Exception as e:
                exceptions.append(e)
                raise

        class ThreadedSearchView(SearchView):
            def __call__(self, request):
                print("Name: %s" % request.GET['name'])
                return super(ThreadedSearchView, self).__call__(request)

        view = search_view_factory(view_class=ThreadedSearchView)
        resp_queue = queue.Queue()
        request_1 = HttpRequest()
        request_1.GET = {'name': 'foo'}
        request_2 = HttpRequest()
        request_2.GET = {'name': 'bar'}
        th1 = Thread(target=threaded_view, args=(resp_queue, view, request_1))
        th2 = Thread(target=threaded_view, args=(resp_queue, view, request_2))
        th1.start()
        th2.start()
        th1.join()
        th2.join()
        # Fail fast if a worker raised: the original never inspected the
        # collected exceptions (a re-raise inside a Thread is invisible), and
        # a failed worker never put a result, so the blocking get() below
        # would hang the test run forever.
        self.assertEqual(exceptions, [])
        foo = resp_queue.get()
        bar = resp_queue.get()
        self.assertNotEqual(foo, bar)

    def test_spelling(self):
        # Stow.
        from django.conf import settings
        old = settings.HAYSTACK_CONNECTIONS['default'].get('INCLUDE_SPELLING', None)
        settings.HAYSTACK_CONNECTIONS['default']['INCLUDE_SPELLING'] = True
        try:
            sv = SearchView()
            sv.query = 'Nothing'
            sv.results = []
            sv.build_page = lambda: (None, None)
            sv.create_response()
            context = sv.get_context()
            self.assertIn('suggestion', context,
                          msg='Spelling suggestions should be present even if'
                              ' no results were returned')
            self.assertEqual(context['suggestion'], None)
        finally:
            # Restore even when an assertion above fails, so the modified
            # global setting cannot leak into other tests.
            settings.HAYSTACK_CONNECTIONS['default']['INCLUDE_SPELLING'] = old
            if old is None:
                del settings.HAYSTACK_CONNECTIONS['default']['INCLUDE_SPELLING']
@override_settings(ROOT_URLCONF='test_haystack.results_per_page_urls')
class ResultsPerPageTestCase(TestCase):
    """Checks that per-view ``results_per_page`` drives the paginator size."""

    fixtures = ['base_data']

    def setUp(self):
        super(ResultsPerPageTestCase, self).setUp()
        # Swap in a throwaway UnifiedIndex holding just the two basic indexes.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections['default']._index = self.ui
        # Rebuild the backend's contents from scratch.
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

    def tearDown(self):
        connections['default']._index = self.old_unified_index
        super(ResultsPerPageTestCase, self).tearDown()

    def test_custom_results_per_page(self):
        # (url, query, expected page size) triples for the two test URLs.
        cases = (
            ('/search/', 'haystack', 1),
            ('/search2/', 'hello world', 2),
        )
        for url, query, per_page in cases:
            response = self.client.get(url, {'q': query})
            self.assertEqual(response.status_code, 200)
            context = response.context[-1]
            self.assertEqual(len(context['page'].object_list), per_page)
            self.assertEqual(context['paginator'].per_page, per_page)
class FacetedSearchViewTestCase(TestCase):
    """Exercises ``FacetedSearchView`` form construction and facet parsing."""

    def setUp(self):
        super(FacetedSearchViewTestCase, self).setUp()
        # Swap in a throwaway UnifiedIndex holding just the two basic indexes.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections['default']._index = self.ui
        # Update the "index".
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

    def tearDown(self):
        connections['default']._index = self.old_unified_index
        super(FacetedSearchViewTestCase, self).tearDown()

    def _view_with_get(self, query_string):
        # Build a FacetedSearchView whose form is bound to *query_string*.
        view = FacetedSearchView()
        view.request = HttpRequest()
        view.request.GET = QueryDict(query_string)
        view.form = view.build_form()
        return view

    def test_search_no_query(self):
        response = self.client.get(reverse('haystack_faceted_search'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['facets'], {})

    def test_empty_results(self):
        fsv = self._view_with_get('')
        self.assertTrue(isinstance(fsv.get_results(), EmptySearchQuerySet))

    def test_default_form(self):
        fsv = self._view_with_get('')
        self.assertTrue(isinstance(fsv.form, FacetedSearchForm))

    def test_list_selected_facets(self):
        fsv = self._view_with_get('')
        self.assertEqual(fsv.form.selected_facets, [])
        fsv = self._view_with_get(
            'selected_facets=author:daniel&selected_facets=author:chris')
        self.assertEqual(fsv.form.selected_facets,
                         [u'author:daniel', u'author:chris'])
class BasicSearchViewTestCase(TestCase):
    """Exercises haystack's function-based ``basic_search`` view."""

    fixtures = ['base_data']

    def setUp(self):
        super(BasicSearchViewTestCase, self).setUp()
        # Swap in a throwaway UnifiedIndex holding just the two basic indexes.
        self.old_unified_index = connections['default']._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections['default']._index = self.ui
        # Update the "index".
        backend = connections['default'].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())

    def tearDown(self):
        connections['default']._index = self.old_unified_index
        super(BasicSearchViewTestCase, self).tearDown()

    def test_search_no_query(self):
        response = self.client.get(reverse('haystack_basic_search'))
        self.assertEqual(response.status_code, 200)

    def test_search_query(self):
        response = self.client.get(reverse('haystack_basic_search'), {'q': 'haystack'})
        self.assertEqual(response.status_code, 200)
        context = response.context[-1]
        self.assertEqual(type(context['form']), ModelSearchForm)
        result_page = context['page']
        self.assertEqual(len(result_page.object_list), 3)
        first_hit = result_page.object_list[0]
        self.assertEqual(first_hit.content_type(), u'core.mockmodel')
        self.assertEqual(first_hit.pk, '1')
        self.assertEqual(context['query'], u'haystack')

    def test_invalid_page(self):
        response = self.client.get(reverse('haystack_basic_search'),
                                   {'q': 'haystack', 'page': '165233'})
        self.assertEqual(response.status_code, 404)
| |
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_config import cfg
from futurist import periodics
from neutron_lib.api.definitions import external_net as extnet_apidef
from neutron_lib.api.definitions import floating_ip_port_forwarding as pf_def
from neutron_lib import constants as n_const
from neutron_lib import context as n_context
from neutron_lib.exceptions import l3 as lib_l3_exc
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import utils
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf as ovn_config
from neutron.db import ovn_revision_numbers_db as db_rev
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import maintenance
from neutron.tests.functional import base
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.extensions import test_extraroute
class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
    """A helper class to keep the code more organized."""

    def setUp(self):
        super(_TestMaintenanceHelper, self).setUp()
        self._ovn_client = self.mech_driver._ovn_client
        self._l3_ovn_client = self.l3_plugin._ovn_client
        ext_mgr = test_extraroute.ExtraRouteTestExtensionManager()
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
        self.maint = maintenance.DBInconsistenciesPeriodics(self._ovn_client)
        self.context = n_context.get_admin_context()
        # Always verify inconsistencies for all objects.
        db_rev.INCONSISTENCIES_OLDER_THAN = -1

    def _post(self, resource, body):
        # Issue a create request for *resource* and return the deserialized
        # response body.
        request = self.new_create_request(resource, body, self.fmt)
        return self.deserialize(self.fmt, request.get_response(self.api))

    def _put(self, resource, body, obj_id):
        # Issue an update request for *resource*/*obj_id* and return the
        # deserialized response body.
        request = self.new_update_request(resource, body, obj_id, self.fmt)
        return self.deserialize(self.fmt, request.get_response(self.api))

    def _find_network_row_by_name(self, name):
        # First Logical_Switch whose Neutron name matches, else None.
        return next(
            (row for row in
             self.nb_api._tables['Logical_Switch'].rows.values()
             if row.external_ids.get(
                 ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY) == name),
            None)

    def _create_network(self, name, external=False):
        body = {'network': {'name': name, 'tenant_id': self._tenant_id,
                            extnet_apidef.EXTERNAL: external}}
        return self._post('networks', body)['network']

    def _update_network_name(self, net_id, new_name):
        return self._put('networks', {'network': {'name': new_name}},
                         net_id)['network']

    def _create_port(self, name, net_id, security_groups=None,
                     device_owner=None):
        port = {'name': name,
                'tenant_id': self._tenant_id,
                'network_id': net_id}
        if security_groups is not None:
            port['security_groups'] = security_groups
        if device_owner is not None:
            port['device_owner'] = device_owner
        return self._post('ports', {'port': port})['port']

    def _update_port_name(self, port_id, new_name):
        return self._put('ports', {'port': {'name': new_name}},
                         port_id)['port']

    def _find_port_row_by_name(self, name):
        # First Logical_Switch_Port whose Neutron name matches, else None.
        return next(
            (row for row in
             self.nb_api._tables['Logical_Switch_Port'].rows.values()
             if row.external_ids.get(
                 ovn_const.OVN_PORT_NAME_EXT_ID_KEY) == name),
            None)

    def _set_global_dhcp_opts(self, ip_version, opts):
        # Serialize the options as 'key:value,key:value' for oslo.config.
        opt_string = ','.join('{0}:{1}'.format(key, value)
                              for key, value in opts.items())
        if ip_version == n_const.IP_VERSION_6:
            ovn_config.cfg.CONF.set_override('ovn_dhcp6_global_options',
                                             opt_string,
                                             group='ovn')
        if ip_version == n_const.IP_VERSION_4:
            ovn_config.cfg.CONF.set_override('ovn_dhcp4_global_options',
                                             opt_string,
                                             group='ovn')

    def _unset_global_dhcp_opts(self, ip_version):
        if ip_version == n_const.IP_VERSION_6:
            ovn_config.cfg.CONF.clear_override('ovn_dhcp6_global_options',
                                               group='ovn')
        if ip_version == n_const.IP_VERSION_4:
            ovn_config.cfg.CONF.clear_override('ovn_dhcp4_global_options',
                                               group='ovn')

    def _create_subnet(self, name, net_id, ip_version=n_const.IP_VERSION_4,
                       **kwargs):
        cidr = ('10.0.0.0/24' if ip_version == n_const.IP_VERSION_4
                else '2001:db8::/64')
        subnet = {'name': name,
                  'network_id': net_id,
                  'ip_version': ip_version,
                  'tenant_id': self._tenant_id,
                  'cidr': cidr,
                  'enable_dhcp': True}
        subnet.update(kwargs)
        return self._post('subnets', {'subnet': subnet})['subnet']

    def _update_subnet_enable_dhcp(self, subnet_id, value):
        return self._put('subnets', {'subnet': {'enable_dhcp': value}},
                         subnet_id)['subnet']

    def _find_subnet_row_by_id(self, subnet_id):
        # The subnet's own DHCP_Options row carries the subnet_id but no
        # port_id in its external_ids.
        return next(
            (row for row in
             self.nb_api._tables['DHCP_Options'].rows.values()
             if (row.external_ids.get('subnet_id') == subnet_id and
                 not row.external_ids.get('port_id'))),
            None)

    def _create_router(self, name, external_gateway_info=None):
        router = {'name': name, 'tenant_id': self._tenant_id}
        if external_gateway_info is not None:
            router['external_gateway_info'] = external_gateway_info
        return self._post('routers', {'router': router})['router']

    def _update_router_name(self, net_id, new_name):
        return self._put('routers', {'router': {'name': new_name}},
                         net_id)['router']

    def _find_router_row_by_name(self, name):
        return next(
            (row for row in
             self.nb_api._tables['Logical_Router'].rows.values()
             if row.external_ids.get(
                 ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY) == name),
            None)

    def _create_security_group(self):
        sg = {'name': 'sgtest',
              'tenant_id': self._tenant_id,
              'description': 'SpongeBob Rocks!'}
        return self._post('security-groups',
                          {'security_group': sg})['security_group']

    def _find_security_group_row_by_id(self, sg_id):
        return self.nb_api.lookup(
            'Port_Group', utils.ovn_port_group_name(sg_id), default=None)

    def _create_security_group_rule(self, sg_id):
        rule = {'security_group_id': sg_id,
                'direction': 'ingress',
                'protocol': n_const.PROTO_NAME_TCP,
                'ethertype': n_const.IPv4,
                'port_range_min': 22,
                'port_range_max': 22,
                'tenant_id': self._tenant_id}
        return self._post(
            'security-group-rules',
            {'security_group_rule': rule})['security_group_rule']

    def _find_security_group_rule_row_by_id(self, sgr_id):
        return next(
            (row for row in self.nb_api._tables['ACL'].rows.values()
             if row.external_ids.get(
                 ovn_const.OVN_SG_RULE_EXT_ID_KEY) == sgr_id),
            None)

    def _process_router_interface(self, action, router_id, subnet_id):
        req = self.new_action_request(
            'routers', {'subnet_id': subnet_id}, router_id,
            '%s_router_interface' % action)
        return self.deserialize(self.fmt, req.get_response(self.api))

    def _add_router_interface(self, router_id, subnet_id):
        return self._process_router_interface('add', router_id, subnet_id)

    def _remove_router_interface(self, router_id, subnet_id):
        return self._process_router_interface('remove', router_id, subnet_id)

    def _find_router_port_row_by_port_id(self, port_id):
        lrp_name = utils.ovn_lrouter_port_name(port_id)
        return next(
            (row for row in
             self.nb_api._tables['Logical_Router_Port'].rows.values()
             if row.name == lrp_name),
            None)

    def _find_nat_rule(self, router_id, external_ip, logical_ip=None,
                       nat_type='dnat_and_snat'):
        rules = self.nb_api.get_lrouter_nat_rules(utils.ovn_name(router_id))
        matches = (rule for rule in rules
                   if rule['type'] == nat_type and
                   rule['external_ip'] == external_ip and
                   (not logical_ip or rule['logical_ip'] == logical_ip))
        return next(matches, None)

    def _find_pf_lb(self, router_id, fip_id=None):
        lbs = self.nb_api.get_router_floatingip_lbs(utils.ovn_name(router_id))
        return [lb for lb in lbs
                if (not fip_id or
                    fip_id == lb.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY])]
class TestMaintenance(_TestMaintenanceHelper):
def test_network(self):
    """Create/update/delete a network with the OVN client mocked out and
    check that the maintenance task repairs every inconsistency."""
    net_name = 'networktest'
    # > Create (driver mocked out, so no Logical_Switch appears)
    with mock.patch.object(self._ovn_client, 'create_network'):
        neutron_net = self._create_network(net_name)
    self.assertIsNone(self._find_network_row_by_name(net_name))
    # Let the maintenance task repair the inconsistency
    self.maint.check_for_inconsistencies()
    ls_row = self._find_network_row_by_name(net_name)
    self.assertIsNotNone(ls_row)
    self.assertEqual(
        neutron_net['revision_number'],
        int(ls_row.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))
    # > Update (again with the driver mocked out)
    new_name = 'networktest_updated'
    with mock.patch.object(self._ovn_client, 'update_network'):
        updated_net = self._update_network_name(neutron_net['id'], new_name)
    # Revision numbers must now disagree between Neutron and OVN
    ls_row = self._find_network_row_by_name(net_name)
    self.assertNotEqual(
        updated_net['revision_number'],
        int(ls_row.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))
    self.maint.check_for_inconsistencies()
    # The stale name is gone and the renamed switch is back in sync
    self.assertIsNone(self._find_network_row_by_name(net_name))
    ls_row = self._find_network_row_by_name(new_name)
    self.assertEqual(
        updated_net['revision_number'],
        int(ls_row.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))
    # > Delete (driver mocked out, so the switch lingers in OVN)
    with mock.patch.object(self._ovn_client, 'delete_network'):
        self._delete('networks', updated_net['id'])
    self.assertIsNotNone(self._find_network_row_by_name(new_name))
    self.maint.check_for_inconsistencies()
    # Switch and its revision bookkeeping are both gone
    self.assertIsNone(self._find_network_row_by_name(new_name))
    self.assertIsNone(db_rev.get_revision_row(
        self.context,
        updated_net['id']))
def test_port(self):
    """Create/update/delete a port with the OVN client mocked out and
    verify the maintenance task repairs each resulting inconsistency."""
    obj_name = 'porttest'
    neutron_net = self._create_network('network1')
    # > Create (OVN client mocked out, so no Logical_Switch_Port appears)
    with mock.patch.object(self._ovn_client, 'create_port'):
        neutron_obj = self._create_port(obj_name, neutron_net['id'])
    # Assert the port doesn't exist in OVN
    self.assertIsNone(self._find_port_row_by_name(obj_name))
    # Call the maintenance thread to fix the problem
    self.maint.check_for_inconsistencies()
    # Assert the port was now created
    ovn_obj = self._find_port_row_by_name(obj_name)
    self.assertIsNotNone(ovn_obj)
    self.assertEqual(
        neutron_obj['revision_number'],
        int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))
    # > Update
    new_obj_name = 'porttest_updated'
    with mock.patch.object(self._ovn_client, 'update_port'):
        new_neutron_obj = self._update_port_name(neutron_obj['id'],
                                                 new_obj_name)
    # Assert the revision numbers are out-of-sync
    ovn_obj = self._find_port_row_by_name(obj_name)
    self.assertNotEqual(
        new_neutron_obj['revision_number'],
        int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))
    # Call the maintenance thread to fix the problem
    self.maint.check_for_inconsistencies()
    # Assert the old name doesn't exist anymore in the OVNDB
    self.assertIsNone(self._find_port_row_by_name(obj_name))
    # Assert the port is now in sync. Note that for ports we are
    # fetching it again from the Neutron database prior to comparison
    # because of the monitor code that can update the ports again upon
    # changes to it.
    ovn_obj = self._find_port_row_by_name(new_obj_name)
    new_neutron_obj = self._ovn_client._plugin.get_port(
        self.context, neutron_obj['id'])
    self.assertEqual(
        new_neutron_obj['revision_number'],
        int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))
    # > Delete
    with mock.patch.object(self._ovn_client, 'delete_port'):
        self._delete('ports', new_neutron_obj['id'])
    # Assert the port still exists in OVNDB
    self.assertIsNotNone(self._find_port_row_by_name(new_obj_name))
    # Call the maintenance thread to fix the problem
    self.maint.check_for_inconsistencies()
    # Assert the port is now deleted from OVNDB
    self.assertIsNone(self._find_port_row_by_name(new_obj_name))
    # Assert the revision number no longer exists
    self.assertIsNone(db_rev.get_revision_row(
        self.context,
        neutron_obj['id']))
def test_subnet_global_dhcp4_opts(self):
    """Global IPv4 DHCP options must be added, updated and removed on the
    subnet's DHCP_Options row by the check_global_dhcp_opts task."""
    neutron_net = self._create_network('network1')
    neutron_sub = self._create_subnet('globaltestsubnet', neutron_net['id'])

    def ntp_option():
        # Re-read the subnet's DHCP_Options row; None when the option is
        # unset.
        row = self._find_subnet_row_by_id(neutron_sub['id'])
        return row.options.get('ntp_server', None)

    # No global options configured yet
    self.assertIsNone(ntp_option())
    # Configure a global IPv4 option and let the one-shot task pick it up
    self._set_global_dhcp_opts(ip_version=n_const.IP_VERSION_4,
                               opts={'ntp_server': '1.2.3.4'})
    self.assertRaises(periodics.NeverAgain,
                      self.maint.check_global_dhcp_opts)
    self.assertEqual('1.2.3.4', ntp_option())
    # Change the value; the task must propagate the update
    self._set_global_dhcp_opts(ip_version=n_const.IP_VERSION_4,
                               opts={'ntp_server': '4.3.2.1'})
    self.assertRaises(periodics.NeverAgain,
                      self.maint.check_global_dhcp_opts)
    self.assertEqual('4.3.2.1', ntp_option())
    # An empty value removes the option entirely
    self._set_global_dhcp_opts(ip_version=n_const.IP_VERSION_4,
                               opts={'ntp_server': ''})
    self.assertRaises(periodics.NeverAgain,
                      self.maint.check_global_dhcp_opts)
    self.assertIsNone(ntp_option())
def test_subnet_global_dhcp6_opts(self):
    """Global IPv6 DHCP options must be added, updated and removed on the
    subnet's DHCP_Options row by the check_global_dhcp_opts task."""
    neutron_net = self._create_network('network1')
    neutron_sub = self._create_subnet('globaltestsubnet', neutron_net['id'],
                                      n_const.IP_VERSION_6)

    def ntp_option():
        # Re-read the subnet's DHCP_Options row; None when the option is
        # unset.
        row = self._find_subnet_row_by_id(neutron_sub['id'])
        return row.options.get('ntp_server', None)

    # No global options configured yet
    self.assertIsNone(ntp_option())
    # Configure a global IPv6 option and let the one-shot task pick it up
    self._set_global_dhcp_opts(ip_version=n_const.IP_VERSION_6,
                               opts={'ntp_server': '1.2.3.4'})
    self.assertRaises(periodics.NeverAgain,
                      self.maint.check_global_dhcp_opts)
    self.assertEqual('1.2.3.4', ntp_option())
    # Change the value; the task must propagate the update
    self._set_global_dhcp_opts(ip_version=n_const.IP_VERSION_6,
                               opts={'ntp_server': '4.3.2.1'})
    self.assertRaises(periodics.NeverAgain,
                      self.maint.check_global_dhcp_opts)
    self.assertEqual('4.3.2.1', ntp_option())
    # An empty value removes the option entirely
    self._set_global_dhcp_opts(ip_version=n_const.IP_VERSION_6,
                               opts={'ntp_server': ''})
    self.assertRaises(periodics.NeverAgain,
                      self.maint.check_global_dhcp_opts)
    self.assertIsNone(ntp_option())
    def test_subnet(self):
        """Maintenance thread repairs create/update/delete drift for a subnet.

        Each phase suppresses the normal OVN client call (via mock.patch) so
        the Neutron and OVN databases become inconsistent, then verifies that
        check_for_inconsistencies() brings them back in sync.
        """
        obj_name = 'subnettest'
        neutron_net = self._create_network('network1')

        with mock.patch.object(self._ovn_client, 'create_subnet'):
            neutron_obj = self._create_subnet(obj_name, neutron_net['id'])

        # Assert the subnet doesn't exist in OVN
        self.assertIsNone(self._find_subnet_row_by_id(neutron_obj['id']))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the subnet was now created
        ovn_obj = self._find_subnet_row_by_id(neutron_obj['id'])
        self.assertIsNotNone(ovn_obj)
        self.assertEqual(
            neutron_obj['revision_number'],
            int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))

        # > Update
        with mock.patch.object(self._ovn_client, 'update_subnet'):
            neutron_obj = self._update_subnet_enable_dhcp(
                neutron_obj['id'], False)

        # Assert the revision numbers are out-of-sync
        ovn_obj = self._find_subnet_row_by_id(neutron_obj['id'])
        self.assertNotEqual(
            neutron_obj['revision_number'],
            int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the old name doesn't exist anymore in the OVNDB. When
        # the subnet's enable_dhcp's is set to False, OVN will remove the
        # DHCP_Options entry related to that subnet.
        self.assertIsNone(self._find_subnet_row_by_id(neutron_obj['id']))

        # Re-enable the DHCP for the subnet and check if the maintenance
        # thread will re-create it in OVN
        with mock.patch.object(self._ovn_client, 'update_subnet'):
            neutron_obj = self._update_subnet_enable_dhcp(
                neutron_obj['id'], True)

        # Assert the DHCP_Options still doesn't exist in OVNDB
        self.assertIsNone(self._find_subnet_row_by_id(neutron_obj['id']))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the subnet is now in sync
        ovn_obj = self._find_subnet_row_by_id(neutron_obj['id'])
        self.assertEqual(
            neutron_obj['revision_number'],
            int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))

        # > Delete
        with mock.patch.object(self._ovn_client, 'delete_subnet'):
            self._delete('subnets', neutron_obj['id'])

        # Assert the subnet still exists in OVNDB
        self.assertIsNotNone(self._find_subnet_row_by_id(neutron_obj['id']))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the subnet is now deleted from OVNDB
        self.assertIsNone(self._find_subnet_row_by_id(neutron_obj['id']))

        # Assert the revision number no longer exists
        self.assertIsNone(db_rev.get_revision_row(
            self.context,
            neutron_obj['id']))
    def test_router(self):
        """Maintenance thread repairs create/update/delete drift for a router.

        Suppresses the OVN L3 client calls so Neutron and OVN diverge, then
        verifies check_for_inconsistencies() re-syncs the router each time.
        """
        obj_name = 'routertest'

        with mock.patch.object(self._l3_ovn_client, 'create_router'):
            neutron_obj = self._create_router(obj_name)

        # Assert the router doesn't exist in OVN
        self.assertIsNone(self._find_router_row_by_name(obj_name))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the router was now created
        ovn_obj = self._find_router_row_by_name(obj_name)
        self.assertIsNotNone(ovn_obj)
        self.assertEqual(
            neutron_obj['revision_number'],
            int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))

        # > Update
        new_obj_name = 'routertest_updated'
        with mock.patch.object(self._l3_ovn_client, 'update_router'):
            new_neutron_obj = self._update_router_name(neutron_obj['id'],
                                                       new_obj_name)

        # Assert the revision numbers are out-of-sync
        ovn_obj = self._find_router_row_by_name(obj_name)
        self.assertNotEqual(
            new_neutron_obj['revision_number'],
            int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the old name doesn't exist anymore in the OVNDB
        self.assertIsNone(self._find_router_row_by_name(obj_name))

        # Assert the router is now in sync
        ovn_obj = self._find_router_row_by_name(new_obj_name)
        self.assertEqual(
            new_neutron_obj['revision_number'],
            int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))

        # > Delete
        with mock.patch.object(self._l3_ovn_client, 'delete_router'):
            self._delete('routers', new_neutron_obj['id'])

        # Assert the router still exists in OVNDB
        self.assertIsNotNone(self._find_router_row_by_name(new_obj_name))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the router is now deleted from OVNDB
        self.assertIsNone(self._find_router_row_by_name(new_obj_name))

        # Assert the revision number no longer exists
        self.assertIsNone(db_rev.get_revision_row(
            self.context,
            new_neutron_obj['id']))
    def test_security_group(self):
        """Maintenance thread repairs create/delete drift for a security group.

        Only existence is checked (no revision-number comparison), because OVN
        does not support updating a Security Group in place.
        """
        with mock.patch.object(self._ovn_client, 'create_security_group'):
            neutron_obj = self._create_security_group()

        # Assert the sg doesn't exist in OVN
        self.assertIsNone(
            self._find_security_group_row_by_id(neutron_obj['id']))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the sg was now created. We don't save the revision number
        # in the Security Group because OVN doesn't support updating it,
        # all we care about is whether it exists or not.
        self.assertIsNotNone(
            self._find_security_group_row_by_id(neutron_obj['id']))

        # > Delete
        with mock.patch.object(self._ovn_client, 'delete_security_group'):
            self._delete('security-groups', neutron_obj['id'])

        # Assert the sg still exists in OVNDB
        self.assertIsNotNone(
            self._find_security_group_row_by_id(neutron_obj['id']))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the sg is now deleted from OVNDB
        self.assertIsNone(
            self._find_security_group_row_by_id(neutron_obj['id']))

        # Assert the revision number no longer exists
        self.assertIsNone(db_rev.get_revision_row(
            self.context,
            neutron_obj['id']))
    def test_security_group_rule(self):
        """Maintenance thread re-creates a missing security group rule.

        The delete phase is intentionally absent (see FIXME below).
        """
        neutron_sg = self._create_security_group()
        neutron_net = self._create_network('network1')
        # A port must reference the SG so the rule materialises in OVN
        self._create_port('portsgtest', neutron_net['id'],
                          security_groups=[neutron_sg['id']])

        with mock.patch.object(self._ovn_client, 'create_security_group_rule'):
            neutron_obj = self._create_security_group_rule(neutron_sg['id'])

        # Assert the sg rule doesn't exist in OVN
        self.assertIsNone(
            self._find_security_group_rule_row_by_id(neutron_obj['id']))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the sg rule was now created. We don't save the revision number
        # in the Security Group because OVN doesn't support updating it,
        # all we care about is whether it exists or not.
        self.assertIsNotNone(
            self._find_security_group_rule_row_by_id(neutron_obj['id']))

        # > Delete
        # FIXME(lucasagomes): Maintenance thread fixing deleted
        # security group rules is currently broken due to:
        # https://bugs.launchpad.net/networking-ovn/+bug/1756123
    def test_router_port(self):
        """Maintenance thread repairs create/delete drift for a router port.

        bump_revision is also patched on create, so the revision record stays
        stale and the maintenance thread detects the missing OVN port.
        """
        neutron_net = self._create_network('networktest', external=True)
        neutron_subnet = self._create_subnet('subnettest', neutron_net['id'])
        neutron_router = self._create_router('routertest')

        with mock.patch.object(self._l3_ovn_client, 'create_router_port'):
            with mock.patch('neutron.db.ovn_revision_numbers_db.'
                            'bump_revision'):
                neutron_obj = self._add_router_interface(neutron_router['id'],
                                                         neutron_subnet['id'])

        # Assert the router port doesn't exist in OVN
        self.assertIsNone(
            self._find_router_port_row_by_port_id(neutron_obj['port_id']))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the router port was now created
        self.assertIsNotNone(
            self._find_router_port_row_by_port_id(neutron_obj['port_id']))

        # > Delete
        with mock.patch.object(self._l3_ovn_client, 'delete_router_port'):
            self._remove_router_interface(neutron_router['id'],
                                          neutron_subnet['id'])

        # Assert the router port still exists in OVNDB
        self.assertIsNotNone(
            self._find_router_port_row_by_port_id(neutron_obj['port_id']))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the router port is now deleted from OVNDB
        self.assertIsNone(
            self._find_router_port_row_by_port_id(neutron_obj['port_id']))

        # Assert the revision number no longer exists
        self.assertIsNone(db_rev.get_revision_row(
            self.context,
            neutron_obj['port_id']))
    def test_check_metadata_ports(self):
        """check_metadata_ports() re-creates a deleted metadata port.

        The task raises periodics.NeverAgain because it only runs once at
        startup.
        """
        ovn_config.cfg.CONF.set_override('ovn_metadata_enabled', True,
                                         group='ovn')
        neutron_net = self._create_network('network1')
        metadata_port = self._ovn_client._find_metadata_port(
            self.context, neutron_net['id'])

        # Assert the metadata port exists
        self.assertIsNotNone(metadata_port)

        # Delete the metadata port
        self._delete('ports', metadata_port['id'])

        metadata_port = self._ovn_client._find_metadata_port(
            self.context, neutron_net['id'])

        # Assert the metadata port is gone
        self.assertIsNone(metadata_port)

        # Call the maintenance thread to fix the problem, it will raise
        # NeverAgain so that the job only runs once at startup
        self.assertRaises(periodics.NeverAgain,
                          self.maint.check_metadata_ports)

        metadata_port = self._ovn_client._find_metadata_port(
            self.context, neutron_net['id'])

        # Assert the metadata port was re-created
        self.assertIsNotNone(metadata_port)
def test_check_metadata_ports_not_enabled(self):
ovn_config.cfg.CONF.set_override('ovn_metadata_enabled', False,
group='ovn')
with mock.patch.object(self._ovn_client,
'create_metadata_port') as mock_create_port:
self.assertRaises(periodics.NeverAgain,
self.maint.check_metadata_ports)
# Assert create_metadata_port() wasn't called since metadata
# is not enabled
self.assertFalse(mock_create_port.called)
    def test_check_for_port_security_unknown_address(self):
        """The 'unknown' address tracks the LSP port_security state.

        Port security disabled -> 'unknown' must appear in 'addresses';
        port security enabled -> 'unknown' must be removed. The maintenance
        task raises periodics.NeverAgain (runs once at startup).
        """
        neutron_net = self._create_network('network1')
        neutron_port = self._create_port('port1', neutron_net['id'])

        # Let's force disabling port security for the LSP
        self.nb_api.lsp_set_port_security(neutron_port['id'], []).execute(
            check_error=True)

        ovn_port = self.nb_api.db_find(
            'Logical_Switch_Port', ('name', '=', neutron_port['id'])).execute(
            check_error=True)[0]

        # Assert that port security is now disabled but the 'unknown'
        # is not set in the addresses column
        self.assertFalse(ovn_port['port_security'])
        self.assertNotIn('unknown', ovn_port['addresses'])

        # Call the maintenance task to fix the problem. Note that
        # NeverAgain is raised so it only runs once at start up
        self.assertRaises(periodics.NeverAgain,
                          self.maint.check_for_port_security_unknown_address)

        ovn_port = self.nb_api.db_find(
            'Logical_Switch_Port', ('name', '=', neutron_port['id'])).execute(
            check_error=True)[0]

        # Assert that 'unknown' was set in the addresses column for
        # the port
        self.assertFalse(ovn_port['port_security'])
        self.assertIn('unknown', ovn_port['addresses'])

        # Now the other way around, let's set port_security in the OVN
        # table while the 'unknown' address is set in the addresses column
        self.nb_api.lsp_set_port_security(
            neutron_port['id'], ovn_port['addresses']).execute(
            check_error=True)

        ovn_port = self.nb_api.db_find(
            'Logical_Switch_Port', ('name', '=', neutron_port['id'])).execute(
            check_error=True)[0]
        self.assertTrue(ovn_port['port_security'])
        self.assertIn('unknown', ovn_port['addresses'])

        # Call the maintenance task to fix the problem. Note that
        # NeverAgain is raised so it only runs once at start up
        self.assertRaises(periodics.NeverAgain,
                          self.maint.check_for_port_security_unknown_address)

        ovn_port = self.nb_api.db_find(
            'Logical_Switch_Port', ('name', '=', neutron_port['id'])).execute(
            check_error=True)[0]

        # Assert that 'unknown' was removed from the addresses column
        # for the port
        self.assertTrue(ovn_port['port_security'])
        self.assertNotIn('unknown', ovn_port['addresses'])
    def test_check_for_igmp_snooping_enabled(self):
        """Maintenance task syncs the igmp_snooping_enable config to OVN.

        The Logical_Switch 'other_config' mcast_snoop flag must follow the
        OVS group option; mcast_flood_unregistered stays 'false' throughout.
        """
        cfg.CONF.set_override('igmp_snooping_enable', False, group='OVS')
        net = self._create_network('net')
        ls = self.nb_api.db_find('Logical_Switch',
                                 ('name', '=', utils.ovn_name(net['id']))).execute(
            check_error=True)[0]

        self.assertEqual('false', ls['other_config'][ovn_const.MCAST_SNOOP])
        self.assertEqual(
            'false', ls['other_config'][ovn_const.MCAST_FLOOD_UNREGISTERED])

        # Change the value of the configuration
        cfg.CONF.set_override('igmp_snooping_enable', True, group='OVS')

        # Call the maintenance task and check that the value has been
        # updated in the Logical Switch
        self.assertRaises(periodics.NeverAgain,
                          self.maint.check_for_igmp_snoop_support)

        ls = self.nb_api.db_find('Logical_Switch',
                                 ('name', '=', utils.ovn_name(net['id']))).execute(
            check_error=True)[0]

        self.assertEqual('true', ls['other_config'][ovn_const.MCAST_SNOOP])
        self.assertEqual(
            'false', ls['other_config'][ovn_const.MCAST_FLOOD_UNREGISTERED])
    def test_floating_ip(self):
        """Maintenance thread repairs create/update/delete drift for a FIP.

        Checks the dnat_and_snat NAT rule on the router after each phase,
        and finally that the router's own snat rule is left untouched.
        """
        ext_net = self._create_network('ext_networktest', external=True)
        ext_subnet = self._create_subnet(
            'ext_subnettest',
            ext_net['id'],
            **{'cidr': '100.0.0.0/24',
               'gateway_ip': '100.0.0.254',
               'allocation_pools': [
                   {'start': '100.0.0.2', 'end': '100.0.0.253'}],
               'enable_dhcp': False})
        net1 = self._create_network('network1test', external=False)
        subnet1 = self._create_subnet('subnet1test', net1['id'])
        external_gateway_info = {
            'enable_snat': True,
            'network_id': ext_net['id'],
            'external_fixed_ips': [
                {'ip_address': '100.0.0.2', 'subnet_id': ext_subnet['id']}]}
        router = self._create_router(
            'routertest', external_gateway_info=external_gateway_info)
        self._add_router_interface(router['id'], subnet1['id'])

        p1 = self._create_port('testp1', net1['id'])
        logical_ip = p1['fixed_ips'][0]['ip_address']
        fip_info = {'floatingip': {
            'description': 'test_fip',
            'tenant_id': self._tenant_id,
            'floating_network_id': ext_net['id'],
            'port_id': p1['id'],
            'fixed_ip_address': logical_ip}}

        # > Create
        with mock.patch.object(self._l3_ovn_client, 'create_floatingip'):
            fip = self.l3_plugin.create_floatingip(self.context, fip_info)

        floating_ip_address = fip['floating_ip_address']
        self.assertEqual(router['id'], fip['router_id'])
        self.assertEqual('testp1', fip['port_details']['name'])
        self.assertIsNotNone(self.nb_api.get_lswitch_port(fip['port_id']))

        # Assert the dnat_and_snat rule doesn't exist in OVN
        self.assertIsNone(
            self._find_nat_rule(router['id'], floating_ip_address, logical_ip))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the rule for the fip is now present
        self.assertIsNotNone(
            self._find_nat_rule(router['id'], floating_ip_address, logical_ip))

        # > Update
        p2 = self._create_port('testp2', net1['id'])
        logical_ip = p2['fixed_ips'][0]['ip_address']
        fip_info = {'floatingip': {
            'port_id': p2['id'],
            'fixed_ip_address': logical_ip}}

        with mock.patch.object(self._l3_ovn_client, 'update_floatingip'):
            self.l3_plugin.update_floatingip(self.context, fip['id'], fip_info)

        # Assert the dnat_and_snat rule in OVN is still using p1's address
        stale_nat_rule = self._find_nat_rule(router['id'], floating_ip_address)
        self.assertEqual(p1['fixed_ips'][0]['ip_address'],
                         stale_nat_rule['logical_ip'])

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the rule for the fip is now updated
        self.assertIsNotNone(
            self._find_nat_rule(router['id'], floating_ip_address, logical_ip))

        # > Delete
        with mock.patch.object(self._l3_ovn_client, 'delete_floatingip'):
            self.l3_plugin.delete_floatingip(self.context, fip['id'])

        self.assertRaises(
            lib_l3_exc.FloatingIPNotFound,
            self.l3_plugin.get_floatingip, self.context, fip['id'])

        # Assert the dnat_and_snat rule in OVN is still present
        self.assertIsNotNone(
            self._find_nat_rule(router['id'], floating_ip_address, logical_ip))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the rule for the fip is now gone
        self.assertIsNone(
            self._find_nat_rule(router['id'], floating_ip_address))

        # Assert the router snat rule is still there
        snat_rule = self._find_nat_rule(
            router['id'], '100.0.0.2', nat_type='snat')
        self.assertEqual(subnet1['cidr'], snat_rule['logical_ip'])
    def test_port_forwarding(self):
        """Maintenance thread repairs create/update/delete drift for FIP
        port forwarding, verified through the OVN load balancer entries.
        """
        # Helper to build the request body expected by the PF plugin API
        fip_attrs = lambda args: {
            pf_def.RESOURCE_NAME: {pf_def.RESOURCE_NAME: args}}

        def _verify_lb(test, protocol, vip_ext_port, vip_int_port):
            """Assert exactly one PF load balancer exists with the given vip."""
            ovn_lbs = self._find_pf_lb(router_id, fip_id)
            test.assertEqual(1, len(ovn_lbs))
            test.assertEqual('pf-floatingip-{}-{}'.format(fip_id, protocol),
                             ovn_lbs[0].name)
            test.assertEqual(
                {'{}:{}'.format(fip_ip, vip_ext_port):
                 '{}:{}'.format(p1_ip, vip_int_port)},
                ovn_lbs[0].vips)

        ext_net = self._create_network('ext_networktest', external=True)
        ext_subnet = self._create_subnet(
            'ext_subnettest',
            ext_net['id'],
            **{'cidr': '100.0.0.0/24',
               'gateway_ip': '100.0.0.254',
               'allocation_pools': [
                   {'start': '100.0.0.2', 'end': '100.0.0.253'}],
               'enable_dhcp': False})
        net1 = self._create_network('network1test', external=False)
        subnet1 = self._create_subnet('subnet1test', net1['id'])
        external_gateway_info = {
            'enable_snat': True,
            'network_id': ext_net['id'],
            'external_fixed_ips': [
                {'ip_address': '100.0.0.2', 'subnet_id': ext_subnet['id']}]}
        router = self._create_router(
            'routertest', external_gateway_info=external_gateway_info)
        router_id = router['id']
        self._add_router_interface(router['id'], subnet1['id'])
        fip_info = {'floatingip': {
            'tenant_id': self._tenant_id,
            'floating_network_id': ext_net['id'],
            'port_id': None,
            'fixed_ip_address': None}}
        fip = self.l3_plugin.create_floatingip(self.context, fip_info)
        fip_id = fip['id']
        fip_ip = fip['floating_ip_address']
        p1 = self._create_port('testp1', net1['id'])
        p1_ip = p1['fixed_ips'][0]['ip_address']

        # Patching registry.publish suppresses the normal OVN sync,
        # leaving the PF load balancer stale for each phase below
        with mock.patch('neutron_lib.callbacks.registry.publish') as m_publish:
            # > Create
            fip_pf_args = {
                pf_def.EXTERNAL_PORT: 2222,
                pf_def.INTERNAL_PORT: 22,
                pf_def.INTERNAL_PORT_ID: p1['id'],
                pf_def.PROTOCOL: 'tcp',
                pf_def.INTERNAL_IP_ADDRESS: p1_ip}
            pf_obj = self.pf_plugin.create_floatingip_port_forwarding(
                self.context, fip_id, **fip_attrs(fip_pf_args))
            m_publish.assert_called_once()

            # Assert load balancer for port forwarding was not created
            self.assertFalse(self._find_pf_lb(router_id, fip_id))

            # Call the maintenance thread to fix the problem
            self.maint.check_for_inconsistencies()

            # Assert load balancer for port forwarding was created
            _verify_lb(self, 'tcp', 2222, 22)

            # > Update
            fip_pf_args = {pf_def.EXTERNAL_PORT: 5353,
                           pf_def.INTERNAL_PORT: 53,
                           pf_def.PROTOCOL: 'udp'}
            m_publish.reset_mock()
            self.pf_plugin.update_floatingip_port_forwarding(
                self.context, pf_obj['id'], fip_id, **fip_attrs(fip_pf_args))
            m_publish.assert_called_once()

            # Assert load balancer for port forwarding is stale
            _verify_lb(self, 'tcp', 2222, 22)

            # Call the maintenance thread to fix the problem
            self.maint.check_for_inconsistencies()

            # Assert load balancer for port forwarding was updated
            _verify_lb(self, 'udp', 5353, 53)

            # > Delete
            m_publish.reset_mock()
            self.pf_plugin.delete_floatingip_port_forwarding(
                self.context, pf_obj['id'], fip_id)
            m_publish.assert_called_once()

            # Assert load balancer for port forwarding is stale
            _verify_lb(self, 'udp', 5353, 53)

            # Call the maintenance thread to fix the problem
            self.maint.check_for_inconsistencies()

            # Assert load balancer for port forwarding is gone
            self.assertFalse(self._find_pf_lb(router_id, fip_id))
| |
"""
Serializers used in various InvenTree apps
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import tablib
from decimal import Decimal
from collections import OrderedDict
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError as DjangoValidationError
from django.utils.translation import ugettext_lazy as _
from django.db import models
from djmoney.contrib.django_rest_framework.fields import MoneyField
from djmoney.money import Money
from djmoney.utils import MONEY_CLASSES, get_currency_field_name
from rest_framework import serializers
from rest_framework.utils import model_meta
from rest_framework.fields import empty
from rest_framework.exceptions import ValidationError
from rest_framework.serializers import DecimalField
from .models import extract_int
class InvenTreeMoneySerializer(MoneyField):
    """
    Custom serializer for 'MoneyField',
    which ensures that passed values are numerically valid

    Ref: https://github.com/django-money/django-money/blob/master/djmoney/contrib/django_rest_framework/fields.py
    """

    def __init__(self, *args, **kwargs):
        # Apply sensible precision defaults, unless explicitly overridden
        kwargs["max_digits"] = kwargs.get("max_digits", 19)
        kwargs["decimal_places"] = kwargs.get("decimal_places", 4)

        super().__init__(*args, **kwargs)

    def get_value(self, data):
        """
        Test that the returned amount is a valid Decimal.

        Returns a Money object (if a currency is available), a Decimal,
        None (for an empty value), or the DRF 'empty' sentinel.

        Raises:
            ValidationError: if the value cannot be converted to a Decimal
        """

        # Skip MoneyField.get_value and use the plain DecimalField lookup
        amount = super(DecimalField, self).get_value(data)

        # Convert an empty string to None
        if len(str(amount).strip()) == 0:
            amount = None

        try:
            if amount is not None and amount is not empty:
                amount = Decimal(amount)
        except (ValueError, TypeError, ArithmeticError):
            # ArithmeticError covers decimal.InvalidOperation.
            # A bare 'except' here would also swallow KeyboardInterrupt etc.
            raise ValidationError({
                self.field_name: [_("Must be a valid number")],
            })

        currency = data.get(get_currency_field_name(self.field_name), self.default_currency)

        if currency and amount is not None and not isinstance(amount, MONEY_CLASSES) and amount is not empty:
            return Money(amount, currency)

        return amount
class UserSerializer(serializers.ModelSerializer):
    """ Serializer for User - provides all fields """

    class Meta:
        model = User
        # Bug fix: DRF requires the special string '__all__' (not 'all')
        # to expose every model field; 'all' raises a configuration error
        fields = '__all__'
class UserSerializerBrief(serializers.ModelSerializer):
    """ Serializer for User - provides limited information """

    class Meta:
        model = User
        # Expose only the primary key and username (no personal details)
        fields = [
            'pk',
            'username',
        ]
class InvenTreeModelSerializer(serializers.ModelSerializer):
    """
    Inherits the standard Django ModelSerializer class,
    but also ensures that the underlying model class data are checked on validation.
    """

    @staticmethod
    def _extract_model_default(field):
        """
        Return the default value declared for a django model field.

        Callable defaults are evaluated; if evaluation fails, the DRF
        'empty' sentinel is returned to signal "no usable default".
        """
        value = field.default

        # Account for callable functions
        if callable(value):
            try:
                value = value()
            except Exception:
                # Cannot evaluate the default - skip this field
                return empty

        return value

    def __init__(self, instance=None, data=empty, **kwargs):
        """
        Custom __init__ routine to ensure that *default* values (as specified in the ORM)
        are used by the DRF serializers, *if* the values are not provided by the user.
        """

        # If instance is None, we are creating a new instance
        if instance is None and data is not empty:

            if data is None:
                data = OrderedDict()
            else:
                # Take a copy, so we do not mutate the caller's data
                new_data = OrderedDict()
                new_data.update(data)
                data = new_data

            # Add missing fields which have default values
            ModelClass = self.Meta.model
            fields = model_meta.get_field_info(ModelClass)

            for field_name, field in fields.fields.items():
                # Update the field IF (and ONLY IF):
                # - The field has a specified default value
                # - The field does not already have a value set
                if field.has_default() and field_name not in data:
                    value = self._extract_model_default(field)

                    if value is not empty:
                        data[field_name] = value

        super().__init__(instance, data, **kwargs)

    def get_initial(self):
        """
        Construct initial data for the serializer.

        Use the 'default' values specified by the django model definition
        """

        initials = super().get_initial().copy()

        # Are we creating a new instance?
        if self.instance is None:
            ModelClass = self.Meta.model
            fields = model_meta.get_field_info(ModelClass)

            for field_name, field in fields.fields.items():
                if field.has_default() and field_name not in initials:
                    value = self._extract_model_default(field)

                    if value is not empty:
                        initials[field_name] = value

        return initials

    def save(self, **kwargs):
        """
        Catch any django ValidationError thrown at the moment save() is called,
        and re-throw as a DRF ValidationError
        """
        try:
            super().save(**kwargs)
        except (ValidationError, DjangoValidationError) as exc:
            raise ValidationError(detail=serializers.as_serializer_error(exc))

        return self.instance

    def update(self, instance, validated_data):
        """
        Catch any django ValidationError, and re-throw as a DRF ValidationError
        """
        try:
            instance = super().update(instance, validated_data)
        except (ValidationError, DjangoValidationError) as exc:
            raise ValidationError(detail=serializers.as_serializer_error(exc))

        return instance

    def run_validation(self, data=empty):
        """
        Perform serializer validation.

        In addition to running validators on the serializer fields,
        this class ensures that the underlying model is also validated.
        """

        # Run any native validation checks first (may raise a ValidationError)
        data = super().run_validation(data)

        # Now ensure the underlying model is correct
        if not hasattr(self, 'instance') or self.instance is None:
            # No instance exists (we are creating a new one)
            instance = self.Meta.model(**data)
        else:
            # Instance already exists (we are updating!)
            instance = self.instance

            # Update instance fields
            for attr, value in data.items():
                try:
                    setattr(instance, attr, value)
                except (ValidationError, DjangoValidationError) as exc:
                    raise ValidationError(detail=serializers.as_serializer_error(exc))

        # Run a 'full_clean' on the model.
        # Note that by default, DRF does *not* perform full model validation!
        try:
            instance.full_clean()
        except (ValidationError, DjangoValidationError) as exc:
            data = exc.message_dict

            # Change '__all__' key (django style) to 'non_field_errors' (DRF style)
            if '__all__' in data:
                data['non_field_errors'] = data['__all__']
                del data['__all__']

            raise ValidationError(data)

        return data
class ReferenceIndexingSerializerMixin():
    """
    This serializer mixin ensures that the reference field is not
    too big / small for the BigIntegerField
    """

    def validate_reference(self, value):
        """
        Ensure the integer extracted from the reference fits into a
        (signed) BigIntegerField.

        Raises:
            ValidationError: if the value is out of range
        """
        ref_int = extract_int(value)

        if ref_int > models.BigIntegerField.MAX_BIGINT:
            raise serializers.ValidationError('reference is too large')

        # Docstring promises a "small" check too - enforce the lower bound
        if ref_int < -models.BigIntegerField.MAX_BIGINT:
            raise serializers.ValidationError('reference is too small')

        return value
class InvenTreeAttachmentSerializerField(serializers.FileField):
    """
    Override the DRF native FileField serializer,
    to remove the leading server path.

    For example, the FileField might supply something like:

    http://127.0.0.1:8000/media/foo/bar.jpg

    Whereas we wish to return:

    /media/foo/bar.jpg

    Why? You can't handle the why!

    Actually, if the server process is serving the data at 127.0.0.1,
    but a proxy service (e.g. nginx) is then providing DNS lookup to the outside world,
    then an attachment which prefixes the "address" of the internal server
    will not be accessible from the outside world.
    """

    def to_representation(self, value):
        """Render the attachment as a path relative to MEDIA_URL."""
        if value:
            return os.path.join(str(settings.MEDIA_URL), str(value))

        return None
class InvenTreeAttachmentSerializer(InvenTreeModelSerializer):
    """
    Special case of an InvenTreeModelSerializer, which handles an "attachment" model.

    The only real addition here is that we support "renaming" of the attachment file.
    """

    # Attachment file path, rendered relative to MEDIA_URL
    attachment = InvenTreeAttachmentSerializerField(
        required=False,
        allow_null=False,
    )

    # The 'filename' field must be present in the serializer
    # (maps to the model's 'basename' attribute, enabling file rename)
    filename = serializers.CharField(
        label=_('Filename'),
        required=False,
        source='basename',
        allow_blank=False,
    )
class InvenTreeImageSerializerField(serializers.ImageField):
    """
    Custom image serializer.

    On upload, validate that the file is a valid image file
    """

    def to_representation(self, value):
        """Render the image as a path relative to MEDIA_URL (or None if unset)."""
        return os.path.join(str(settings.MEDIA_URL), str(value)) if value else None
class InvenTreeDecimalField(serializers.FloatField):
    """
    Custom serializer for decimal fields. Solves the following issues:

    - The normal DRF DecimalField renders values with trailing zeros
    - Using a FloatField can result in rounding issues: https://code.djangoproject.com/ticket/30290
    """

    def to_internal_value(self, data):
        """
        Convert the incoming value to a Decimal, via its string representation.

        Raises:
            ValidationError: if the value cannot be converted
        """
        try:
            return Decimal(str(data))
        except (ValueError, TypeError, ArithmeticError):
            # ArithmeticError covers decimal.InvalidOperation.
            # A bare 'except' here would also swallow system-exit signals.
            raise serializers.ValidationError(_("Invalid value"))
class DataFileUploadSerializer(serializers.Serializer):
    """
    Generic serializer for uploading a data file, and extracting a dataset.

    - Validates uploaded file
    - Extracts column names
    - Extracts data rows
    """

    # Implementing class should register a target model (database model) to be used for import
    TARGET_MODEL = None

    class Meta:
        fields = [
            'data_file',
        ]

    data_file = serializers.FileField(
        label=_("Data File"),
        help_text=_("Select data file for upload"),
        required=True,
        allow_empty_file=False,
    )

    def validate_data_file(self, data_file):
        """
        Perform validation checks on the uploaded data file.

        Checks (in order): supported extension, size limit, parseability
        by tablib, and presence of at least one column and one data row.

        Raises:
            ValidationError: if any check fails
        """

        self.filename = data_file.name

        name, ext = os.path.splitext(data_file.name)

        # Remove the leading . from the extension
        ext = ext[1:]

        accepted_file_types = [
            'xls', 'xlsx',
            'csv', 'tsv',
            'xml',
        ]

        if ext not in accepted_file_types:
            raise serializers.ValidationError(_("Unsupported file type"))

        # Impose a 50MB limit on uploaded BOM files
        max_upload_file_size = 50 * 1024 * 1024

        if data_file.size > max_upload_file_size:
            raise serializers.ValidationError(_("File is too large"))

        # Read file data into memory (bytes object)
        try:
            data = data_file.read()
        except Exception as e:
            raise serializers.ValidationError(str(e))

        # Text-based formats must be decoded before tablib can parse them
        if ext in ['csv', 'tsv', 'xml']:
            try:
                data = data.decode()
            except Exception as e:
                raise serializers.ValidationError(str(e))

        # Convert to a tablib dataset (we expect headers)
        try:
            self.dataset = tablib.Dataset().load(data, ext, headers=True)
        except Exception as e:
            raise serializers.ValidationError(str(e))

        # Bug fix: tablib may report headers=None for some inputs;
        # 'len(None)' would raise TypeError, so test truthiness instead
        if not self.dataset.headers:
            raise serializers.ValidationError(_("No columns found in file"))

        if len(self.dataset) == 0:
            raise serializers.ValidationError(_("No data rows found in file"))

        return data_file

    def match_column(self, column_name, field_names, exact=False):
        """
        Attempt to match a column name (from the file) to a field (defined in the model)

        Order of matching is:
        - Direct match
        - Case insensitive match
        - Fuzzy match
        """

        column_name = column_name.strip()

        column_name_lower = column_name.lower()

        if column_name in field_names:
            return column_name

        for field_name in field_names:
            if field_name.lower() == column_name_lower:
                return field_name

        if exact:
            # Finished available 'exact' matches
            return None

        # TODO: Fuzzy pattern matching for column names

        # No matches found
        return None

    def extract_data(self):
        """
        Returns dataset extracted from the file
        """

        # Provide a dict of available import fields for the model
        model_fields = {}

        # Keep track of columns we have already extracted
        matched_columns = set()

        if self.TARGET_MODEL:
            try:
                model_fields = self.TARGET_MODEL.get_import_fields()
            except Exception:
                # Target model does not provide importable fields
                pass

        # Extract a list of valid model field names
        model_field_names = list(model_fields.keys())

        # Provide a dict of available columns from the dataset
        file_columns = {}

        for header in self.dataset.headers:
            column = {}

            # Attempt to "match" file columns to model fields
            match = self.match_column(header, model_field_names, exact=True)

            # Only map each model field to a single file column
            if match is not None and match not in matched_columns:
                matched_columns.add(match)
                column['value'] = match
            else:
                column['value'] = None

            file_columns[header] = column

        return {
            'file_fields': file_columns,
            'model_fields': model_fields,
            'rows': [row.values() for row in self.dataset.dict],
            'filename': self.filename,
        }

    def save(self):
        """No-op: data extraction does not persist anything."""
        ...
class DataFileExtractSerializer(serializers.Serializer):
    """
    Generic serializer for extracting data from an imported dataset.

    - User provides an array of matched headers
    - User provides an array of raw data rows
    """

    # Implementing class should register a target model (database model) to be used for import
    TARGET_MODEL = None

    class Meta:
        fields = [
            'columns',
            'rows',
        ]

    # Mapping of columns
    columns = serializers.ListField(
        child=serializers.CharField(
            allow_blank=True,
        ),
    )

    rows = serializers.ListField(
        child=serializers.ListField(
            child=serializers.CharField(
                allow_blank=True,
                allow_null=True,
            ),
        )
    )

    def validate(self, data):
        """
        Cross-field validation: ensure rows and columns were supplied,
        then run the custom column validation hook.
        """
        data = super().validate(data)

        self.columns = data.get('columns', [])
        self.rows = data.get('rows', [])

        if len(self.rows) == 0:
            raise serializers.ValidationError(_("No data rows provided"))

        if len(self.columns) == 0:
            raise serializers.ValidationError(_("No data columns supplied"))

        self.validate_extracted_columns()

        return data

    @property
    def data(self):
        """
        Return the processed dataset: model fields, columns, and per-row data.
        """

        # Bug fix: bind 'model_fields' unconditionally, so it is defined
        # even when no TARGET_MODEL is registered (was a NameError)
        model_fields = {}

        if self.TARGET_MODEL:
            try:
                model_fields = self.TARGET_MODEL.get_import_fields()
            except Exception:
                pass

        rows = []

        for row in self.rows:
            # Optionally pre-process each row, before sending back to the client
            processed_row = self.process_row(self.row_to_dict(row))

            if processed_row:
                rows.append({
                    "original": row,
                    "data": processed_row,
                })

        return {
            'fields': model_fields,
            'columns': self.columns,
            'rows': rows,
        }

    def process_row(self, row):
        """
        Process a 'row' of data, which is a mapped column:value dict

        Returns either a mapped column:value dict, or None.

        If the function returns None, the column is ignored!
        """

        # Default implementation simply returns the original row data
        return row

    def row_to_dict(self, row):
        """
        Convert a "row" to a named data dict
        """

        row_dict = {
            'errors': {},
        }

        for idx, value in enumerate(row):
            if idx < len(self.columns):
                col = self.columns[idx]

                if col:
                    row_dict[col] = value

        return row_dict

    def validate_extracted_columns(self):
        """
        Perform custom validation of header mapping.

        Raises:
            ValidationError: if a required column is missing, or duplicated
        """

        # Bug fix: bind 'model_fields' unconditionally, so the loop below
        # does not raise NameError when TARGET_MODEL is not set
        model_fields = {}

        if self.TARGET_MODEL:
            try:
                model_fields = self.TARGET_MODEL.get_import_fields()
            except Exception:
                pass

        cols_seen = set()

        for name, field in model_fields.items():
            required = field.get('required', False)

            # Check for missing required columns
            if required:
                if name not in self.columns:
                    raise serializers.ValidationError(_(f"Missing required column: '{name}'"))

        for col in self.columns:
            if not col:
                continue

            # Check for duplicated columns
            if col in cols_seen:
                raise serializers.ValidationError(_(f"Duplicate column: '{col}'"))

            cols_seen.add(col)

    def save(self):
        """
        No "save" action for this serializer
        """
        ...
| |
#!/usr/bin/env python
#
#
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 NeuroPoly, Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Tanguy Magnan
# Modified: 2015-07-29
#
# License: see the LICENSE.TXT
#=======================================================================================================================
#
import sys, commands
# Get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
# Append path that contains scripts, to be able to load modules
sys.path.append(path_sct + '/scripts')
from sct_register_multimodal import Paramreg
def register_slicereg2d_pointwise(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='seg', algo='slicereg2d_pointwise', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, verbose=0):
"""Slice-by-slice regularized registration by translation of two segmentations.
First we estimate for each slice the translation vector by calculating the difference of position of the two centers of
mass of the two segmentations. Then we remove outliers using Median Absolute Deviation technique (MAD) and smooth
the translation along x and y axis using moving average hanning window. Eventually, we generate two warping fields
(forward and inverse) resulting from this regularized registration technique.
The segmentations must be of same size (otherwise generate_warping_field will not work for forward or inverse
creation).
input:
fname_source: name of moving image (type: string)
fname_dest: name of fixed image (type: string)
window_length: size of window for moving average smoothing (type: int)
paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
warp_forward_out: name of output forward warp (type: string)
warp_inverse_out: name of output inverse warp (type: string)
factor: sensibility factor for outlier detection (higher the factor, smaller the detection) (type: int or float)
verbose: display parameter (type: int, value: 0,1 or 2)
output:
creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
"""
if paramreg.type != 'seg':
print '\nERROR: Algorithm slicereg2d_pointwise only operates for segmentation type.'
sys.exit(2)
else:
from msct_register_regularized import register_seg, generate_warping_field
from numpy import asarray
from msct_smooth import smoothing_window, outliers_detection, outliers_completion
# Calculate displacement
x_disp, y_disp = register_seg(fname_source, fname_dest)
# Change to array
x_disp_a = asarray(x_disp)
y_disp_a = asarray(y_disp)
# Detect outliers
mask_x_a = outliers_detection(x_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
mask_y_a = outliers_detection(y_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
# Replace value of outliers by linear interpolation using closest non-outlier points
x_disp_a_no_outliers = outliers_completion(mask_x_a, verbose=0)
y_disp_a_no_outliers = outliers_completion(mask_y_a, verbose=0)
# Smooth results
x_disp_smooth = smoothing_window(x_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose=verbose)
y_disp_smooth = smoothing_window(y_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose=verbose)
# Generate warping field
generate_warping_field(fname_dest, x_disp_smooth, y_disp_smooth, fname=warp_forward_out) #name_warp= 'step'+str(paramreg.step)
# Inverse warping field
generate_warping_field(fname_source, -x_disp_smooth, -y_disp_smooth, fname=warp_inverse_out)
def register_slicereg2d_translation(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='Translation', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
                        fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
                        ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
                                                  'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
    """Slice-by-slice regularized registration by translation of two images.

    The two images are first registered slice-by-slice with antsRegistration in
    2D. Outliers in the resulting x/y translations are rejected using the Median
    Absolute Deviation (MAD) criterion and replaced by linear interpolation, the
    profiles are smoothed with a moving-average hanning window, and forward and
    inverse warping fields are generated from the regularized result.

    The images must be of the same size (otherwise generate_warping_field will
    not work for forward or inverse creation).

    input:
        fname_source: name of moving image (type: string)
        fname_dest: name of fixed image (type: string)
        window_length[optional]: size of window for moving average smoothing (type: int)
        paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
        fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
        warp_forward_out[optional]: name of output forward warp (type: string)
        warp_inverse_out[optional]: name of output inverse warp (type: string)
        factor[optional]: sensibility factor for outlier detection (higher the factor, smaller the detection)
          (type: int or float)
        remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
        verbose[optional]: display parameter (type: int, value: 0,1 or 2)
        ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)

    output:
        creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
    """
    from msct_register_regularized import register_images, generate_warping_field
    from numpy import asarray
    from msct_smooth import smoothing_window, outliers_detection, outliers_completion

    # Slice-wise 2D registration (translation only) of source onto dest
    dx, dy = register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)

    def _regularize(values):
        # MAD outlier rejection, gap filling by interpolation, then hanning smoothing
        masked = outliers_detection(asarray(values), type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
        filled = outliers_completion(masked, verbose=0)
        return smoothing_window(filled, window_len=int(window_length), window='hanning', verbose=verbose)

    dx_smooth = _regularize(dx)
    dy_smooth = _regularize(dy)

    # Forward field, and inverse field built from the negated displacements
    generate_warping_field(fname_dest, dx_smooth, dy_smooth, fname=warp_forward_out)
    generate_warping_field(fname_source, -dx_smooth, -dy_smooth, fname=warp_inverse_out)
def register_slicereg2d_rigid(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='Rigid', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
                        fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
                        ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
                                                  'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
    """Slice-by-slice regularized registration (rigid) of two images.
    We first register slice-by-slice the two images using antsRegistration in 2D. Then we remove outliers using
    Median Absolute Deviation technique (MAD) and smooth the translations and angle of rotation along x and y axis using
    moving average hanning window. Eventually, we generate two warping fields (forward and inverse) resulting from this
    regularized registration technique.
    The images must be of same size (otherwise generate_warping_field will not work for forward or inverse
    creation).
    input:
        fname_source: name of moving image (type: string)
        fname_dest: name of fixed image (type: string)
        window_length[optional]: size of window for moving average smoothing (type: int)
        paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
        fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
        warp_forward_out[optional]: name of output forward warp (type: string)
        warp_inverse_out[optional]: name of output inverse warp (type: string)
        factor[optional]: sensibility factor for outlier detection (higher the factor, smaller the detection)
          (type: int or float)
        remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
        verbose[optional]: display parameter (type: int, value: 0,1 or 2)
        ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
    output:
        creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
    """
    from msct_register_regularized import register_images, generate_warping_field
    from numpy import asarray
    from msct_smooth import smoothing_window, outliers_detection, outliers_completion
    # Calculate displacement: per-slice x/y translations plus rotation angle
    x_disp, y_disp, theta_rot = register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
    # Change to array
    x_disp_a = asarray(x_disp)
    y_disp_a = asarray(y_disp)
    theta_rot_a = asarray(theta_rot)
    # Detect outliers (MAD criterion)
    mask_x_a = outliers_detection(x_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
    mask_y_a = outliers_detection(y_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
    # BUGFIX: the rotation signal previously used a hard-coded factor of 2,
    # ignoring the caller-supplied `factor` parameter. With the default
    # factor=2 the behavior is unchanged.
    mask_theta_a = outliers_detection(theta_rot_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
    # Replace value of outliers by linear interpolation using closest non-outlier points
    x_disp_a_no_outliers = outliers_completion(mask_x_a, verbose=0)
    y_disp_a_no_outliers = outliers_completion(mask_y_a, verbose=0)
    theta_rot_a_no_outliers = outliers_completion(mask_theta_a, verbose=0)
    # Smooth results with a moving-average hanning window
    x_disp_smooth = smoothing_window(x_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose=verbose)
    y_disp_smooth = smoothing_window(y_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose=verbose)
    theta_rot_smooth = smoothing_window(theta_rot_a_no_outliers, window_len=int(window_length), window='hanning', verbose=verbose)
    # Generate warping field
    generate_warping_field(fname_dest, x_disp_smooth, y_disp_smooth, theta_rot_smooth, fname=warp_forward_out)
    # Inverse warping field: negated translations and rotation
    generate_warping_field(fname_source, -x_disp_smooth, -y_disp_smooth, -theta_rot_smooth, fname=warp_inverse_out)
def register_slicereg2d_affine(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='Affine', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
                        fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
                        ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
                                                  'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
    """Slice-by-slice regularized registration (affine) of two images.
    We first register slice-by-slice the two images using antsRegistration in 2D (algo: affine) and create 3D warping
    fields (forward and inverse) by merging the 2D warping fields along z. Then we directly detect outliers and smooth
    the 3d warping fields applying a moving average hanning window on each pixel of the plan xOy (i.e. we consider that
    for a position (x,y) in the plan xOy, the variation along z of the vector of displacement (xo, yo, zo) of the
    warping field should not be too abrupt). Eventually, we generate two warping fields (forward and inverse) resulting
    from this regularized registration technique.
    The images must be of same size (otherwise generate_warping_field will not work for forward or inverse
    creation).
    input:
        fname_source: name of moving image (type: string)
        fname_dest: name of fixed image (type: string)
        window_length[optional]: size of window for moving average smoothing (type: int)
        paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
        fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
        warp_forward_out[optional]: name of output forward warp (type: string)
        warp_inverse_out[optional]: name of output inverse warp (type: string)
        factor[optional]: sensibility factor for outlier detection (higher the factor, smaller the detection)
          (type: int or float)
        remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
        verbose[optional]: display parameter (type: int, value: 0,1 or 2)
        ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
    output:
        creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
    """
    from nibabel import load, Nifti1Image, save
    from msct_smooth import smoothing_window, outliers_detection, outliers_completion
    from msct_register_regularized import register_images
    from numpy import apply_along_axis, zeros
    import sct_utils as sct
    name_warp_syn = 'Warp_total_step_'+str(paramreg.step) # 'Warp_total'
    # Calculate displacement
    # NOTE(review): register_images is assumed to write the warping-field files
    # named '<name_warp_syn>.nii.gz' / '<name_warp_syn>_inverse.nii.gz' into the
    # current directory — the file reads below depend on it; confirm in msct_register_regularized.
    register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
    print'\nRegularizing warping fields along z axis...'
    print'\n\tSplitting warping fields ...'
    # sct.run('isct_c3d -mcs ' + name_warp_syn + '.nii.gz -oo ' + name_warp_syn + '_x.nii.gz ' + name_warp_syn + '_y.nii.gz')
    # sct.run('isct_c3d -mcs ' + name_warp_syn + '_inverse.nii.gz -oo ' + name_warp_syn + '_x_inverse.nii.gz ' + name_warp_syn + '_y_inverse.nii.gz')
    # Split each multi-component warp into separate x and y component volumes
    sct.run('sct_maths -i ' + name_warp_syn + '.nii.gz -w -mcs -o ' + name_warp_syn + '_x.nii.gz,' + name_warp_syn + '_y.nii.gz')
    sct.run('sct_maths -i ' + name_warp_syn + '_inverse.nii.gz -w -mcs -o ' + name_warp_syn + '_x_inverse.nii.gz,' + name_warp_syn + '_y_inverse.nii.gz')
    # Load the split component volumes (and keep the headers for re-saving)
    data_warp_x = load(name_warp_syn + '_x.nii.gz').get_data()
    data_warp_y = load(name_warp_syn + '_y.nii.gz').get_data()
    hdr_warp = load(name_warp_syn + '_x.nii.gz').get_header()
    data_warp_x_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_data()
    data_warp_y_inverse = load(name_warp_syn + '_y_inverse.nii.gz').get_data()
    hdr_warp_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_header()
    #Outliers deletion: MAD detection applied per-voxel along the last axis (z)
    print'\n\tDeleting outliers...'
    mask_x_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x)
    mask_y_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y)
    mask_x_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x_inverse)
    mask_y_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y_inverse)
    #Outliers replacement by linear interpolation using closest non-outlier points
    data_warp_x_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_a)
    data_warp_y_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_a)
    data_warp_x_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_inverse_a)
    data_warp_y_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_inverse_a)
    #Smoothing of results along z (hanning moving average, per voxel)
    print'\n\tSmoothing results...'
    data_warp_x_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_no_outliers)
    data_warp_x_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_inverse_no_outliers)
    data_warp_y_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_no_outliers)
    data_warp_y_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_inverse_no_outliers)
    print'\nSaving regularized warping fields...'
    # NOTE(review): the triple-quoted block below is dead code (an alternative
    # save path via sct_maths/msct_image), kept here by the original author.
    '''
    from sct_maths import multicomponent_merge
    from msct_image import Image
    data_warp_smooth = multicomponent_merge([data_warp_x_smooth, data_warp_y_smooth])[0]
    hdr_warp.set_intent('vector', (), '')
    warp_smooth = Image(param=data_warp_smooth, absolutepath=warp_forward_out, hdr=hdr_warp)
    warp_smooth.save()
    data_warp_smooth_inverse = multicomponent_merge([data_warp_x_smooth_inverse, data_warp_y_smooth_inverse])[0]
    hdr_warp_inverse.set_intent('vector', (), '')
    warp_smooth_inverse = Image(param=data_warp_smooth_inverse, absolutepath=warp_inverse_out, hdr=hdr_warp_inverse)
    warp_smooth_inverse.save()
    '''
    #Get image dimensions of destination image
    from msct_image import Image
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest).dim
    # Assemble 5D warp volumes (x, y, z, t, component); only the x and y
    # components are filled, the third component stays zero (2D warps).
    data_warp_smooth = zeros(((((nx, ny, nz, 1, 3)))))
    data_warp_smooth[:,:,:,0,0] = data_warp_x_smooth
    data_warp_smooth[:,:,:,0,1] = data_warp_y_smooth
    data_warp_smooth_inverse = zeros(((((nx, ny, nz, 1, 3)))))
    data_warp_smooth_inverse[:,:,:,0,0] = data_warp_x_smooth_inverse
    data_warp_smooth_inverse[:,:,:,0,1] = data_warp_y_smooth_inverse
    # Force header's parameter to intent so that the file may be recognised as a warping field by ants
    hdr_warp.set_intent('vector', (), '')
    hdr_warp_inverse.set_intent('vector', (), '')
    img = Nifti1Image(data_warp_smooth, None, header=hdr_warp)
    img_inverse = Nifti1Image(data_warp_smooth_inverse, None, header=hdr_warp_inverse)
    save(img, filename=warp_forward_out)
    print'\tFile ' + warp_forward_out + ' saved.'
    save(img_inverse, filename=warp_inverse_out)
    print'\tFile ' + warp_inverse_out + ' saved.'
def register_slicereg2d_syn(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='SyN', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
"""Slice-by-slice regularized registration (syn) of two images.
We first register slice-by-slice the two images using antsRegistration in 2D (algo: syn) and create 3D warping
fields (forward and inverse) by merging the 2D warping fields along z. Then we directly detect outliers and smooth
the 3d warping fields applying a moving average hanning window on each pixel of the plan xOy (i.e. we consider that
for a position (x,y) in the plan xOy, the variation along z of the vector of displacement (xo, yo, zo) of the
warping field should not be too abrupt). Eventually, we generate two warping fields (forward and inverse) resulting
from this regularized registration technique.
The images must be of same size (otherwise generate_warping_field will not work for forward or inverse
creation).
input:
fname_source: name of moving image (type: string)
fname_dest: name of fixed image (type: string)
window_length[optional]: size of window for moving average smoothing (type: int)
paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
warp_forward_out[optional]: name of output forward warp (type: string)
warp_inverse_out[optional]: name of output inverse warp (type: string)
factor[optional]: sensibility factor for outlier detection (higher the factor, smaller the detection)
(type: int or float)
remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
verbose[optional]: display parameter (type: int, value: 0,1 or 2)
ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
output:
creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
"""
from nibabel import load, Nifti1Image, save
from msct_smooth import smoothing_window, outliers_detection, outliers_completion
from msct_register_regularized import register_images
from numpy import apply_along_axis, zeros
import sct_utils as sct
name_warp_syn = 'Warp_total'
# Registrating images
register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
print'\nRegularizing warping fields along z axis...'
print'\n\tSplitting warping fields ...'
# sct.run('isct_c3d -mcs ' + name_warp_syn + '.nii.gz -oo ' + name_warp_syn + '_x.nii.gz ' + name_warp_syn + '_y.nii.gz')
# sct.run('isct_c3d -mcs ' + name_warp_syn + '_inverse.nii.gz -oo ' + name_warp_syn + '_x_inverse.nii.gz ' + name_warp_syn + '_y_inverse.nii.gz')
sct.run('sct_maths -i ' + name_warp_syn + '.nii.gz -w -mcs -o ' + name_warp_syn + '_x.nii.gz,' + name_warp_syn + '_y.nii.gz')
sct.run('sct_maths -i ' + name_warp_syn + '_inverse.nii.gz -w -mcs -o ' + name_warp_syn + '_x_inverse.nii.gz,' + name_warp_syn + '_y_inverse.nii.gz')
im_warp_x = Image(name_warp_syn + '_x.nii.gz')
data_warp_x = im_warp_x.data
im_warp_y = Image(name_warp_syn + '_y.nii.gz')
data_warp_y = im_warp_y.data
hdr_warp = im_warp_x.hdr
# data_warp_x = load(name_warp_syn + '_x.nii.gz').get_data()
# data_warp_y = load(name_warp_syn + '_y.nii.gz').get_data()
# hdr_warp = load(name_warp_syn + '_x.nii.gz').get_header()
im_warp_x_inverse = Image(name_warp_syn + '_x_inverse.nii.gz')
data_warp_x_inverse = im_warp_x_inverse.data
im_warp_y_inverse = Image(name_warp_syn + '_y_inverse.nii.gz')
data_warp_y_inverse = im_warp_y_inverse.data
hdr_warp_inverse = im_warp_x_inverse.hdr
# data_warp_x_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_data()
# data_warp_y_inverse = load(name_warp_syn + '_y_inverse.nii.gz').get_data()
# hdr_warp_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_header()
#Outliers deletion
print'\n\tDeleting outliers...'
mask_x_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x)
mask_y_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y)
mask_x_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x_inverse)
mask_y_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y_inverse)
#Outliers replacement by linear interpolation using closest non-outlier points
data_warp_x_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_a)
data_warp_y_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_a)
data_warp_x_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_inverse_a)
data_warp_y_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_inverse_a)
#Smoothing of results along z
print'\n\tSmoothing results...'
data_warp_x_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_no_outliers)
data_warp_x_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_inverse_no_outliers)
data_warp_y_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_no_outliers)
data_warp_y_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_inverse_no_outliers)
print'\nSaving regularized warping fields...'
#Get image dimensions of destination image
from msct_image import Image
nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest).dim
data_warp_smooth = zeros(((((nx, ny, nz, 1, 3)))))
data_warp_smooth[:,:,:,0,0] = data_warp_x_smooth
data_warp_smooth[:,:,:,0,1] = data_warp_y_smooth
data_warp_smooth_inverse = zeros(((((nx, ny, nz, 1, 3)))))
data_warp_smooth_inverse[:,:,:,0,0] = data_warp_x_smooth_inverse
data_warp_smooth_inverse[:,:,:,0,1] = data_warp_y_smooth_inverse
# Force header's parameter to intent so that the file may be recognised as a warping field by ants
hdr_warp.set_intent('vector', (), '')
hdr_warp_inverse.set_intent('vector', (), '')
img = Nifti1Image(data_warp_smooth, None, header=hdr_warp)
img_inverse = Nifti1Image(data_warp_smooth_inverse, None, header=hdr_warp_inverse)
save(img, filename=warp_forward_out)
print'\tFile ' + warp_forward_out + ' saved.'
save(img_inverse, filename=warp_inverse_out)
print'\tFile ' + warp_inverse_out + ' saved.'
def register_slicereg2d_bsplinesyn(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='BSplineSyN', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
                        fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
                        ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
                                                  'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
    """Slice-by-slice regularized registration (bsplinesyn) of two images.
    We first register slice-by-slice the two images using antsRegistration in 2D (algo: bsplinesyn) and create 3D warping
    fields (forward and inverse) by merging the 2D warping fields along z. Then we directly detect outliers and smooth
    the 3d warping fields applying a moving average hanning window on each pixel of the plan xOy (i.e. we consider that
    for a position (x,y) in the plan xOy, the variation along z of the vector of displacement (xo, yo, zo) of the
    warping field should not be too abrupt). Eventually, we generate two warping fields (forward and inverse) resulting
    from this regularized registration technique.
    The images must be of same size (otherwise generate_warping_field will not work for forward or inverse
    creation).
    input:
        fname_source: name of moving image (type: string)
        fname_dest: name of fixed image (type: string)
        window_length[optional]: size of window for moving average smoothing (type: int)
        paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
        fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
        warp_forward_out[optional]: name of output forward warp (type: string)
        warp_inverse_out[optional]: name of output inverse warp (type: string)
        factor[optional]: sensibility factor for outlier detection (higher the factor, smaller the detection)
          (type: int or float)
        remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
        verbose[optional]: display parameter (type: int, value: 0,1 or 2)
        ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
    output:
        creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
    """
    from nibabel import load, Nifti1Image, save
    from msct_smooth import smoothing_window, outliers_detection, outliers_completion
    from msct_register_regularized import register_images
    from numpy import apply_along_axis, zeros
    import sct_utils as sct
    from msct_image import Image
    name_warp_syn = 'Warp_total'
    # Registrating images
    # NOTE(review): register_images is assumed to write 'Warp_total.nii.gz' /
    # 'Warp_total_inverse.nii.gz' into the current directory — confirm in
    # msct_register_regularized.
    register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
    print'\nRegularizing warping fields along z axis...'
    print'\n\tSplitting warping fields ...'
    # sct.run('isct_c3d -mcs ' + name_warp_syn + '.nii.gz -oo ' + name_warp_syn + '_x.nii.gz ' + name_warp_syn + '_y.nii.gz')
    # sct.run('isct_c3d -mcs ' + name_warp_syn + '_inverse.nii.gz -oo ' + name_warp_syn + '_x_inverse.nii.gz ' + name_warp_syn + '_y_inverse.nii.gz')
    # Split each multi-component warp into separate x and y component volumes
    sct.run('sct_maths -i ' + name_warp_syn + '.nii.gz -w -mcs -o ' + name_warp_syn + '_x.nii.gz,' + name_warp_syn + '_y.nii.gz')
    sct.run('sct_maths -i ' + name_warp_syn + '_inverse.nii.gz -w -mcs -o ' + name_warp_syn + '_x_inverse.nii.gz,' + name_warp_syn + '_y_inverse.nii.gz')
    # Load the split component volumes (headers are kept for re-saving)
    data_warp_x = load(name_warp_syn + '_x.nii.gz').get_data()
    data_warp_y = load(name_warp_syn + '_y.nii.gz').get_data()
    hdr_warp = load(name_warp_syn + '_x.nii.gz').get_header()
    data_warp_x_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_data()
    data_warp_y_inverse = load(name_warp_syn + '_y_inverse.nii.gz').get_data()
    hdr_warp_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_header()
    #Outliers deletion: MAD detection applied per-voxel along the last axis (z)
    print'\n\tDeleting outliers...'
    mask_x_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x)
    mask_y_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y)
    mask_x_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x_inverse)
    mask_y_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y_inverse)
    #Outliers replacement by linear interpolation using closest non-outlier points
    data_warp_x_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_a)
    data_warp_y_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_a)
    data_warp_x_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_inverse_a)
    data_warp_y_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_inverse_a)
    #Smoothing of results along z (hanning moving average, per voxel)
    print'\n\tSmoothing results...'
    data_warp_x_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_no_outliers)
    data_warp_x_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_inverse_no_outliers)
    data_warp_y_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_no_outliers)
    data_warp_y_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_inverse_no_outliers)
    print'\nSaving regularized warping fields...'
    #Get image dimensions of destination image
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest).dim
    # Assemble 5D warp volumes (x, y, z, t, component); only the x and y
    # components are filled, the third component stays zero (2D warps).
    data_warp_smooth = zeros(((((nx, ny, nz, 1, 3)))))
    data_warp_smooth[:,:,:,0,0] = data_warp_x_smooth
    data_warp_smooth[:,:,:,0,1] = data_warp_y_smooth
    data_warp_smooth_inverse = zeros(((((nx, ny, nz, 1, 3)))))
    data_warp_smooth_inverse[:,:,:,0,0] = data_warp_x_smooth_inverse
    data_warp_smooth_inverse[:,:,:,0,1] = data_warp_y_smooth_inverse
    # Force header's parameter to intent so that the file may be recognised as a warping field by ants
    hdr_warp.set_intent('vector', (), '')
    hdr_warp_inverse.set_intent('vector', (), '')
    img = Nifti1Image(data_warp_smooth, None, header=hdr_warp)
    img_inverse = Nifti1Image(data_warp_smooth_inverse, None, header=hdr_warp_inverse)
    save(img, filename=warp_forward_out)
    print'\tFile ' + warp_forward_out + ' saved.'
    save(img_inverse, filename=warp_inverse_out)
    print'\tFile ' + warp_inverse_out + ' saved.'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.