# pylint: disable=missing-docstring
"""
Test the "external" interface in version 1.0.
This file should remain in the repository as long as we're still backwards
compatible with 1.0. This includes call signatures *including* return types!
"""
import datetime
from collections import OrderedDict
from unittest.mock import patch
from x690.types import ObjectIdentifier, OctetString
import puresnmp as snmp
from puresnmp.const import Version
from puresnmp.pdu import VarBind
from puresnmp.util import BulkResult
from .. import ByteTester, readbytes, readbytes_multiple
OID = ObjectIdentifier.from_string
TESTS_SHOULD_RUN = True # TODO should use importlib-metadata for this
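# The 1.0 interface pinned down below returns plain Python values rather than
# x690 wrapper types. A rough sketch of the shapes these tests exercise
# (illustrative only, not an exhaustive signature reference):
#
#   snmp.get(ip, community, oid)             -> str | bytes | int | ...
#   snmp.multiget(ip, community, [oid, ...]) -> [value, ...]
#   snmp.getnext(ip, community, oid)         -> VarBind(ObjectIdentifier, value)
#   snmp.walk(ip, community, oid)            -> iterator of VarBind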
def assert_of_types(values, types):
"""
Checks the types of a collection of values. Raises an AssertionError if
something is of the wrong type.
This assumes that *values* and *types* are collections of tuples. Example::
>>> values = [(1, 2), (1, 'foo')]
>>> expected_types = [(int, int), (int, str)]
>>> assert_of_types(values, expected_types)
"""
for result, expected in zip(values, types):
for result_value, result_type in zip(result, expected):
if not isinstance(result_value, result_type):
raise AssertionError(
"%r is not of type %r" % (result_value, result_type)
)
class TestGet(ByteTester):
def setUp(self):
patcher = patch("puresnmp.api.raw.Transport")
Transport = patcher.start() # pylint: disable=invalid-name
self.transport = Transport()
self.transport.get_request_id.return_value = 1
        self.addCleanup(patcher.stop)
def test_get(self):
response = readbytes("apiv1/get_response.hex")
self.transport.send.return_value = response
result = snmp.get("192.168.1.1", "private", "1.3.6.1.2.1.1.2.0")
self.assertEqual(result, "1.3.6.1.4.1.8072.3.2.10")
self.assertTrue(self.transport.send.called, "method was not called")
self.assertIsInstance(result, str)
def test_multiget(self):
response = readbytes("apiv1/multiget_response.hex")
self.transport.send.return_value = response
oids = ["1.3.6.1.2.1.1.4.0", "1.3.6.1.2.1.1.6.0"]
result = snmp.multiget(
"192.168.1.1", "private", oids, port=161, timeout=1
)
self.assertEqual(result, [b"root", b"On the move"])
self.assertTrue(self.transport.send.called, "method was not called")
for value in result:
self.assertIsInstance(value, bytes)
def test_multiget_v1(self):
response = readbytes("apiv1/multiget_response.hex")
self.transport.send.return_value = response
oids = ["1.3.6.1.2.1.1.4.0", "1.3.6.1.2.1.1.6.0"]
result = snmp.multiget(
"192.168.1.1",
"private",
oids,
port=161,
timeout=1,
version=Version.V1,
)
self.assertEqual(result, [b"root", b"On the move"])
self.assertTrue(self.transport.send.called, "method was not called")
for value in result:
self.assertIsInstance(value, bytes)
def test_getnext(self):
response = readbytes("apiv1/getnext_response.hex")
self.transport.send.return_value = response
result = snmp.getnext(
"192.168.1.1", "private", "1.3.6.1.2.1.1.6.0", port=161, timeout=1
)
expected = VarBind(OID("1.3.6.1.2.1.1.7.0"), 72)
self.assertEqual(result, expected)
self.assertTrue(self.transport.send.called, "method was not called")
self.assertIsInstance(result.oid, ObjectIdentifier)
self.assertIsInstance(result.value, int)
def test_bulkget(self):
response = readbytes("apiv1/bulkget_response.hex")
self.transport.send.return_value = response
result = snmp.bulkget(
"192.168.1.1",
"private",
scalar_oids=["1.3.6.1.2.1.1.1.0"],
repeating_oids=[
"1.3.6.1.2.1.3.1.1",
"1.3.6.1.2.1.4",
],
max_list_size=10,
port=161,
timeout=1,
)
expected = BulkResult(
{"1.3.6.1.2.1.1.2.0": "1.3.6.1.4.1.8072.3.2.10"},
OrderedDict(
[
("1.3.6.1.2.1.3.1.1.1.12.1.172.17.0.1", 12),
("1.3.6.1.2.1.4.1.0", 1),
("1.3.6.1.2.1.3.1.1.2.12.1.172.17.0.1", b"\x02B@j\xbf\xcd"),
("1.3.6.1.2.1.4.2.0", 64),
(
"1.3.6.1.2.1.3.1.1.3.12.1.172.17.0.1",
b"\xac\x11\x00\x01",
),
("1.3.6.1.2.1.4.3.0", 589),
("1.3.6.1.2.1.4.4.0", 0),
("1.3.6.1.2.1.4.5.0", 0),
("1.3.6.1.2.1.4.6.0", 0),
("1.3.6.1.2.1.4.7.0", 0),
("1.3.6.1.2.1.4.8.0", 0),
("1.3.6.1.2.1.4.9.0", 410),
("1.3.6.1.2.1.4.10.0", 409),
]
),
)
self.assertEqual(result, expected)
self.assertTrue(self.transport.send.called, "method was not called")
key_types = {type(k) for k in result.listing.keys()}
self.assertIsInstance(result.scalars["1.3.6.1.2.1.1.2.0"], str)
self.assertEqual(key_types, {str})
expected_types = [
int,
int,
bytes,
int,
bytes,
int,
int,
int,
int,
int,
int,
int,
int,
]
for value, type_ in zip(result.listing.values(), expected_types):
self.assertIsInstance(value, type_)
def test_bulkget_v1(self):
response = readbytes("apiv1/bulkget_response.hex")
self.transport.send.return_value = response
result = snmp.bulkget(
"192.168.1.1",
"private",
scalar_oids=["1.3.6.1.2.1.1.1.0"],
repeating_oids=[
"1.3.6.1.2.1.3.1.1",
"1.3.6.1.2.1.4",
],
max_list_size=10,
port=161,
timeout=1,
version=Version.V1,
)
expected = BulkResult(
{"1.3.6.1.2.1.1.2.0": "1.3.6.1.4.1.8072.3.2.10"},
OrderedDict(
[
("1.3.6.1.2.1.3.1.1.1.12.1.172.17.0.1", 12),
("1.3.6.1.2.1.4.1.0", 1),
("1.3.6.1.2.1.3.1.1.2.12.1.172.17.0.1", b"\x02B@j\xbf\xcd"),
("1.3.6.1.2.1.4.2.0", 64),
(
"1.3.6.1.2.1.3.1.1.3.12.1.172.17.0.1",
b"\xac\x11\x00\x01",
),
("1.3.6.1.2.1.4.3.0", 589),
("1.3.6.1.2.1.4.4.0", 0),
("1.3.6.1.2.1.4.5.0", 0),
("1.3.6.1.2.1.4.6.0", 0),
("1.3.6.1.2.1.4.7.0", 0),
("1.3.6.1.2.1.4.8.0", 0),
("1.3.6.1.2.1.4.9.0", 410),
("1.3.6.1.2.1.4.10.0", 409),
]
),
)
self.assertEqual(result, expected)
self.assertTrue(self.transport.send.called, "method was not called")
key_types = {type(k) for k in result.listing.keys()}
self.assertIsInstance(result.scalars["1.3.6.1.2.1.1.2.0"], str)
self.assertEqual(key_types, {str})
expected_types = [
int,
int,
bytes,
int,
bytes,
int,
int,
int,
int,
int,
int,
int,
int,
]
for value, type_ in zip(result.listing.values(), expected_types):
self.assertIsInstance(value, type_)
def test_bulkwalk(self):
response = readbytes_multiple("apiv1/bulkwalk_response.hex")
self.transport.send.side_effect = response
result = list(
snmp.bulkwalk("192.168.1.1", "private", ["1.3.6.1.2.1.1.9.1.4"])
)
expected = [
VarBind(OID("1.3.6.1.2.1.1.9.1.4.1"), datetime.timedelta(0)),
VarBind(OID("1.3.6.1.2.1.1.9.1.4.2"), datetime.timedelta(0)),
VarBind(OID("1.3.6.1.2.1.1.9.1.4.3"), datetime.timedelta(0)),
VarBind(OID("1.3.6.1.2.1.1.9.1.4.4"), datetime.timedelta(0)),
VarBind(OID("1.3.6.1.2.1.1.9.1.4.5"), datetime.timedelta(0)),
VarBind(OID("1.3.6.1.2.1.1.9.1.4.6"), datetime.timedelta(0)),
VarBind(OID("1.3.6.1.2.1.1.9.1.4.7"), datetime.timedelta(0)),
VarBind(OID("1.3.6.1.2.1.1.9.1.4.8"), datetime.timedelta(0)),
VarBind(OID("1.3.6.1.2.1.1.9.1.4.9"), datetime.timedelta(0)),
VarBind(OID("1.3.6.1.2.1.1.9.1.4.10"), datetime.timedelta(0)),
]
self.assertEqual(result, expected)
self.assertTrue(self.transport.send.called, "method was not called")
expected_types = [(ObjectIdentifier, datetime.timedelta)] * 10
returned_values = [(row.oid, row.value) for row in result]
assert_of_types(returned_values, expected_types)
def test_multigetnext(self):
response = readbytes("apiv1/multigetnext_response.hex")
self.transport.send.return_value = response
result = snmp.multigetnext(
"192.168.1.1", "private", ["1.3.6.1.2.1.3.1.1", "1.3.6.1.2.1.4"]
)
expected = [
VarBind(OID("1.3.6.1.2.1.3.1.1.1.12.1.172.17.0.1"), 12),
VarBind(OID("1.3.6.1.2.1.4.1.0"), 1),
]
self.assertEqual(result, expected)
self.assertTrue(self.transport.send.called, "method was not called")
expected_types = [
(ObjectIdentifier, int),
(ObjectIdentifier, int),
]
returned_values = [(row.oid, row.value) for row in result]
assert_of_types(returned_values, expected_types)
def test_multigetnext_v1(self):
response = readbytes("apiv1/multigetnext_response.hex")
self.transport.send.return_value = response
result = snmp.multigetnext(
"192.168.1.1",
"private",
["1.3.6.1.2.1.3.1.1", "1.3.6.1.2.1.4"],
version=Version.V1,
)
expected = [
VarBind(OID("1.3.6.1.2.1.3.1.1.1.12.1.172.17.0.1"), 12),
VarBind(OID("1.3.6.1.2.1.4.1.0"), 1),
]
self.assertEqual(result, expected)
self.assertTrue(self.transport.send.called, "method was not called")
expected_types = [
(ObjectIdentifier, int),
(ObjectIdentifier, int),
]
returned_values = [(row.oid, row.value) for row in result]
assert_of_types(returned_values, expected_types)
def test_multiset(self):
response = readbytes("apiv1/multiset_response.hex")
self.transport.send.return_value = response
result = snmp.multiset(
"127.0.0.1",
"private",
[
("1.3.6.1.2.1.1.4.0", OctetString(b"foo")),
("1.3.6.1.2.1.1.6.0", OctetString(b"bar")),
],
)
expected = {"1.3.6.1.2.1.1.4.0": b"foo", "1.3.6.1.2.1.1.6.0": b"bar"}
self.assertEqual(result, expected)
self.assertTrue(self.transport.send.called, "method was not called")
self.assertIsInstance(result, dict)
key_types = {type(key) for key in result.keys()}
self.assertEqual(key_types, {str})
value_types = {type(value) for value in result.values()}
self.assertEqual(value_types, {bytes})
def test_multiwalk(self):
response_1 = readbytes("apiv1/multiwalk_response_1.hex")
response_2 = readbytes("apiv1/multiwalk_response_2.hex")
response_3 = readbytes("apiv1/multiwalk_response_3.hex")
self.transport.send.side_effect = [
response_1,
response_2,
response_3,
]
result = snmp.multiwalk(
"127.0.0.1",
"private",
[
"1.3.6.1.2.1.2.2.1.1",
"1.3.6.1.2.1.1.2.1",
],
)
expected = [
VarBind(OID("1.3.6.1.2.1.2.2.1.1.1"), 1),
VarBind(OID("1.3.6.1.2.1.2.2.1.1.6"), 6),
]
result = list(result)
self.assertEqual(result, expected)
expected_types = [
(ObjectIdentifier, int),
(ObjectIdentifier, int),
]
returned_values = [(row.oid, row.value) for row in result]
assert_of_types(returned_values, expected_types)
def test_multiwalk_v1(self):
response_1 = readbytes("apiv1/multiwalk_response_1.hex")
response_2 = readbytes("apiv1/multiwalk_response_2.hex")
response_3 = readbytes("apiv1/multiwalk_response_3.hex")
self.transport.send.side_effect = [
response_1,
response_2,
response_3,
]
result = snmp.multiwalk(
"127.0.0.1",
"private",
[
"1.3.6.1.2.1.2.2.1.1",
"1.3.6.1.2.1.1.2.1",
],
version=Version.V1,
)
expected = [
VarBind(OID("1.3.6.1.2.1.2.2.1.1.1"), 1),
VarBind(OID("1.3.6.1.2.1.2.2.1.1.6"), 6),
]
result = list(result)
self.assertEqual(result, expected)
expected_types = [
(ObjectIdentifier, int),
(ObjectIdentifier, int),
]
returned_values = [(row.oid, row.value) for row in result]
assert_of_types(returned_values, expected_types)
def test_set(self):
self.transport.send.return_value = readbytes("apiv1/set_response.hex")
result = snmp.set(
"127.0.0.1", "private", "1.3.6.1.2.1.1.6.0", OctetString(b"Hello")
)
self.assertEqual(result, b"On the move")
self.assertTrue(self.transport.send.called, "method was not called")
self.assertIsInstance(result, bytes)
def test_table(self):
responses = readbytes_multiple("apiv1/table_response.hex")
self.transport.send.side_effect = responses
result = snmp.table("127.0.0.1", "private", "1.3.6.1.2.1.2.2")
# Sort the result for Python < 3.6 (unsorted dicts)
result = sorted(result, key=lambda item: item["0"])
expected = [
{
"0": "1",
"1": 1,
"10": 172,
"11": 2,
"12": 0,
"13": 0,
"14": 0,
"15": 0,
"16": 172,
"17": 2,
"18": 0,
"19": 0,
"2": b"lo",
"20": 0,
"21": 0,
"22": "0.0",
"3": 24,
"4": 65536,
"5": 10000000,
"6": b"",
"7": 1,
"8": 1,
"9": datetime.timedelta(0),
},
{
"0": "12",
"1": 12,
"10": 13952,
"11": 140,
"12": 0,
"13": 0,
"14": 0,
"15": 0,
"16": 4391,
"17": 51,
"18": 0,
"19": 0,
"2": b"eth0",
"20": 0,
"21": 0,
"22": "0.0",
"3": 6,
"4": 1500,
"5": 4294967295,
"6": b"\x02B\xac\x11\x00\x02",
"7": 1,
"8": 1,
"9": datetime.timedelta(0),
},
]
self.assertEqual(result, expected)
self.assertEqual(self.transport.send.call_count, 45)
for row in result:
self.assertIsInstance(row, dict)
dict_types = {type(key) for key in row.keys()}
self.assertEqual(dict_types, {str})
def test_walk(self):
responses = readbytes_multiple("apiv1/walk_response.hex")
self.transport.send.side_effect = responses
result = snmp.walk("127.0.0.1", "private", "1.3.6.1.2.1.2.2.1.1")
result = list(result)
expected = [
VarBind(oid=OID("1.3.6.1.2.1.2.2.1.1.1"), value=1),
VarBind(oid=OID("1.3.6.1.2.1.2.2.1.1.12"), value=12),
]
self.assertCountEqual(result, expected)
self.assertEqual(self.transport.send.call_count, 3)
expected_types = [
(ObjectIdentifier, int),
(ObjectIdentifier, int),
]
returned_values = [(row.oid, row.value) for row in result]
assert_of_types(returned_values, expected_types)
__author__ = "Andre Merzky, Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
""" Exception classes
"""
import sys
import weakref
import operator
import traceback
# We have the choice of doing signature checks in exceptions, or to raise saga
# exceptions on signature checks -- we cannot do both. At this point, we use
# the saga.exceptions in signatures, thus can *not* have signature checks
# here...
#
# import saga.base as sb
# ------------------------------------------------------------------------------
#
class SagaException (Exception) :
"""
The Exception class encapsulates information about error conditions
encountered in SAGA.
    In addition to the error message (e.message), the exception also provides
    a trace to the code location where the error condition was raised
    (e.traceback).
B{Example}::
try :
file = saga.filesystem.File ("sftp://alamo.futuregrid.org/tmp/data1.dat")
except saga.Timeout as to :
# maybe the network is down?
print "connection timed out"
except saga.Exception as e :
# something else went wrong
print "Exception occurred: %s %s" % (e, e.traceback)
    There are cases where multiple backends can report errors at the same time.
    In that case, the saga-python implementation will collect the exceptions,
    sort them by their 'rank', and return the highest ranked one. All other
    caught exceptions are available via :func:`get_all_exceptions`, or via the
    `exceptions` property.
    The rank of an exception defines its specificity: in general terms, the
    higher the rank, the better defined / known the cause of the problem is.
"""
# --------------------------------------------------------------------------
#
def __init__ (self, msg, parent=None, api_object=None, from_log=False) :
"""
Create a new exception object.
:param msg: The exception message.
:param parent: Original exception
:param api_object: The object that has caused the exception, default is
None.
:param from_log: Exception c'tor originates from the static log_
member method (ignore in exception stack!)
"""
Exception.__init__(self, msg)
self._plain_message = msg
self._exceptions = [self]
self._top_exception = self
        self._parent = parent
        self._ptype = type(parent).__name__ # parent exception type
self._stype = type(self ).__name__ # own exception type
ignore_stack = 2
if from_log :
ignore_stack += 1
if api_object :
self._object = weakref.ref (api_object)
else :
self._object = None
# did we get a parent exception?
if parent :
# if so, then this exception is likely created in some 'except'
            # clause, as a reaction to a previously caught exception (the
# parent). Thus we append the message of the parent to our own
# message, but keep the parent's traceback (after all, the original
# exception location is what we are interested in).
#
if isinstance (parent, SagaException) :
# that all works nicely when parent is our own exception type...
self._traceback = parent.traceback
frame = traceback.extract_stack ()[- ignore_stack]
line = "%s +%s (%s) : %s" % frame
self._message = " %-20s: %s (%s)\n%s" \
% (self._stype, msg, line, parent.msg)
else :
if self._stype != "NoneType" :
# ... but if parent is a native (or any other) exception type,
# we don't have a traceback really -- so we dig it out of
# sys.exc_info.
trace = sys.exc_info ()[2]
stack = traceback.extract_tb (trace)
traceback_list = traceback.format_list (stack)
self._traceback = "".join (traceback_list)
# the message composition is very similar -- we just inject the
# parent exception type inconspicuously somewhere (above that
# was part of 'parent.message' already).
frame = traceback.extract_stack ()[- ignore_stack]
line = "%s +%s (%s) : %s" % frame
self._message = " %-20s: %s (%s)\n %-20s: %s" \
% (self._stype, msg, line, self._ptype, parent)
else :
# if we don't have a parent, we are a 1st principle exception,
# i.e. a reaction to some genuine code error. Thus we extract the
# traceback from exactly where we are in the code (the last stack
# frame will be the call to this exception constructor), and we
# create the original exception message from 'stype' and 'message'.
stack = traceback.extract_stack ()
traceback_list = traceback.format_list (stack)
self._traceback = "".join (traceback_list[:-1])
frame = traceback.extract_stack ()[- ignore_stack -1]
line = "%s +%s (%s) : %s" % frame
self._message = "%s (%s)" % (msg, line)
        # we can't do that earlier as self._message was not set up before
self._messages = [self._message]
# --------------------------------------------------------------------------
#
def __str__ (self) :
return self.get_message ()
# --------------------------------------------------------------------------
#
def __repr__ (self) :
return "%s\n%s" % (self._message, self._traceback)
# --------------------------------------------------------------------------
#
def _clone (self) :
""" This method is used internally -- see :func:`_get_exception_stack`."""
clone = self.__class__ ("")
clone._parent = self._parent
clone._object = self._object
clone._message = self._message
clone._messages = self._messages
        clone._exceptions = self._exceptions
clone._traceback = self._traceback
clone._stype = self._stype
clone._ptype = self._ptype
return clone
# --------------------------------------------------------------------------
#
@classmethod
def _log (cls, logger, msg, parent=None, api_object=None, level='error'):
""" this class method allows to log the exception message while
constructing a SAGA exception, like::
# raise an exception, no logging
raise saga.IncorrectState ("File is not open")
# raise an exception, log as error event (error level is default)
raise saga.IncorrectState._log (self._logger, "File is not open")
# raise an exception, log as warning event
            raise saga.IncorrectState._log (self._logger, "File is not open", level='warning')
This way, the 'raise' remains clearly in the code, as that is the
dominating semantics of the call.
"""
log_method = logger.error
try :
log_method = getattr (logger, level.lower())
        except AttributeError :
            sys.stderr.write ("unknown log level '%s'\n" % level)
log_method ("%s: %s" % (cls.__name__, msg))
return cls (msg, parent=parent, api_object=api_object, from_log=True)
# --------------------------------------------------------------------------
#
def get_message (self) :
""" Return the exception message as a string. That message is also
available via the 'message' property."""
return self._message
# --------------------------------------------------------------------------
#
def _get_plain_message (self) :
""" Return the plain error message as a string. """
        return self._plain_message
# --------------------------------------------------------------------------
#
def get_type (self):
""" Return the type of the exception as string.
"""
return self._stype
# --------------------------------------------------------------------------
#
def get_object (self) :
""" Return the object that raised this exception. An object may not
always be available -- for example, exceptions raised during object
creation may not have the option to keep an incomplete object instance
around. In those cases, this method will return 'None'. Either way,
the object is also accessible via the 'object' property.
"""
        # object is a weak_ref which may not be set at all, or may have been
        # garbage collected - we simply return 'None' then
        if self._object :
            return self._object ()
        return None
# --------------------------------------------------------------------------
#
def _add_exception (self, e) :
"""
Some sub-operation raised a SAGA exception, but other exceptions may
        be caught later on. In that case the later exceptions can be added to
the original one with :func:`_add_exception`\(e). Once all exceptions are
collected, a call to :func:`_get_exception_stack`\() will return a new
exception which is selected from the stack by rank and order. All other
exceptions can be accessed from the returned exception by
:func:`get_all_exceptions`\() -- those exceptions are then also ordered
by rank.
"""
self._exceptions.append (e)
self._messages.append (e.message)
if e._rank > self._top_exception._rank :
self._top_exception = e
# --------------------------------------------------------------------------
#
def _get_exception_stack (self) :
"""
This method is internally used by the saga-python engine, and is only
relevant for operations which (potentially) bind to more than one
adaptor.
"""
if self._top_exception == self :
return self
# damned, we can't simply recast ourself to the top exception type -- so
# we have to create a new exception with that type, and copy all state
# over...
# create a new exception with same type as top_exception
clone = self._top_exception._clone ()
clone._exceptions = []
clone._messages = []
# copy all state over
for e in sorted (self._exceptions, key=operator.attrgetter ('_rank'), reverse=True) :
clone._exceptions.append (e)
clone._messages.append (e._message)
return clone
# --------------------------------------------------------------------------
#
def get_all_exceptions (self) :
return self._exceptions
# --------------------------------------------------------------------------
#
def get_all_messages (self) :
return self._messages
# --------------------------------------------------------------------------
#
def get_traceback (self) :
return self._traceback
# --------------------------------------------------------------------------
#
message = property (get_message) # string
object = property (get_object) # object type
type = property (get_type) # exception type
exceptions = property (get_all_exceptions) # list [Exception]
messages = property (get_all_messages) # list [string]
traceback = property (get_traceback) # string
# ------------------------------------------------------------------------------
#
class NotImplemented(SagaException):
""" SAGA-Python does not implement this method or class. (rank: 11)"""
_rank = 11
def __init__ (self, msg, parent=None, api_object=None, from_log=False) :
SagaException.__init__ (self, msg, parent, api_object, from_log)
# ------------------------------------------------------------------------------
#
class IncorrectURL(SagaException):
""" The given URL could not be interpreted, for example due to an incorrect
/ unknown schema. (rank: 10)"""
_rank = 10
def __init__ (self, msg, parent=None, api_object=None, from_log=False) :
SagaException.__init__ (self, msg, parent, api_object, from_log)
# ------------------------------------------------------------------------------
#
class BadParameter(SagaException):
""" A given parameter is out of bound or ill formatted. (rank: 9)"""
_rank = 9
def __init__ (self, msg, parent=None, api_object=None, from_log=False) :
SagaException.__init__ (self, msg, parent, api_object, from_log)
# ------------------------------------------------------------------------------
#
class AlreadyExists(SagaException):
""" The entity to be created already exists. (rank: 8)"""
_rank = 8
def __init__ (self, msg, parent=None, api_object=None, from_log=False) :
SagaException.__init__ (self, msg, parent, api_object, from_log)
# ------------------------------------------------------------------------------
#
class DoesNotExist(SagaException):
""" An operation tried to access a non-existing entity. (rank: 7)"""
_rank = 7
def __init__ (self, msg, parent=None, api_object=None, from_log=False) :
SagaException.__init__ (self, msg, parent, api_object, from_log)
# ------------------------------------------------------------------------------
#
class IncorrectState(SagaException):
""" The operation is not allowed on the entity in its current state. (rank: 6)"""
_rank = 6
def __init__ (self, msg, parent=None, api_object=None, from_log=False) :
SagaException.__init__ (self, msg, parent, api_object, from_log)
# ------------------------------------------------------------------------------
#
class PermissionDenied(SagaException):
""" The used identity is not permitted to perform the requested operation. (rank: 5)"""
_rank = 5
def __init__ (self, msg, parent=None, api_object=None, from_log=False) :
SagaException.__init__ (self, msg, parent, api_object, from_log)
# ------------------------------------------------------------------------------
#
class AuthorizationFailed(SagaException):
""" The backend could not establish a valid identity. (rank: 4)"""
_rank = 4
def __init__ (self, msg, parent=None, api_object=None, from_log=False) :
SagaException.__init__ (self, msg, parent, api_object, from_log)
# ------------------------------------------------------------------------------
#
class AuthenticationFailed(SagaException):
""" The backend could not establish a valid identity. (rank: 3)"""
_rank = 3
def __init__ (self, msg, parent=None, api_object=None, from_log=False) :
SagaException.__init__ (self, msg, parent, api_object, from_log)
# ------------------------------------------------------------------------------
#
class Timeout(SagaException):
""" The interaction with the backend times out. (rank: 2)"""
_rank = 2
def __init__ (self, msg, parent=None, api_object=None, from_log=False) :
SagaException.__init__ (self, msg, parent, api_object, from_log)
# ------------------------------------------------------------------------------
#
class NoSuccess(SagaException):
""" Some other error occurred. (rank: 1)"""
_rank = 1
def __init__ (self, msg, parent=None, api_object=None, from_log=False) :
SagaException.__init__ (self, msg, parent, api_object, from_log)
import json
# django imports
from django.contrib.auth.decorators import permission_required
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
# lfs imports
import lfs.core.utils
from lfs.core.utils import LazyEncoder
from lfs.caching.utils import lfs_get_object_or_404
from lfs.catalog.models import StaticBlock
from lfs.catalog.models import File
from lfs.manage.static_blocks.forms import StaticBlockForm
# views
@permission_required("core.manage_shop")
def manage_static_blocks(request):
"""Dispatches to the first static block or to the add static block form.
"""
try:
sb = StaticBlock.objects.all()[0]
url = reverse("lfs_manage_static_block", kwargs={"id": sb.id})
except IndexError:
url = reverse("lfs_manage_no_static_blocks")
return HttpResponseRedirect(url)
@permission_required("core.manage_shop")
def manage_static_block(request, id, template_name="manage/static_block/static_block.html"):
"""Displays the main form to manage static blocks.
"""
sb = get_object_or_404(StaticBlock, pk=id)
if request.method == "POST":
form = StaticBlockForm(instance=sb, data=request.POST)
if form.is_valid():
form.save()
return lfs.core.utils.set_message_cookie(
url=reverse("lfs_manage_static_block", kwargs={"id": sb.id}),
msg=_(u"Static block has been saved."),
)
else:
form = StaticBlockForm(instance=sb)
return render_to_response(template_name, RequestContext(request, {
"static_block": sb,
"static_blocks": StaticBlock.objects.all(),
"files": files(request, sb),
"form": form,
"current_id": int(id),
}))
@permission_required("core.manage_shop")
def no_static_blocks(request, template_name="manage/static_block/no_static_blocks.html"):
"""Displays that no static blocks exist.
"""
return render_to_response(template_name, RequestContext(request, {}))
# parts
@permission_required("core.manage_shop")
def files(request, sb, template_name="manage/static_block/files.html"):
"""Displays the files tab of the passed static block.
"""
return render_to_string(template_name, RequestContext(request, {
"static_block": sb,
}))
@permission_required("core.manage_shop")
def list_files(request, sb, template_name="manage/static_block/files-list.html"):
"""Displays the files tab of the passed static block.
"""
return files(request, sb, template_name=template_name)
# actions
@permission_required("core.manage_shop")
def update_files(request, id):
"""
"""
static_block = lfs_get_object_or_404(StaticBlock, pk=id)
action = request.POST.get("action")
if action == "delete":
        message = _(u"Files have been deleted.")
for key in request.POST.keys():
if key.startswith("delete-"):
try:
id = key.split("-")[1]
                    File.objects.get(pk=id).delete()
except (IndexError, ObjectDoesNotExist):
pass
elif action == "update":
        message = _(u"Files have been updated.")
for key, value in request.POST.items():
if key.startswith("title-"):
id = key.split("-")[1]
try:
file = File.objects.get(pk=id)
                except ObjectDoesNotExist:
pass
else:
file.title = value
file.save()
elif key.startswith("position-"):
try:
id = key.split("-")[1]
file = File.objects.get(pk=id)
except (IndexError, ObjectDoesNotExist):
pass
else:
file.position = value
file.save()
for i, file in enumerate(static_block.files.all()):
file.position = (i + 1) * 10
file.save()
html = (
("#files-list", list_files(request, static_block)),
)
result = json.dumps({
"html": html,
"message": message,
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
def reload_files(request, id):
"""
"""
static_block = lfs_get_object_or_404(StaticBlock, pk=id)
result = list_files(request, static_block)
result = json.dumps({
"html": result,
"message": _(u"Files has been added."),
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
def add_files(request, id):
"""Adds files to static block with passed id.
"""
static_block = lfs_get_object_or_404(StaticBlock, pk=id)
if request.method == "POST":
for file_content in request.FILES.getlist("files[]"):
file = File(content=static_block, title=file_content.name)
file.file.save(file_content.name, file_content, save=True)
ctype = ContentType.objects.get_for_model(static_block)
# Refresh positions
for i, file in enumerate(File.objects.filter(content_type=ctype, content_id=static_block.id)):
file.position = (i + 1) * 10
file.save()
result = json.dumps({"name": file_content.name, "type": "image/jpeg", "size": "123456789"})
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
def add_static_block(request, template_name="manage/static_block/add_static_block.html"):
"""Provides a form to add a new static block.
"""
if request.method == "POST":
form = StaticBlockForm(data=request.POST)
if form.is_valid():
new_sb = form.save()
return lfs.core.utils.set_message_cookie(
url=reverse("lfs_manage_static_block", kwargs={"id": new_sb.id}),
msg=_(u"Static block has been added."),
)
else:
form = StaticBlockForm()
return render_to_response(template_name, RequestContext(request, {
"form": form,
"static_blocks": StaticBlock.objects.all(),
"came_from": (request.POST if request.method == 'POST' else request.GET).get("came_from",
reverse("lfs_manage_static_blocks")),
}))
@permission_required("core.manage_shop")
def preview_static_block(request, id, template_name="manage/static_block/preview.html"):
"""Displays a preview of an static block
"""
sb = get_object_or_404(StaticBlock, pk=id)
return render_to_response(template_name, RequestContext(request, {
"static_block": sb,
}))
@permission_required("core.manage_shop")
@require_POST
def sort_static_blocks(request):
"""Sorts static blocks after drag 'n drop.
"""
static_blocks = request.POST.get("objs", "").split('&')
assert (isinstance(static_blocks, list))
if len(static_blocks) > 0:
position = 10
for sb_str in static_blocks:
sb_id = sb_str.split('=')[1]
sb_obj = StaticBlock.objects.get(pk=sb_id)
sb_obj.position = position
sb_obj.save()
position = position + 10
result = json.dumps({
"message": _(u"The static blocks have been sorted."),
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
@require_POST
def delete_static_block(request, id):
"""Deletes static block with passed id.
"""
sb = get_object_or_404(StaticBlock, pk=id)
    # First we unlink all referencing categories. Otherwise they would be
    # deleted along with the static block.
for category in sb.categories.all():
category.static_block = None
category.save()
sb.delete()
return lfs.core.utils.set_message_cookie(
url=reverse("lfs_manage_static_blocks"),
msg=_(u"Static block has been deleted."),
)
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
from __future__ import print_function
import os.path
import socket
import ssl
import sys
import eventlet
import eventlet.wsgi
import greenlet
from oslo.config import cfg
from oslo.utils import excutils
from paste import deploy
import routes.middleware
import webob.dec
import webob.exc
from nova import exception
from nova.i18n import _, _LE
from nova.openstack.common import log as logging
wsgi_opts = [
cfg.StrOpt('api_paste_config',
default="api-paste.ini",
help='File name for the paste.deploy config for nova-api'),
cfg.StrOpt('wsgi_log_format',
default='%(client_ip)s "%(request_line)s" status: %(status_code)s'
' len: %(body_length)s time: %(wall_seconds).7f',
help='A python format string that is used as the template to '
'generate log lines. The following values can be formatted '
'into it: client_ip, date_time, request_line, status_code, '
'body_length, wall_seconds.'),
cfg.StrOpt('ssl_ca_file',
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('ssl_cert_file',
help="SSL certificate of API server"),
cfg.StrOpt('ssl_key_file',
help="SSL private key of API server"),
cfg.IntOpt('tcp_keepidle',
default=600,
help="Sets the value of TCP_KEEPIDLE in seconds for each "
"server socket. Not supported on OS X."),
cfg.IntOpt('wsgi_default_pool_size',
default=1000,
help="Size of the pool of greenthreads used by wsgi"),
cfg.IntOpt('max_header_line',
default=16384,
help="Maximum line size of message headers to be accepted. "
"max_header_line may need to be increased when using "
"large tokens (typically those generated by the "
"Keystone v3 API with big service catalogs)."),
]
CONF = cfg.CONF
CONF.register_opts(wsgi_opts)
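# The options above map to a nova.conf section roughly like the following
# sketch (values are illustrative only):
#
#   [DEFAULT]
#   api_paste_config = api-paste.ini
#   ssl_cert_file = /etc/nova/ssl/server.crt
#   ssl_key_file = /etc/nova/ssl/server.key
#   tcp_keepidle = 600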
LOG = logging.getLogger(__name__)
class Server(object):
"""Server class to manage a WSGI server, serving a WSGI application."""
default_pool_size = CONF.wsgi_default_pool_size
def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None,
protocol=eventlet.wsgi.HttpProtocol, backlog=128,
use_ssl=False, max_url_len=None):
"""Initialize, but do not start, a WSGI server.
:param name: Pretty name for logging.
:param app: The WSGI application to serve.
:param host: IP address to serve the application.
        :param port: Port number to serve the application.
:param pool_size: Maximum number of eventlets to spawn concurrently.
:param backlog: Maximum number of queued connections.
:param max_url_len: Maximum length of permitted URLs.
:returns: None
:raises: nova.exception.InvalidInput
"""
# Allow operators to customize http requests max header line size.
eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
self.name = name
self.app = app
self._server = None
self._protocol = protocol
self.pool_size = pool_size or self.default_pool_size
self._pool = eventlet.GreenPool(self.pool_size)
self._logger = logging.getLogger("nova.%s.wsgi.server" % self.name)
self._wsgi_logger = logging.WritableLogger(self._logger)
self._use_ssl = use_ssl
self._max_url_len = max_url_len
if backlog < 1:
raise exception.InvalidInput(
                reason='The backlog must be at least 1')
bind_addr = (host, port)
# TODO(dims): eventlet's green dns/socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
try:
info = socket.getaddrinfo(bind_addr[0],
bind_addr[1],
socket.AF_UNSPEC,
socket.SOCK_STREAM)[0]
family = info[0]
bind_addr = info[-1]
except Exception:
family = socket.AF_INET
try:
self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
except EnvironmentError:
LOG.error(_LE("Could not bind to %(host)s:%(port)s"),
{'host': host, 'port': port})
raise
(self.host, self.port) = self._socket.getsockname()[0:2]
LOG.info(_("%(name)s listening on %(host)s:%(port)s"),
{'name': self.name, 'host': self.host, 'port': self.port})
def start(self):
"""Start serving a WSGI application.
:returns: None
"""
        # The server socket object will be closed after the server exits,
        # but the underlying file descriptor will remain open and would
        # otherwise raise a "bad file descriptor" error. We therefore
        # duplicate the socket object to keep its file descriptor usable.
dup_socket = self._socket.dup()
if self._use_ssl:
try:
ca_file = CONF.ssl_ca_file
cert_file = CONF.ssl_cert_file
key_file = CONF.ssl_key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(
_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(
_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(
_("Unable to find key_file : %s") % key_file)
if self._use_ssl and (not cert_file or not key_file):
raise RuntimeError(
_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
ssl_kwargs = {
'server_side': True,
'certfile': cert_file,
'keyfile': key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl_ca_file:
ssl_kwargs['ca_certs'] = ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
dup_socket = eventlet.wrap_ssl(dup_socket,
**ssl_kwargs)
dup_socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
# sockets can hang around forever without keepalive
dup_socket.setsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE, 1)
# This option isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
dup_socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
CONF.tcp_keepidle)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to start %(name)s on %(host)s"
":%(port)s with SSL support"),
{'name': self.name, 'host': self.host,
'port': self.port})
wsgi_kwargs = {
'func': eventlet.wsgi.server,
'sock': dup_socket,
'site': self.app,
'protocol': self._protocol,
'custom_pool': self._pool,
'log': self._wsgi_logger,
'log_format': CONF.wsgi_log_format,
'debug': False
}
if self._max_url_len:
wsgi_kwargs['url_length_limit'] = self._max_url_len
self._server = eventlet.spawn(**wsgi_kwargs)
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self._pool.resize(self.pool_size)
def stop(self):
"""Stop this server.
This is not a very nice action, as currently the method by which a
server is stopped is by killing its eventlet.
:returns: None
"""
LOG.info(_("Stopping WSGI server."))
if self._server is not None:
# Resize pool to stop new requests from being processed
self._pool.resize(0)
self._server.kill()
def wait(self):
"""Block, until the server has stopped.
Waits on the server's eventlet to finish, then returns.
:returns: None
"""
try:
if self._server is not None:
self._pool.waitall()
self._server.wait()
except greenlet.GreenletExit:
LOG.info(_("WSGI server has stopped."))
class Request(webob.Request):
pass
class Application(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [app:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[app:wadl]
latest_version = 1.3
paste.app_factory = nova.api.fancy_api:Wadl.factory
which would result in a call to the `Wadl` class as
import nova.api.fancy_api
fancy_api.Wadl(latest_version='1.3')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
return cls(**local_config)
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
# Any of the following objects work as responses:
# Option 1: simple string
res = 'message\n'
# Option 2: a nicely formatted HTTP exception page
res = exc.HTTPForbidden(explanation='Nice try')
# Option 3: a webob Response object (in case you need to play with
# headers, or you want to be treated like an iterable, or or or)
res = Response();
res.app_iter = open('somefile')
# Option 4: any wsgi app to be run next
res = self.application
# Option 5: you can get a Response object for a wsgi app, too, to
# play with headers etc
res = req.get_response(self.application)
# You can then just return your response...
return res
# ... or set req.response and return None.
req.response = res
See the end of http://pythonpaste.org/webob/modules/dec.html
for more info.
"""
raise NotImplementedError(_('You must implement __call__'))
class Middleware(Application):
"""Base WSGI middleware.
These classes require an application to be
initialized that will be called next. By default the middleware will
simply call its wrapped app, or you can override __call__ to customize its
behavior.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = nova.api.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import nova.api.analytics
analytics.Analytics(app_from_paste, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
return cls(app, **local_config)
return _factory
def __init__(self, application):
self.application = application
def process_request(self, req):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
response = self.process_request(req)
if response:
return response
response = req.get_response(self.application)
return self.process_response(response)
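# A minimal middleware sketch (illustrative only) that uses the
# process_request/process_response hooks described above instead of
# overriding __call__:
#
#   class RequireAuthHeader(Middleware):
#       def process_request(self, req):
#           if 'X-Auth-Token' not in req.headers:
#               return webob.exc.HTTPUnauthorized()
#           return None  # fall through to the wrapped application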
class Debug(Middleware):
"""Helper class for debugging a WSGI application.
Can be inserted into any WSGI application chain to get information
about the request and response.
"""
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
print(('*' * 40) + ' REQUEST ENVIRON')
for key, value in req.environ.items():
print(key, '=', value)
print()
resp = req.get_response(self.application)
print(('*' * 40) + ' RESPONSE HEADERS')
for (key, value) in resp.headers.iteritems():
print(key, '=', value)
print()
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Iterator that prints the contents of a wrapper string."""
print(('*' * 40) + ' BODY')
for part in app_iter:
sys.stdout.write(part)
sys.stdout.flush()
yield part
print()
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be an object that can route
the request to the action-specific method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, '/svrlist', controller=sc, action='list')
# Actions are all implicitly defined
mapper.resource('server', 'servers', controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify(RequestClass=Request)
def _dispatch(req):
"""Dispatch the request to the appropriate controller.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return webob.exc.HTTPNotFound()
app = match['controller']
return app
class Loader(object):
"""Used to load WSGI applications from paste configurations."""
def __init__(self, config_path=None):
"""Initialize the loader, and attempt to find the config.
:param config_path: Full or relative path to the paste config.
:returns: None
"""
self.config_path = None
config_path = config_path or CONF.api_paste_config
if not os.path.isabs(config_path):
self.config_path = CONF.find_file(config_path)
elif os.path.exists(config_path):
self.config_path = config_path
if not self.config_path:
raise exception.ConfigNotFound(path=config_path)
def load_app(self, name):
"""Return the paste URLMap wrapped WSGI application.
:param name: Name of the application to load.
:returns: Paste URLMap object wrapping the requested application.
:raises: `nova.exception.PasteAppNotFound`
"""
try:
LOG.debug("Loading app %(name)s from %(path)s",
{'name': name, 'path': self.config_path})
return deploy.loadapp("config:%s" % self.config_path, name=name)
except LookupError as err:
LOG.error(err)
raise exception.PasteAppNotFound(name=name, path=self.config_path)
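# A typical Loader usage sketch (the application name is illustrative and must
# match a section in the paste config):
#
#   loader = Loader()
#   app = loader.load_app("my_api_app")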
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test base for tf.data service tests."""
import tempfile
from absl import flags
from tensorflow.core.protobuf import service_config_pb2
from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.experimental.service import server_lib
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
# This will be resolved to a tmp directory by `TestCluster`.
TMP_WORK_DIR = "tmp_work_dir_placeholder"
# `""` indicates not to use a work directory.
NO_WORK_DIR = ""
# We use a faster than normal heartbeat interval so that tests run faster.
TEST_HEARTBEAT_INTERVAL_MS = 100
TEST_DISPATCHER_TIMEOUT_MS = 1000
PROTOCOL = "grpc"
TRANSFER_PROTOCOL = flags.DEFINE_string(
"tf_data_service_test_transfer_protocol", None, "Data plane protocol.")
def all_cluster_configurations():
with_work_dir = combinations.combine(
work_dir=TMP_WORK_DIR, fault_tolerant_mode=[True, False])
without_work_dir = combinations.combine(
work_dir=NO_WORK_DIR, fault_tolerant_mode=False)
return with_work_dir + without_work_dir
def _make_worker(dispatcher_address,
data_transfer_protocol,
shutdown_quiet_period_ms=0,
port=0,
worker_tags=None):
"""Creates a worker server."""
defaults = server_lib.WorkerConfig(dispatcher_address=dispatcher_address)
config_proto = service_config_pb2.WorkerConfig(
dispatcher_address=dispatcher_address,
worker_address=defaults.worker_address,
port=port,
protocol=PROTOCOL,
worker_tags=worker_tags,
heartbeat_interval_ms=TEST_HEARTBEAT_INTERVAL_MS,
dispatcher_timeout_ms=TEST_DISPATCHER_TIMEOUT_MS,
data_transfer_protocol=data_transfer_protocol,
data_transfer_address=defaults.worker_address,
shutdown_quiet_period_ms=shutdown_quiet_period_ms)
return server_lib.WorkerServer(config_proto, start=False)
# pylint: disable=protected-access
class TestWorker(object):
"""A tf.data service worker."""
def __init__(self,
dispatcher_address,
shutdown_quiet_period_ms,
data_transfer_protocol=None,
worker_tags=None):
self._dispatcher_address = dispatcher_address
self._shutdown_quiet_period_ms = shutdown_quiet_period_ms
self._server = _make_worker(
dispatcher_address,
data_transfer_protocol,
shutdown_quiet_period_ms,
worker_tags=worker_tags)
self._running = False
self._data_transfer_protocol = data_transfer_protocol
def stop(self):
self._server._stop()
self._running = False
def start(self):
self._server.start()
self._port = int(self._server._address.split(":")[1])
self._running = True
def restart(self, use_same_port=True):
"""Restarts the worker, stopping it first if it is already running."""
if self._running:
self.stop()
port = 0
if use_same_port:
port = self._port
self._server = _make_worker(self._dispatcher_address,
self._data_transfer_protocol,
self._shutdown_quiet_period_ms, port)
self._server.start()
self._port = int(self._server._address.split(":")[1])
self._running = True
def join(self):
self._server.join()
def num_tasks(self):
return self._server._num_tasks()
def worker_address(self):
return self._server._address
class TestCluster(object):
"""Test tf.data service cluster."""
def __init__(self,
num_workers,
dispatcher_port=0,
work_dir=TMP_WORK_DIR,
fault_tolerant_mode=True,
job_gc_check_interval_ms=None,
job_gc_timeout_ms=None,
worker_shutdown_quiet_period_ms=0,
start=True,
data_transfer_protocol=None):
"""Creates a tf.data service test cluster.
Args:
num_workers: The number of workers to initially add to the cluster.
dispatcher_port: The port to use for the dispatcher.
work_dir: The work directory to use for the dispatcher. If set to
`TMP_WORK_DIR`, the cluster will create a new temporary directory to use
as the work directory. If set to `NO_WORK_DIR`, no work directory will
be used.
fault_tolerant_mode: Whether the dispatcher should write its state to a
journal so that it can recover from restarts.
job_gc_check_interval_ms: How often the dispatcher should scan through to
delete old and unused jobs, in milliseconds.
job_gc_timeout_ms: How long a job needs to be unused before it becomes a
candidate for garbage collection, in milliseconds.
worker_shutdown_quiet_period_ms: When shutting down a worker, how long to
wait for the gRPC server to process the final requests.
start: Whether to immediately start the servers in the cluster. If
`False`, the servers can be started later by calling
`start_dispatcher()` and `start_workers()`.
data_transfer_protocol: (Optional.) The protocol to use for transferring
        data with the tf.data service. The default can be controlled via the
        tf_data_service_test_transfer_protocol flag.
"""
if work_dir == TMP_WORK_DIR:
work_dir = tempfile.mkdtemp(dir=googletest.GetTempDir())
self._worker_shutdown_quiet_period_ms = worker_shutdown_quiet_period_ms
if not data_transfer_protocol:
data_transfer_protocol = TRANSFER_PROTOCOL.value
self._data_transfer_protocol = data_transfer_protocol
self.dispatcher = server_lib.DispatchServer(
server_lib.DispatcherConfig(
port=dispatcher_port,
work_dir=work_dir,
protocol=PROTOCOL,
fault_tolerant_mode=fault_tolerant_mode,
job_gc_check_interval_ms=job_gc_check_interval_ms,
job_gc_timeout_ms=job_gc_timeout_ms),
start=start)
self.workers = []
for _ in range(num_workers):
self.add_worker(start=start)
def dispatcher_address(self):
return self.dispatcher.target.split("://")[1]
def add_worker(self, start=True):
worker = TestWorker(self.dispatcher_address(),
self._worker_shutdown_quiet_period_ms,
self._data_transfer_protocol)
if start:
worker.start()
self.workers.append(worker)
def start_dispatcher(self):
self.dispatcher.start()
def start_workers(self):
for worker in self.workers:
worker.start()
def stop_dispatcher(self):
# pylint: disable=protected-access
self.dispatcher._stop()
def stop_workers(self):
for worker in self.workers:
worker.stop()
# pylint: disable=protected-access
def restart_dispatcher(self):
"""Stops `dispatcher` and creates a new dispatcher with the same port.
Restarting is supported only when the dispatcher is configured with
`fault_tolerant_mode=True`.
"""
if not self.dispatcher._config.fault_tolerant_mode:
raise ValueError(
"Trying to restart the dispatcher without fault-tolerance.")
port = int(self.dispatcher_address().split(":")[1])
self.dispatcher._stop()
self.dispatcher = server_lib.DispatchServer(
server_lib.DispatcherConfig(
port=port,
work_dir=self.dispatcher._config.work_dir,
protocol=PROTOCOL,
fault_tolerant_mode=self.dispatcher._config.fault_tolerant_mode))
def num_registered_workers(self):
return self.dispatcher._num_workers()
def num_tasks_on_workers(self):
return sum(worker.num_tasks() for worker in self.workers)
def __del__(self):
# Destroy workers before the dispatcher for clean shutdown.
self.workers.clear()
del self.dispatcher
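# A minimal usage sketch for TestCluster (illustrative only; tests normally
# drive it from a TestBase subclass below):
#
#   cluster = TestCluster(num_workers=2)
#   # register/distribute datasets against cluster.dispatcher_address(),
#   # e.g. via TestBase.make_distributed_range_dataset(10, cluster)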
class TestBase(test_base.DatasetTestBase):
"""Base class for tf.data service tests."""
def register_dataset(self, dispatcher_address, dataset):
compression = "AUTO"
if TRANSFER_PROTOCOL.value is not None:
compression = None
return data_service_ops.register_dataset(
dispatcher_address, dataset, compression=compression)
def from_dataset_id(self,
processing_mode,
cluster,
dataset_id,
element_spec,
job_name=None):
return data_service_ops.from_dataset_id(
processing_mode,
cluster.dispatcher_address(),
dataset_id,
element_spec,
data_transfer_protocol=TRANSFER_PROTOCOL.value,
job_name=job_name)
def make_distributed_dataset(self,
dataset,
cluster,
processing_mode="parallel_epochs",
job_name=None,
consumer_index=None,
num_consumers=None,
max_outstanding_requests=None,
compression="AUTO",
target_workers="AUTO"):
# pylint: disable=protected-access
return dataset.apply(
data_service_ops._distribute(
processing_mode,
cluster.dispatcher_address(),
job_name=job_name,
consumer_index=consumer_index,
num_consumers=num_consumers,
max_outstanding_requests=max_outstanding_requests,
task_refresh_interval_hint_ms=20,
data_transfer_protocol=TRANSFER_PROTOCOL.value,
compression=compression,
target_workers=target_workers))
def make_distributed_range_dataset(self,
num_elements,
cluster,
processing_mode="parallel_epochs",
job_name=None,
max_outstanding_requests=None,
compression="AUTO",
target_workers="AUTO"):
dataset = dataset_ops.Dataset.range(num_elements)
return self.make_distributed_dataset(
dataset,
cluster,
processing_mode=processing_mode,
job_name=job_name,
max_outstanding_requests=max_outstanding_requests,
compression=compression,
target_workers=target_workers)
def make_coordinated_read_dataset(
self,
cluster,
num_consumers,
sharding_policy=data_service_ops.ShardingPolicy.OFF):
"""Creates a dataset that performs coordinated reads.
The dataset simulates `num_consumers` consumers by using parallel
interleave to read with `num_consumers` threads, one for each consumer. The
nth element of the dataset is produced by consumer `n % num_consumers`.
The dataset executed on each worker will produce groups of `num_consumers`
sequentially increasing numbers. For example, if `num_consumers=3` a worker
dataset could produce [0, 1, 2, 9, 10, 11, 21, 22, 23]. This enables
`checkCoordinatedReadGroups` below to assess whether the values received in
each step came from the same group.
Args:
cluster: A tf.data service `TestCluster`.
num_consumers: The number of consumers to simulate.
sharding_policy: The sharding policy to use. Currently only OFF and
DYNAMIC are supported.
Returns:
A dataset that simulates reading with `num_consumers` consumers.
"""
if sharding_policy not in [
data_service_ops.ShardingPolicy.OFF,
data_service_ops.ShardingPolicy.DYNAMIC
]:
raise ValueError(f"Unsupported sharding policy: {sharding_policy}")
# Start from 0 so that we can detect when a new worker is added with
# ShardingPolicy.OFF.
ds = dataset_ops.Dataset.from_tensors(math_ops.cast(0, dtypes.int64))
ds = ds.concatenate(dataset_ops.Dataset.random())
# Ensure that all elements in the same group are consecutive.
def make_group(x):
# Avoid overflowing an int64 in (x+1)*num_consumers below.
x = x % (2**32)
return dataset_ops.Dataset.range(x*num_consumers, (x+1)*num_consumers)
ds = ds.flat_map(make_group)
consumers = []
for consumer_index in range(num_consumers):
consumers.append(
self.make_distributed_dataset(
ds,
cluster,
job_name="test",
processing_mode=sharding_policy,
consumer_index=consumer_index,
num_consumers=num_consumers))
# Use parallel interleave to read from consumers in parallel.
ds = dataset_ops.Dataset.from_tensor_slices(consumers)
ds = ds.interleave(
lambda x: x,
cycle_length=num_consumers,
num_parallel_calls=num_consumers)
return ds
def checkCoordinatedReadGroups(self, results, num_consumers):
"""Validates results from a `make_coordinted_read_dataset` dataset.
Each group of `num_consumers` results should be consecutive, indicating that
they were produced by the same worker.
Args:
results: The elements produced by the dataset.
num_consumers: The number of consumers.
"""
groups = [
results[start:start + num_consumers]
for start in range(0, len(results), num_consumers)
]
incorrect_groups = []
for group in groups:
# Check that each group of `num_consumers` results are consecutive.
for offset in range(1, len(group)):
if group[0] + offset != group[offset]:
incorrect_groups.append(group)
break
self.assertEmpty(
incorrect_groups,
"Incorrect groups: {}.\nAll groups: {}".format(incorrect_groups,
groups))
def read(self, get_next, results, count):
for _ in range(count):
results.append(self.evaluate(get_next()))
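# Illustrative sketch (not part of the original file): how the helpers above
# are typically combined in a test. `num_workers` is assumed to be a keyword
# argument of TestCluster's constructor, which is defined above this excerpt.
def _example_distribute_range(test_case, num_workers=2, num_elements=10):
  """Distributes a small range dataset through a fresh cluster and reads it."""
  cluster = TestCluster(num_workers=num_workers)
  ds = test_case.make_distributed_range_dataset(num_elements, cluster)
  # DatasetTestBase supplies getNext()/evaluate(); `read` is defined above.
  get_next = test_case.getNext(ds)
  results = []
  test_case.read(get_next, results, num_elements)
  return results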
| |
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class flowbtc (Exchange):
def describe(self):
return self.deep_extend(super(flowbtc, self).describe(), {
'id': 'flowbtc',
'name': 'flowBTC',
'countries': 'BR', # Brazil
'version': 'v1',
'rateLimit': 1000,
'hasCORS': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/28162465-cd815d4c-67cf-11e7-8e57-438bea0523a2.jpg',
'api': 'https://api.flowbtc.com:8400/ajax',
'www': 'https://trader.flowbtc.com',
'doc': 'http://www.flowbtc.com.br/api/',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'uid': True,
},
'api': {
'public': {
'post': [
'GetTicker',
'GetTrades',
'GetTradesByDate',
'GetOrderBook',
'GetProductPairs',
'GetProducts',
],
},
'private': {
'post': [
'CreateAccount',
'GetUserInfo',
'SetUserInfo',
'GetAccountInfo',
'GetAccountTrades',
'GetDepositAddresses',
'Withdraw',
'CreateOrder',
'ModifyOrder',
'CancelOrder',
'CancelAllOrders',
'GetAccountOpenOrders',
'GetOrderFee',
],
},
},
})
async def fetch_markets(self):
response = await self.publicPostGetProductPairs()
markets = response['productPairs']
result = []
for p in range(0, len(markets)):
market = markets[p]
id = market['name']
base = market['product1Label']
quote = market['product2Label']
symbol = base + '/' + quote
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostGetAccountInfo()
balances = response['currencies']
result = {'info': response}
for b in range(0, len(balances)):
balance = balances[b]
currency = balance['name']
account = {
'free': balance['balance'],
'used': balance['hold'],
'total': 0.0,
}
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.publicPostGetOrderBook(self.extend({
'productPair': market['id'],
}, params))
return self.parse_order_book(orderbook, None, 'bids', 'asks', 'px', 'qty')
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicPostGetTicker(self.extend({
'productPair': market['id'],
}, params))
timestamp = self.milliseconds()
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['bid']),
'ask': float(ticker['ask']),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': float(ticker['last']),
'change': None,
'percentage': None,
'average': None,
'baseVolume': float(ticker['volume24hr']),
'quoteVolume': float(ticker['volume24hrProduct2']),
'info': ticker,
}
def parse_trade(self, trade, market):
timestamp = trade['unixtime'] * 1000
side = 'buy' if (trade['incomingOrderSide'] == 0) else 'sell'
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'id': str(trade['tid']),
'order': None,
'type': None,
'side': side,
'price': trade['px'],
'amount': trade['qty'],
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicPostGetTrades(self.extend({
'ins': market['id'],
'startIndex': -1,
}, params))
return self.parse_trades(response['trades'], market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
orderType = 1 if (type == 'market') else 0
order = {
'ins': self.market_id(symbol),
'side': side,
'orderType': orderType,
'qty': amount,
'px': price,
}
response = await self.privatePostCreateOrder(self.extend(order, params))
return {
'info': response,
'id': response['serverOrderId'],
}
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
if 'ins' in params:
return await self.privatePostCancelOrder(self.extend({
'serverOrderId': id,
}, params))
raise ExchangeError(self.id + ' requires `ins` symbol parameter for cancelling an order')
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.version + '/' + path
if api == 'public':
if params:
body = self.json(params)
else:
self.check_required_credentials()
nonce = self.nonce()
auth = str(nonce) + self.uid + self.apiKey
signature = self.hmac(self.encode(auth), self.encode(self.secret))
body = self.json(self.extend({
'apiKey': self.apiKey,
'apiNonce': nonce,
'apiSig': signature.upper(),
}, params))
headers = {
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'isAccepted' in response:
if response['isAccepted']:
return response
raise ExchangeError(self.id + ' ' + self.json(response))
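# Illustrative usage sketch (not part of the original module): the credential
# strings and the 'BTC/BRL' symbol below are placeholders, and close() is
# assumed to be provided by the async base Exchange to release the HTTP session.
async def _example_fetch_ticker():
    exchange = flowbtc({
        'apiKey': 'YOUR_API_KEY',
        'secret': 'YOUR_SECRET',
        'uid': 'YOUR_UID',
    })
    try:
        # fetch_ticker() loads markets itself before querying the ticker
        return await exchange.fetch_ticker('BTC/BRL')
    finally:
        await exchange.close()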
| |
import csv
import json
import logging
import os
import yaml
import distutils.version
import numbers
import numpy as np
import ray.cloudpickle as cloudpickle
from ray.tune.result import (NODE_IP, TRAINING_ITERATION, TIME_TOTAL_S,
TIMESTEPS_TOTAL, EXPR_PARAM_FILE,
EXPR_PARAM_PICKLE_FILE, EXPR_PROGRESS_FILE,
EXPR_RESULT_FILE)
from ray.tune.syncer import get_node_syncer
from ray.tune.utils import flatten_dict
logger = logging.getLogger(__name__)
tf = None
VALID_SUMMARY_TYPES = [int, float, np.float32, np.float64, np.int32]
class Logger:
"""Logging interface for ray.tune.
By default, the UnifiedLogger implementation is used which logs results in
multiple formats (TensorBoard, rllab/viskit, plain json, custom loggers)
at once.
Arguments:
config: Configuration passed to all logger creators.
logdir: Directory for all logger creators to log to.
"""
def __init__(self, config, logdir, trial=None):
self.config = config
self.logdir = logdir
self.trial = trial
self._init()
def _init(self):
pass
def on_result(self, result):
"""Given a result, appends it to the existing log."""
raise NotImplementedError
def update_config(self, config):
"""Updates the config for logger."""
pass
def close(self):
"""Releases all resources used by this logger."""
pass
def flush(self):
"""Flushes all disk writes to storage."""
pass
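# Illustrative sketch (not part of the original module): a minimal Logger
# subclass only needs to override on_result(); _init(), update_config(),
# close() and flush() default to no-ops. The reward key below is a placeholder.
class _ExamplePrintLogger(Logger):
    """Prints one metric from every result to stdout."""

    def on_result(self, result):
        print("iteration", result.get(TRAINING_ITERATION),
              "episode_reward_mean", result.get("episode_reward_mean"))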
class NoopLogger(Logger):
def on_result(self, result):
pass
class MLFLowLogger(Logger):
"""MLFlow logger.
    Requires the experiment configuration to include an MLflow experiment ID,
    or the proper environment variables to be set manually.
"""
def _init(self):
from mlflow.tracking import MlflowClient
client = MlflowClient()
run = client.create_run(self.config.get("mlflow_experiment_id"))
self._run_id = run.info.run_id
for key, value in self.config.items():
client.log_param(self._run_id, key, value)
self.client = client
def on_result(self, result):
for key, value in result.items():
if not isinstance(value, float):
continue
self.client.log_metric(
self._run_id, key, value, step=result.get(TRAINING_ITERATION))
def close(self):
self.client.set_terminated(self._run_id)
class JsonLogger(Logger):
def _init(self):
self.update_config(self.config)
local_file = os.path.join(self.logdir, EXPR_RESULT_FILE)
self.local_out = open(local_file, "a")
def on_result(self, result):
json.dump(result, self, cls=_SafeFallbackEncoder)
self.write("\n")
self.local_out.flush()
def write(self, b):
self.local_out.write(b)
def flush(self):
self.local_out.flush()
def close(self):
self.local_out.close()
def update_config(self, config):
self.config = config
config_out = os.path.join(self.logdir, EXPR_PARAM_FILE)
with open(config_out, "w") as f:
json.dump(
self.config,
f,
indent=2,
sort_keys=True,
cls=_SafeFallbackEncoder)
config_pkl = os.path.join(self.logdir, EXPR_PARAM_PICKLE_FILE)
with open(config_pkl, "wb") as f:
cloudpickle.dump(self.config, f)
def tf2_compat_logger(config, logdir, trial=None):
"""Chooses TensorBoard logger depending on imported TF version."""
global tf
if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
logger.warning("Not importing TensorFlow for test purposes")
tf = None
raise RuntimeError("Not importing TensorFlow for test purposes")
else:
import tensorflow as tf
use_tf2_api = (distutils.version.LooseVersion(tf.__version__) >=
distutils.version.LooseVersion("1.15.0"))
if use_tf2_api:
# This is temporarily for RLlib because it disables v2 behavior...
from tensorflow.python import tf2
if not tf2.enabled():
tf = tf.compat.v1
return TFLogger(config, logdir, trial)
tf = tf.compat.v2 # setting this for TF2.0
return TF2Logger(config, logdir, trial)
else:
return TFLogger(config, logdir, trial)
class TF2Logger(Logger):
"""TensorBoard Logger for TF version >= 2.0.0.
Automatically flattens nested dicts to show on TensorBoard:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
If you need to do more advanced logging, it is recommended
to use a Summary Writer in the Trainable yourself.
"""
def _init(self):
global tf
if tf is None:
import tensorflow as tf
tf = tf.compat.v2 # setting this for TF2.0
self._file_writer = None
self._hp_logged = False
def on_result(self, result):
if self._file_writer is None:
from tensorflow.python.eager import context
from tensorboard.plugins.hparams import api as hp
self._context = context
self._file_writer = tf.summary.create_file_writer(self.logdir)
with tf.device("/CPU:0"):
with tf.summary.record_if(True), self._file_writer.as_default():
step = result.get(
TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
tmp = result.copy()
if not self._hp_logged:
if self.trial and self.trial.evaluated_params:
try:
hp.hparams(
self.trial.evaluated_params,
trial_id=self.trial.trial_id)
except Exception as exc:
logger.error("HParams failed with %s", exc)
self._hp_logged = True
for k in [
"config", "pid", "timestamp", TIME_TOTAL_S,
TRAINING_ITERATION
]:
if k in tmp:
del tmp[k] # not useful to log these
flat_result = flatten_dict(tmp, delimiter="/")
path = ["ray", "tune"]
for attr, value in flat_result.items():
if type(value) in VALID_SUMMARY_TYPES:
tf.summary.scalar(
"/".join(path + [attr]), value, step=step)
self._file_writer.flush()
def flush(self):
if self._file_writer is not None:
self._file_writer.flush()
def close(self):
if self._file_writer is not None:
self._file_writer.close()
def to_tf_values(result, path):
from tensorboardX.summary import make_histogram
flat_result = flatten_dict(result, delimiter="/")
values = []
for attr, value in flat_result.items():
if type(value) in VALID_SUMMARY_TYPES:
values.append(
tf.Summary.Value(
tag="/".join(path + [attr]), simple_value=value))
elif type(value) is list and len(value) > 0:
values.append(
tf.Summary.Value(
tag="/".join(path + [attr]),
histo=make_histogram(values=np.array(value), bins=10)))
return values
class TFLogger(Logger):
"""TensorBoard Logger for TF version < 2.0.0.
Automatically flattens nested dicts to show on TensorBoard:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
If you need to do more advanced logging, it is recommended
to use a Summary Writer in the Trainable yourself.
"""
def _init(self):
global tf
if tf is None:
import tensorflow as tf
tf = tf.compat.v1 # setting this for regular TF logger
logger.debug("Initializing TFLogger instead of TF2Logger.")
self._file_writer = tf.summary.FileWriter(self.logdir)
def on_result(self, result):
tmp = result.copy()
for k in [
"config", "pid", "timestamp", TIME_TOTAL_S, TRAINING_ITERATION
]:
if k in tmp:
del tmp[k] # not useful to tf log these
values = to_tf_values(tmp, ["ray", "tune"])
train_stats = tf.Summary(value=values)
t = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
self._file_writer.add_summary(train_stats, t)
iteration_value = to_tf_values({
TRAINING_ITERATION: result[TRAINING_ITERATION]
}, ["ray", "tune"])
iteration_stats = tf.Summary(value=iteration_value)
self._file_writer.add_summary(iteration_stats, t)
self._file_writer.flush()
def flush(self):
self._file_writer.flush()
def close(self):
self._file_writer.close()
class CSVLogger(Logger):
"""Logs results to progress.csv under the trial directory.
Automatically flattens nested dicts in the result dict before writing
to csv:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
"""
def _init(self):
"""CSV outputted with Headers as first set of results."""
progress_file = os.path.join(self.logdir, EXPR_PROGRESS_FILE)
self._continuing = os.path.exists(progress_file)
self._file = open(progress_file, "a")
self._csv_out = None
def on_result(self, result):
tmp = result.copy()
if "config" in tmp:
del tmp["config"]
result = flatten_dict(tmp, delimiter="/")
if self._csv_out is None:
self._csv_out = csv.DictWriter(self._file, result.keys())
if not self._continuing:
self._csv_out.writeheader()
self._csv_out.writerow(
{k: v
for k, v in result.items() if k in self._csv_out.fieldnames})
self._file.flush()
def flush(self):
self._file.flush()
def close(self):
self._file.close()
class TBXLogger(Logger):
"""TensorBoardX Logger.
Note that hparams will be written only after a trial has terminated.
This logger automatically flattens nested dicts to show on TensorBoard:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
"""
def _init(self):
try:
from tensorboardX import SummaryWriter
except ImportError:
logger.error("pip install 'ray[tune]' to see TensorBoard files.")
raise
self._file_writer = SummaryWriter(self.logdir, flush_secs=30)
self.last_result = None
def on_result(self, result):
step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
tmp = result.copy()
for k in [
"config", "pid", "timestamp", TIME_TOTAL_S, TRAINING_ITERATION
]:
if k in tmp:
del tmp[k] # not useful to log these
flat_result = flatten_dict(tmp, delimiter="/")
path = ["ray", "tune"]
valid_result = {}
for attr, value in flat_result.items():
full_attr = "/".join(path + [attr])
if type(value) in VALID_SUMMARY_TYPES:
valid_result[full_attr] = value
self._file_writer.add_scalar(
full_attr, value, global_step=step)
elif type(value) is list and len(value) > 0:
valid_result[full_attr] = value
try:
self._file_writer.add_histogram(
full_attr, value, global_step=step)
# In case TensorboardX still doesn't think it's a valid value
# (e.g. `[[]]`), warn and move on.
except ValueError:
logger.warning(
"You are trying to log an invalid value ({}={}) "
"via {}!".format(full_attr, value,
type(self).__name__))
self.last_result = valid_result
self._file_writer.flush()
def flush(self):
if self._file_writer is not None:
self._file_writer.flush()
def close(self):
if self._file_writer is not None:
if self.trial and self.trial.evaluated_params and self.last_result:
self._try_log_hparams(self.last_result)
self._file_writer.close()
def _try_log_hparams(self, result):
# TBX currently errors if the hparams value is None.
scrubbed_params = {
k: v
for k, v in self.trial.evaluated_params.items() if v is not None
}
from tensorboardX.summary import hparams
experiment_tag, session_start_tag, session_end_tag = hparams(
hparam_dict=scrubbed_params, metric_dict=result)
self._file_writer.file_writer.add_summary(experiment_tag)
self._file_writer.file_writer.add_summary(session_start_tag)
self._file_writer.file_writer.add_summary(session_end_tag)
DEFAULT_LOGGERS = (JsonLogger, CSVLogger, TBXLogger)
class UnifiedLogger(Logger):
"""Unified result logger for TensorBoard, rllab/viskit, plain json.
Arguments:
config: Configuration passed to all logger creators.
logdir: Directory for all logger creators to log to.
loggers (list): List of logger creators. Defaults to CSV, Tensorboard,
and JSON loggers.
sync_function (func|str): Optional function for syncer to run.
See ray/python/ray/tune/syncer.py
"""
def __init__(self,
config,
logdir,
trial=None,
loggers=None,
sync_function=None):
if loggers is None:
self._logger_cls_list = DEFAULT_LOGGERS
else:
self._logger_cls_list = loggers
self._sync_function = sync_function
self._log_syncer = None
super(UnifiedLogger, self).__init__(config, logdir, trial)
def _init(self):
self._loggers = []
for cls in self._logger_cls_list:
try:
self._loggers.append(cls(self.config, self.logdir, self.trial))
except Exception as exc:
logger.warning("Could not instantiate %s: %s.", cls.__name__,
str(exc))
self._log_syncer = get_node_syncer(
self.logdir,
remote_dir=self.logdir,
sync_function=self._sync_function)
def on_result(self, result):
for _logger in self._loggers:
_logger.on_result(result)
self._log_syncer.set_worker_ip(result.get(NODE_IP))
self._log_syncer.sync_down_if_needed()
def update_config(self, config):
for _logger in self._loggers:
_logger.update_config(config)
def close(self):
for _logger in self._loggers:
_logger.close()
def flush(self, sync_down=True):
for _logger in self._loggers:
_logger.flush()
if sync_down:
if not self._log_syncer.sync_down():
logger.warning("Trial %s: Post-flush sync skipped.",
self.trial)
def sync_up(self):
return self._log_syncer.sync_up()
def sync_down(self):
return self._log_syncer.sync_down()
def wait(self):
self._log_syncer.wait()
def sync_results_to_new_location(self, worker_ip):
"""Sends the current log directory to the remote node.
Syncing will not occur if the cluster is not started
with the Ray autoscaler.
"""
if worker_ip != self._log_syncer.worker_ip:
logger.info("Trial %s: Syncing (blocking) results to %s",
self.trial, worker_ip)
self._log_syncer.reset()
self._log_syncer.set_worker_ip(worker_ip)
if not self._log_syncer.sync_up():
logger.error(
"Trial %s: Sync up to new location skipped. "
"This should not occur.", self.trial)
self._log_syncer.wait()
else:
logger.error(
"Trial %s: Sync attempted to same IP %s. This "
"should not occur.", self.trial, worker_ip)
class _SafeFallbackEncoder(json.JSONEncoder):
def __init__(self, nan_str="null", **kwargs):
super(_SafeFallbackEncoder, self).__init__(**kwargs)
self.nan_str = nan_str
def default(self, value):
try:
if np.isnan(value):
return self.nan_str
if (type(value).__module__ == np.__name__
and isinstance(value, np.ndarray)):
return value.tolist()
if issubclass(type(value), numbers.Integral):
return int(value)
if issubclass(type(value), numbers.Number):
return float(value)
return super(_SafeFallbackEncoder, self).default(value)
except Exception:
return str(value) # give up, just stringify it (ok for logs)
def pretty_print(result):
result = result.copy()
result.update(config=None) # drop config from pretty print
result.update(hist_stats=None) # drop hist_stats from pretty print
out = {}
for k, v in result.items():
if v is not None:
out[k] = v
cleaned = json.dumps(out, cls=_SafeFallbackEncoder)
return yaml.safe_dump(json.loads(cleaned), default_flow_style=False)
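# Illustrative sketch (not part of the original module): _SafeFallbackEncoder
# coerces numpy scalars to plain Python numbers and replaces NaN-valued numpy
# entries with the string "null", so pretty_print() can take a raw result dict
# directly. The values below are made up.
def _example_pretty_print():
    result = {
        "training_iteration": 3,
        "episode_reward_mean": np.float32(12.5),  # encoded as 12.5
        "nan_metric": np.float32("nan"),  # replaced with the string "null"
        "config": {"lr": 1e-3},  # dropped by pretty_print()
    }
    return pretty_print(result)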
| |
#! /home/users/cordier/.linuxbrew/bin/python3
#
# Note: This Script Has Only Been Validated on FastQ
#
def chunks(iterator, size):
    """
    Split a Record Iterator into Chunks of at Most `size` Records for Processing - Not Yet Implemented
    """
    more = True
    while more:
        chunk = []
        while len(chunk) < size:
            try:
                read = next(iterator)
            except StopIteration:
                read = None
            if read is None:
                more = False
                break
            chunk.append(read)
        if chunk:
            yield chunk
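# Illustrative usage sketch (not wired into the validation flow below); the
# file path and the `process` callback are placeholders.
def _example_chunked_parse(path, process, size=1000):
    """Parse a FastQ File and Hand Its Records to *process* in Batches"""
    from Bio import SeqIO
    with open(path, "r") as handle:
        for batch in chunks(SeqIO.parse(handle, "fastq"), size):
            process(batch)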
if __name__ == "__main__":
# Imports
import sys, argparse
# Library Import
from Bio import SeqIO
from Bio.SeqIO.QualityIO import PairedFastaQualIterator
# Accepted Formats
acceptedFormats = ["fasta", "fastq", "qual", "fa", "fq", "sam"]
#
# Parse Arguments
#
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", type = str, help = "Input File")
parser.add_argument("-p", "--prefix", type = str, help = "Output Prefix for Warnings CSV")
parser.add_argument("-f", "--format", type = str, help = "Format of Input File (overrides automatic detection)")
parser.add_argument("--paired", action = "store_true", help = "Interleaved Paired-End Reads File")
parser.add_argument("--strict", action = "store_true", help = "Evaluate complete file - i.e. don't output first error & exit")
parser.add_argument("--allow_orphan_reads", action = "store_true", help = "Allow For Orphan Reads")
argsDict = vars(parser.parse_args())
# Set Arguments
inputfile = argsDict["input"]
format = argsDict["format"]
prefix = argsDict["prefix"]
paired = argsDict["paired"]
strict = argsDict["strict"]
orphans = argsDict["allow_orphan_reads"]
# Detect Format
if format is None:
format = (inputfile.split(".")[-1]).lower()
# Get Prefix
if prefix is None:
prefix = inputfile.replace("." + format, "").split("/")[-1]
# Assertions for Required Input
assert (inputfile is not None), "No input file provided!"
assert (format in acceptedFormats), "Invalid format: %s!" % format
#
# Validation
#
if not strict:
warnings = {
"unpaired_reads" : [],
"qual_seq_len_mismatch" : [],
"id_mismatch" : {
"missing_1" : [],
"missing_2" : []
}
}
if paired:
# Not Strict, Paired, No Orphans
print("\nValidating That Sequences are Correctly Interleaved and That Sequence & Quality Scores Are of The Same Length")
with open(inputfile, "r") as handle:
records = SeqIO.parse(handle, format)
resetReadframe = False
count = 0
for record in records:
seqA = record
seqB = next(records)
count += 2
# Are Reads Paired-End?
if (seqA.id[-2:] != "/1") and (seqA.id[-2:] != "/2"):
warnings["unpaired_reads"].append(seqA.id)
if (seqB.id[-2:] != "/1") and (seqB.id[-2:] != "/2"):
warnings["unpaired_reads"].append(seqB.id)
# Are Sequence Lengths & Quality Lengths the Same?
if (len(seqA.seq) != len(seqA.letter_annotations["phred_quality"])):
warnings["qual_seq_len_mismatch"].append(seqA.id)
if (len(seqB.seq) != len(seqB.letter_annotations["phred_quality"])):
warnings["qual_seq_len_mismatch"].append(seqB.id)
# Are Paired IDs The Same?
if seqA.id[0:-2] != seqB.id[0:-2]:
                        # Is Read /1 In Fact the Read /1? If Not, 1 is in Fact 2 & Missing Its /1 Pair
if (seqA.id[-2:] != "/1"):
warnings["id_mismatch"]["missing_1"].append(seqA.id)
                        # Is Read /2 In Fact the Read /2? If Not, 2 is in Fact the Next 1, & 1 is Missing Its /2 Pair
if (seqB.id[-2:] != "/2"):
warnings["id_mismatch"]["missing_2"].append(seqA.id)
# Write Warnings
with open("validation_warnings.%s.tsv" % prefix, "w") as warningfile:
warningfile.write("id\twarning\n")
for seqID in warnings["unpaired_reads"]:
warningfile.write("%s\tunpaired_reads\n" % seqID)
for seqID in warnings["qual_seq_len_mismatch"]:
warningfile.write("%s\tqual_seq_len_mismatch\n" % seqID)
for seqID in warnings["id_mismatch"]["missing_1"]:
warningfile.write("%s\tid_mismatch_missing_1\n" % seqID)
for seqID in warnings["id_mismatch"]["missing_2"]:
warningfile.write("%s\tid_mismatch_missing_2\n" % seqID)
# Print Results
print("\nParsed %d Paired End Reads and Found:" % count)
print(" %d Unpaired Read(s)" % len(warnings["unpaired_reads"]))
print(" %d Read(s) Are Missing Their /1 Mate" % len(warnings["id_mismatch"]["missing_1"]))
print(" %d Read(s) Are Missing Their /2 Mate" % len(warnings["id_mismatch"]["missing_2"]))
print(" %d Read(s) With a Sequence / Quality String Length Mismatch" % len(warnings["qual_seq_len_mismatch"]))
print("\nWarnings Written to: validation_warnings.%s.tsv" % prefix)
else:
# Not Strict, Single
print("\nValidating That Single-End Sequence & Quality Scores Are of The Same Length")
with open(inputfile, "r") as handle:
records = SeqIO.parse(handle, format)
count = 0
for record in records:
                # Are Sequence Lengths & Quality Lengths the Same? (Record a Warning in Non-Strict Mode)
                if len(record.seq) != len(record.letter_annotations["phred_quality"]):
                    warnings["qual_seq_len_mismatch"].append(record.id)
                count += 1
# Write Warnings
with open("validation_warnings.%s.tsv" % prefix, "w") as warningfile:
warningfile.write("id\twarning\n")
for seqID in warnings["qual_seq_len_mismatch"]:
warningfile.write("%s\tqual_seq_len_mismatch\n" % seqID)
# Print Results
print("\nParsed %d Single End Reads and Found:" % count)
print("\t%d Read(s) With a Sequence / Quality String Length Mismatch" % len(warnings["qual_seq_len_mismatch"]))
print("\nWarnings Written to: validation_warnings.%s.tsv" % prefix)
else:
# Strict, Paired, No Orphans
if paired:
print("\nValidating That Sequences are Correctly Interleaved and That Sequence & Quality Scores Are of The Same Length")
if orphans:
with open(inputfile, "r") as handle:
records = SeqIO.parse(handle, format)
for record in records:
seqA = record
seqB = next(records)
# Are Reads Paired-End?
assert (seqA.id[-2:] == "/1") or (seqA.id[-2:] == "/2"), "Error: Sequence ID (%s) Does Not Indicate Paired Data" % seqA.id
assert (seqB.id[-2:] == "/1") or (seqB.id[-2:] == "/2"), "Error: Sequence ID (%s) Does Not Indicate Paired Data" % seqB.id
# Are Sequence Lengths & Quality Lengths the Same?
assert (len(seqA.seq) == len(seqA.letter_annotations["phred_quality"])), "Error: Sequence & Quality Lengths Do Not Match: (%s)" % seqA.id
assert (len(seqB.seq) == len(seqB.letter_annotations["phred_quality"])), "Error: Sequence & Quality Lengths Do Not Match: (%s)" % seqB.id
else:
with open(inputfile, "r") as handle:
records = SeqIO.parse(handle, format)
for record in records:
seqA = record
seqB = next(records)
# Are Reads Paired-End?
assert (seqA.id[-2:] == "/1") or (seqA.id[-2:] == "/2"), "Error: Sequence ID (%s) Does Not Indicate Paired Data" % seqA.id
assert (seqB.id[-2:] == "/1") or (seqB.id[-2:] == "/2"), "Error: Sequence ID (%s) Does Not Indicate Paired Data" % seqB.id
# Are Paired IDs The Same?
if seqA.id[0:-2] != seqB.id[0:-2]:
                            # Is Read /1 In Fact the Read /1? If Not, 1 is in Fact 2 & Missing Its /1 Pair
assert (seqA.id[-2:] == "/1"), "Orphan Read Found (Missing /1 of Pair): %s" % seqA.id
                            # Is Read /2 In Fact the Read /2? If Not, 2 is in Fact the Next 1, & 1 is Missing Its /2 Pair
assert (seqB.id[-2:] == "/2"), "Orphan Read Found (Missing /2 of Pair): %s" % seqA.id
# Are Sequence Lengths & Quality Lengths the Same?
assert (len(seqA.seq) == len(seqA.letter_annotations["phred_quality"])), "Error: Sequence & Quality Lengths Do Not Match: (%s)" % seqA.id
assert (len(seqB.seq) == len(seqB.letter_annotations["phred_quality"])), "Error: Sequence & Quality Lengths Do Not Match: (%s)" % seqB.id
else:
# Strict, Single
print("\nValidating That Single-End Sequence & Quality Scores Are of The Same Length")
with open(inputfile, "r") as handle:
records = SeqIO.parse(handle, format)
for record in records:
# Are Sequence Lengths & Quality Lengths the Same?
assert (len(record.seq) == len(record.letter_annotations["phred_quality"])), "Error: Sequence & Quality Lengths Do Not Match: (%s)" % record.id
print("\nDone")
else:
pass
| |
# encoding: utf-8
# Copyright 2013 maker
# License
"""
Sales module objects.
"""
from django.db import models
from maker.core.models import Object, User, ModuleSetting
from maker.identities.models import Contact
from maker.finance.models import Transaction, Currency, Tax
from django.core.urlresolvers import reverse
from datetime import datetime, timedelta, time
from dateutil.relativedelta import relativedelta
from time import time as ttime
from decimal import Decimal, ROUND_UP
class SaleStatus(Object):
"Status of the Sale"
name = models.CharField(max_length = 512)
use_leads = models.BooleanField()
use_opportunities = models.BooleanField()
use_sales = models.BooleanField()
active = models.BooleanField()
hidden = models.BooleanField()
details = models.TextField(blank = True, null = True)
searchable = False
def __unicode__(self):
return unicode(self.name)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_status_view', args=[self.id])
except Exception:
return ""
class Meta:
"SalesStatus"
ordering = ('hidden', '-active', 'name')
class Product(Object):
"Single Product"
name = models.CharField(max_length = 512)
product_type = models.CharField(max_length=32,
default='good',
choices=(('service', 'Service'), ('good', 'Good'),
('subscription', 'Subscription'),
('compound', 'Compound'), ))
parent = models.ForeignKey('self', blank=True, null=True, related_name='child_set')
code = models.CharField(max_length=512, blank=True, null=True)
supplier = models.ForeignKey(Contact, blank=True, null=True, on_delete=models.SET_NULL)
supplier_code = models.IntegerField(blank=True, null=True)
buy_price = models.DecimalField(max_digits=20, decimal_places=2, default=0)
sell_price = models.DecimalField(max_digits=20, decimal_places=2, default=0)
stock_quantity = models.IntegerField(blank=True, null=True)
active = models.BooleanField()
runout_action = models.CharField(max_length=32, blank=True, null=True, choices=(('inactive',
'Mark Inactive'),
('notify', 'Notify'),
('ignore', 'Ignore'), ))
details = models.TextField(blank=True, null=True)
access_inherit = ('parent', '*module', '*user')
def __unicode__(self):
return unicode(self.name)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_product_view', args=[self.id])
except:
return ""
class Meta:
"Product"
ordering = ['code']
class SaleSource(Object):
"Source of Sale e.g. Search Engine"
name = models.CharField(max_length = 512)
active = models.BooleanField(default=False)
details = models.TextField(blank=True, null=True)
searchable = False
def __unicode__(self):
return unicode(self.name)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_source_view', args=[self.id])
except Exception:
return ""
class Meta:
"SaleSource"
ordering = ('-active', 'name')
class Lead(Object):
"Lead"
contact = models.ForeignKey(Contact)
source = models.ForeignKey(SaleSource, blank=True, null=True, on_delete=models.SET_NULL)
products_interested = models.ManyToManyField(Product, blank=True, null=True)
contact_method = models.CharField(max_length=32, choices=(('email', 'E-Mail'), ('phone', 'Phone'),
('post', 'Post'), ('face', 'Face to Face') ))
assigned = models.ManyToManyField(User, related_name = 'sales_lead_assigned', blank=True, null=True)
status = models.ForeignKey(SaleStatus)
details = models.TextField(blank=True, null=True)
access_inherit = ('contact', '*module', '*user')
def __unicode__(self):
return unicode(self.contact.name)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_lead_view', args=[self.id])
except Exception:
return ""
class Meta:
"Lead"
ordering = ['contact']
class Opportunity(Object):
"Opportunity"
lead = models.ForeignKey(Lead, blank=True, null=True, on_delete=models.SET_NULL)
contact = models.ForeignKey(Contact)
products_interested = models.ManyToManyField(Product)
source = models.ForeignKey(SaleSource, blank=True, null=True, on_delete=models.SET_NULL)
expected_date = models.DateField(blank=True, null=True)
closed_date = models.DateField(blank=True, null=True)
assigned = models.ManyToManyField(User, related_name = 'sales_opportunity_assigned', blank=True, null=True)
status = models.ForeignKey(SaleStatus)
probability = models.DecimalField(max_digits=3, decimal_places=0, blank=True, null=True)
amount = models.DecimalField(max_digits=20, decimal_places=2, default=0)
amount_currency = models.ForeignKey(Currency)
amount_display = models.DecimalField(max_digits=20, decimal_places=2, default=0)
details = models.TextField(blank=True, null=True)
access_inherit = ('lead', 'contact', '*module', '*user')
def __unicode__(self):
return unicode(self.contact)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_opportunity_view', args=[self.id])
except Exception:
return ""
class Meta:
"Opportunity"
ordering = ['-expected_date']
class SaleOrder(Object):
"Sale Order"
reference = models.CharField(max_length=512, blank=True, null=True)
datetime = models.DateTimeField(default=datetime.now)
client = models.ForeignKey(Contact, blank=True, null=True, on_delete=models.SET_NULL)
opportunity = models.ForeignKey(Opportunity, blank=True, null=True, on_delete=models.SET_NULL)
payment = models.ManyToManyField(Transaction, blank=True, null=True)
source = models.ForeignKey(SaleSource)
assigned = models.ManyToManyField(User, related_name = 'sales_saleorder_assigned', blank=True, null=True)
status = models.ForeignKey(SaleStatus)
currency = models.ForeignKey(Currency)
total = models.DecimalField(max_digits=20, decimal_places=2, default=0)
total_display = models.DecimalField(max_digits=20, decimal_places=2, default=0)
details = models.TextField(blank=True, null=True)
access_inherit = ('opportunity', 'client', '*module', '*user')
def fulfil(self):
"Fulfil"
for p in self.orderedproduct_set.all():
if not p.fulfilled:
product = p.product
product.stock_quantity -= p.quantity
product.save()
p.fulfilled = True
p.save()
if p.subscription:
p.subscription.renew()
def get_next_reference(self):
try:
            # Not guaranteed to be unique, but good enough for a human-readable reference
next_ref = SaleOrder.objects.all().aggregate(models.Max('id'))['id__max']+1
except:
next_ref = 1
full_ref = '%.5d/%s' % (next_ref, str(str(ttime()*10)[8:-2]))
return full_ref
def save(self, *args, **kwargs):
"Automatically set order reference"
super(SaleOrder, self).save(*args, **kwargs)
try:
conf = ModuleSetting.get_for_module('maker.sales', 'order_fulfil_status')[0]
fulfil_status = long(conf.value)
if self.status.id == fulfil_status:
self.fulfil()
except Exception:
pass
def __unicode__(self):
return unicode(self.reference)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_order_view', args=[self.id])
except Exception:
return ""
def get_taxes(self, base=False):
#TODO: Compound taxes
taxes = {}
ops = self.orderedproduct_set.filter(trash=False).filter(tax__isnull=False)
for p in ops:
if base:
item_total = p.get_total()
else:
item_total = p.get_total_display()
if p.tax.id in taxes:
taxes[p.tax.id]['amount']+=(item_total * (p.tax.rate/100)).quantize(Decimal('.01'), rounding = ROUND_UP)
else:
taxes[p.tax.id] = {'name':p.tax.name, 'rate':p.tax.rate,
'amount':(item_total * (p.tax.rate/100))
.quantize(Decimal('.01'), rounding = ROUND_UP)}
return taxes
def get_taxes_total(self):
taxes = self.get_taxes()
total = 0
for tax in taxes.values():
total += tax['amount']
return total
def get_subtotal(self):
sum = 0
for p in self.orderedproduct_set.filter(trash=False):
sum += p.get_total()
self.total = sum
return sum
def get_subtotal_display(self):
sum = 0
for p in self.orderedproduct_set.filter(trash=False):
sum += p.get_total_display()
self.total_display = sum
return sum
def get_total(self):
sum = 0
for p in self.orderedproduct_set.filter(trash=False):
sum += p.get_total()
sum += self.get_taxes_total()
self.total = sum
return sum
def get_total_display(self):
sum = 0
for p in self.orderedproduct_set.filter(trash=False):
sum += p.get_total_display()
sum += self.get_taxes_total()
self.total_display = sum
return sum
def update_total(self):
self.get_total()
self.get_total_display()
self.save()
def get_total_paid(self):
return Decimal(self.payment.filter(trash=False).aggregate(models.Sum('value_display'))['value_display__sum'] or '0')
def balance_due(self):
return self.get_total() - self.get_total_paid()
class Meta:
"SaleOrder"
ordering = ['-datetime']
class Subscription(Object):
"Subscription"
client = models.ForeignKey(Contact, blank=True, null=True, on_delete=models.SET_NULL)
product = models.ForeignKey(Product, blank=True, null=True)
start = models.DateField(default=datetime.now)
expiry = models.DateField(blank=True, null=True)
cycle_period = models.CharField(max_length=32,
choices=(('daily', 'Daily'),
('weekly', 'Weekly'),
('monthly', 'Monthly'),
('quarterly','Quarterly'),
('yearly', 'Yearly')),
                                    default='monthly')
cycle_end = models.DateField(blank = True, null = True)
active = models.BooleanField(default=False)
details = models.CharField(max_length = 512, blank = True, null = True)
access_inherit = ('client', 'product', '*module', '*user')
def get_cycle_start(self):
"Get the cycle start date"
if not self.cycle_end:
return None
cycle_end = self.cycle_end
        # the cycle start is one full billing period before the cycle end
if self.cycle_period == 'monthly':
p = relativedelta(months=+1)
elif self.cycle_period == 'weekly':
p = timedelta(weeks = 1)
elif self.cycle_period == 'daily':
p = timedelta(days = 1)
elif self.cycle_period == 'quarterly':
            p = relativedelta(months=+3)
elif self.cycle_period == 'yearly':
p = relativedelta(years = 1)
else:
p = relativedelta(months=+1)
cycle_start = cycle_end - p
return cycle_start
def renew(self):
"Renew"
if self.cycle_period == 'monthly':
p = relativedelta(months=+1)
elif self.cycle_period == 'daily':
p = timedelta(days = 1)
elif self.cycle_period == 'weekly':
p = timedelta(weeks = 1)
elif self.cycle_period == 'quarterly':
            p = relativedelta(months=+3)
elif self.cycle_period == 'yearly':
p = relativedelta(years = 1)
else:
p = relativedelta(months=+1)
self.cycle_end = datetime.now().date() + p
self.save()
def activate(self):
"Activate"
if self.active:
return
self.renew()
self.active = True
self.save()
def deactivate(self):
"Deactivate"
if not self.active:
return
self.active = False
self.save()
def invoice(self):
"Create a new sale order for self"
new_invoice = SaleOrder()
try:
conf = ModuleSetting.get_for_module('maker.sales', 'default_order_status')[0]
            new_invoice.status = SaleStatus.objects.get(pk=long(conf.value))
except Exception:
ss = SaleStatus.objects.all()[0]
new_invoice.status = ss
so = SaleSource.objects.all()[0]
new_invoice.source = so
new_invoice.client = self.client
new_invoice.reference = "Subscription Invoice " + str(datetime.today().strftime('%Y-%m-%d'))
new_invoice.save()
try:
op = self.orderedproduct_set.filter(trash=False).order_by('-date_created')[0]
opn = OrderedProduct()
opn.order = new_invoice
opn.product = self.product
opn.quantity = op.quantity
opn.discount = op.discount
opn.subscription = self
opn.save()
except IndexError:
opn = OrderedProduct()
opn.order = new_invoice
opn.product = self.product
opn.quantity = 1
opn.subscription = self
opn.save()
return new_invoice.reference
def check_status(self):
"""
Checks and sets the state of the subscription
"""
if not self.active:
return 'Inactive'
if self.expiry:
if datetime.now() > datetime.combine(self.expiry, time.min):
self.deactivate()
return 'Expired'
if not self.cycle_end:
self.renew()
cycle_end = self.cycle_end
        # the current billing cycle has ended; decide whether to invoice or deactivate
if datetime.now().date() >= cycle_end:
cycle_start = self.get_cycle_start()
#if we haven't already invoiced them, invoice them
grace = 3
if (datetime.now().date() - cycle_end > timedelta(days=grace)):
#Subscription has overrun and must be shut down
return self.deactivate()
try:
conf = ModuleSetting.get_for_module('maker.sales', 'order_fulfil_status')[0]
order_fulfil_status = SaleStatus.objects.get(pk=long(conf.value))
except Exception:
order_fulfil_status = None
if self.orderedproduct_set.filter(order__datetime__gte=cycle_start).filter(order__status=order_fulfil_status):
return 'Paid'
elif self.orderedproduct_set.filter(order__datetime__gte=cycle_start):
return 'Invoiced'
else:
self.invoice()
return 'Invoiced'
else:
return 'Active'
def __unicode__(self):
return unicode(self.product)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_subscription_view', args=[self.id])
except Exception:
return ""
class Meta:
"Subscription"
ordering = ['expiry']
class OrderedProduct(Object):
"Ordered Product"
subscription = models.ForeignKey(Subscription, blank=True, null=True)
product = models.ForeignKey(Product)
quantity = models.DecimalField(max_digits=30, decimal_places=2, default=1)
discount = models.DecimalField(max_digits=5, decimal_places=2, default=0)
tax = models.ForeignKey(Tax, blank=True, null=True, on_delete=models.SET_NULL)
rate = models.DecimalField(max_digits=20, decimal_places=2)
rate_display = models.DecimalField(max_digits=20, decimal_places=2, default=0)
order = models.ForeignKey(SaleOrder)
description = models.TextField(blank=True, null=True)
fulfilled = models.BooleanField(default=False)
access_inherit = ('order', '*module', '*user')
def __unicode__(self):
return unicode(self.product)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_ordered_view', args=[self.id])
except Exception:
return ""
def get_total(self):
"Returns total sum for this item"
total = self.rate * self.quantity
if self.discount:
total = total - (total*self.discount/100)
if total < 0:
total = Decimal(0)
return total.quantize(Decimal('.01'),rounding=ROUND_UP)
def get_total_display(self):
"Returns total sum for this item in the display currency"
total = self.rate_display * self.quantity
if self.discount:
total = total - (total*self.discount/100)
if total < 0:
total = Decimal(0)
return total.quantize(Decimal('.01'),rounding=ROUND_UP)
class Meta:
"OrderedProduct"
ordering = ['product']
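# Illustrative sketch (not part of the original models): the line-total rule
# implemented by OrderedProduct.get_total()/get_total_display(), written with
# plain Decimals so it can be read without a database. The figures are made up:
# _example_line_total(Decimal('9.99'), Decimal('3'), Decimal('10')) == Decimal('26.98')
def _example_line_total(rate, quantity, discount):
    "rate * quantity, minus the percentage discount, never below zero, rounded up to cents"
    total = rate * quantity
    if discount:
        total = total - (total * discount / 100)
    if total < 0:
        total = Decimal(0)
    return total.quantize(Decimal('.01'), rounding=ROUND_UP)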
| |
"""
Implement several functions for manipulating ASTs of modules.
"""
import ast
import collections
import contextlib
import astor
NODES_IMPORT = ast.Import, ast.ImportFrom
class ModuleLocalsVisitor(ast.NodeVisitor):
"""Fetches all the names in an AST."""
def __init__(self):
self.locals_ = {}
def visit_Assign(self, node):
for target in node.targets:
self.locals_[target.id] = node
def visit_ClassDef(self, node):
self.locals_[node.name] = node
def visit_FunctionDef(self, node):
self.locals_[node.name] = node
def visit_ImportFrom(self, node):
for alias in node.names:
name = alias.asname if alias.asname else alias.name
self.locals_[name] = node
def visit_Import(self, node):
for alias in node.names:
# Take only the first part of dotted names where applicable
name = alias.asname if alias.asname else alias.name.split('.')[0]
self.locals_[name] = node
class NamesVisitor(ast.NodeVisitor):
"""Fetches all the names in an AST."""
def __init__(self):
self.locals_ = set()
self.scope_stack = []
@contextlib.contextmanager
def _scope(self, node):
self.scope_stack.append(node)
yield
assert self.scope_stack.pop() == node
def visit_FunctionDef(self, node):
locals_ = self.locals_.copy()
with self._scope(node):
self.generic_visit(node)
new_names = self.locals_ - locals_
self.locals_ -= new_names & get_function_arg_names(node)
def visit_Lambda(self, node):
locals_ = self.locals_.copy()
with self._scope(node):
self.generic_visit(node)
new_names = self.locals_ - locals_
self.locals_ -= new_names & get_function_arg_names(node)
def visit_Name(self, node):
self.locals_.add(node.id)
def get_module_locals(tree):
visitor = ModuleLocalsVisitor()
visitor.visit(tree)
return visitor.locals_
def get_function_locals(tree):
"""
Get all the names used by the function at *tree*.
>>> sorted(get_function_locals(ast.parse('\\n'.join([
... 'def foo():',
... ' x = list()',
... ]))))
['list', 'x']
"""
visitor = NamesVisitor()
visitor.visit(tree)
return visitor.locals_
def get_function_arg_names(node):
scoped_names = {name.id for name in node.args.args}
if node.args.vararg:
scoped_names.add(node.args.vararg)
if node.args.kwarg:
scoped_names.add(node.args.kwarg)
return scoped_names
class ClassLocalsVisitor(ast.NodeVisitor):
"""Fetches all the names in an AST."""
def __init__(self):
self.locals_ = set()
self.scope_stack = []
@contextlib.contextmanager
def _scope(self, node):
self.scope_stack.append(node)
yield
assert self.scope_stack.pop() == node
def visit_Assign(self, node):
if self.scope_stack:
self.generic_visit(node)
else:
# *node* is class variable. Only visit the right hand side.
self.visit(node.value)
def visit_FunctionDef(self, node):
locals_ = self.locals_.copy()
with self._scope(node):
self.generic_visit(node)
new_names = self.locals_ - locals_
self.locals_ -= new_names & get_function_arg_names(node)
def visit_Name(self, node):
self.locals_.add(node.id)
def get_class_locals(node):
"""
    Get all the names used by the class at *node*.
    >>> sorted(get_class_locals(ast.parse('\\n'.join([
... 'class Bar(object):',
... ' classvar = list()',
... ' def method(self, x, y=1, *args, **kwargs):',
... ' return x + y + z',
... ]))))
['list', 'object', 'z']
"""
visitor = ClassLocalsVisitor()
visitor.visit(node)
return visitor.locals_
def get_names_used(tree, node):
"""Get all names in *tree* used by *node*."""
module_locals = set(get_module_locals(tree))
function_locals = get_function_locals(node)
return module_locals & function_locals
def branch(tree, node, module_name):
"""
    Return a new tree containing *node* from *tree* together with all of its
    dependencies; names defined in *tree* itself are imported from *module_name*.
"""
module_locals = get_module_locals(tree)
nodes = {
name: (
module_locals[name]
if type(module_locals[name]) in NODES_IMPORT else
ast.ImportFrom(
module_name,
[ast.alias(name, None)],
0
)
)
for name in get_names_used(tree, node)
}
nodes[''] = node # This name is discarded
return make_tree(nodes)
def make_tree(nodes):
"""
Build an AST from a map from names to nodes.
:param nodes:
A map from names to nodes.
"""
new_tree = ast.parse('')
definitions = []
import_from = collections.defaultdict(list)
for name, node in nodes.items():
if type(node) == ast.Import:
new_tree.body.append(node)
elif type(node) == ast.ImportFrom:
import_from[node.module].append(name)
else:
definitions.append(node)
for identifier, names in import_from.items():
new_tree.body.append(ast.ImportFrom(
identifier,
[ast.alias(name, None) for name in sorted(names)],
0
))
for definition in definitions:
new_tree.body.append(definition)
return new_tree
def move_nodes(tree, names, module_name):
nodes = get_module_locals(tree)
new_tree = ast.parse('')
for name in names:
try:
node = nodes[name]
except KeyError:
raise ValueError('{} not in {}'.format(name, nodes.keys()))
else:
new_tree = merge_nodes(new_tree, branch(tree, node, module_name))
return to_source(new_tree)
def merge_nodes(*nodes):
"""
Merge two module nodes together by resolving common dependencies.
"""
module_locals = {}
for node in nodes:
module_locals.update(get_module_locals(node))
return make_tree(module_locals)
def to_source(node):
return astor.codegen.to_source(node)
def get_dependencies(tree):
"""
Get a dictionary representing the dependencies between definitions in a
module.
>>> tree = ast.parse('\\n'.join([
... 'import foo',
... 'def bar():',
... ' return foo + "x"',
... 'def foobar():',
... ' return bar() + foo',
... ]))
>>> get_dependencies(tree) == {
... 'foo': set([]),
... 'bar': set(['foo']),
... 'foobar': set(['foo', 'bar']),
... }
True
"""
deps = {}
module_locals = get_module_locals(tree)
for name, node in module_locals.items():
if type(node) in NODES_IMPORT:
deps[name] = set()
elif type(node) == ast.ClassDef:
deps[name] = set(module_locals) & get_class_locals(node)
elif type(node) == ast.FunctionDef:
deps[name] = set(module_locals) & get_function_locals(node)
elif type(node) == ast.Assign:
deps[name] = get_function_locals(node.value)
else:
raise ValueError(
"Can't get dependencies for {} ({})".format(name, ast.dump(node))
)
return deps
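# Illustrative usage sketch (not part of the original module): extracting one
# definition, plus the imports it needs, into new module source. The sample
# source text and the module name are made up.
def _example_extract(module_name='original_module'):
    source = '\n'.join([
        'import os',
        'def bar():',
        '    return os.getcwd()',
        'def unrelated():',
        '    return 1',
    ])
    tree = ast.parse(source)
    # The result keeps `bar` and `import os`, and drops `unrelated`.
    return move_nodes(tree, ['bar'], module_name)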
| |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
import re
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
v1_library = gapic.ruby_library(
'redis', 'v1',
artman_output_name='google-cloud-ruby/google-cloud-redis'
)
s.copy(v1_library / 'lib/google/cloud/redis.rb')
s.copy(v1_library / 'lib/google/cloud/redis/v1')
s.copy(v1_library / 'lib/google/cloud/redis/v1.rb')
s.copy(v1_library / 'test/google/cloud/redis/v1')
s.copy(v1_library / 'README.md')
s.copy(v1_library / 'LICENSE')
s.copy(v1_library / '.gitignore')
s.copy(v1_library / '.yardopts')
s.copy(v1_library / 'google-cloud-redis.gemspec', merge=ruby.merge_gemspec)
# Copy common templates
templates = gcp.CommonTemplates().ruby_library()
s.copy(templates)
v1beta1_library = gapic.ruby_library(
'redis', 'v1beta1',
artman_output_name='google-cloud-ruby/google-cloud-redis'
)
s.copy(v1beta1_library / 'lib/google/cloud/redis/v1beta1')
s.copy(v1beta1_library / 'lib/google/cloud/redis/v1beta1.rb')
s.copy(v1beta1_library / 'test/google/cloud/redis/v1beta1')
# Support for service_address
s.replace(
[
'lib/google/cloud/redis.rb',
'lib/google/cloud/redis/v*.rb',
'lib/google/cloud/redis/v*/*_client.rb'
],
'\n(\\s+)#(\\s+)@param exception_transformer',
'\n\\1#\\2@param service_address [String]\n' +
'\\1#\\2 Override for the service hostname, or `nil` to leave as the default.\n' +
'\\1#\\2@param service_port [Integer]\n' +
'\\1#\\2 Override for the service port, or `nil` to leave as the default.\n' +
'\\1#\\2@param exception_transformer'
)
s.replace(
[
'lib/google/cloud/redis/v*.rb',
'lib/google/cloud/redis/v*/*_client.rb'
],
'\n(\\s+)metadata: nil,\n\\s+exception_transformer: nil,\n',
'\n\\1metadata: nil,\n\\1service_address: nil,\n\\1service_port: nil,\n\\1exception_transformer: nil,\n'
)
s.replace(
[
'lib/google/cloud/redis/v*.rb',
'lib/google/cloud/redis/v*/*_client.rb'
],
',\n(\\s+)lib_name: lib_name,\n\\s+lib_version: lib_version',
',\n\\1lib_name: lib_name,\n\\1service_address: service_address,\n\\1service_port: service_port,\n\\1lib_version: lib_version'
)
s.replace(
'lib/google/cloud/redis/v*/*_client.rb',
'service_path = self\\.class::SERVICE_ADDRESS',
'service_path = service_address || self.class::SERVICE_ADDRESS'
)
s.replace(
'lib/google/cloud/redis/v*/*_client.rb',
'port = self\\.class::DEFAULT_SERVICE_PORT',
'port = service_port || self.class::DEFAULT_SERVICE_PORT'
)
# https://github.com/googleapis/gapic-generator/issues/2196
s.replace(
[
'README.md',
'lib/google/cloud/redis.rb',
'lib/google/cloud/redis/v1.rb',
'lib/google/cloud/redis/v1beta1.rb'
],
'\\[Product Documentation\\]: https://cloud\\.google\\.com/redis\n',
'[Product Documentation]: https://cloud.google.com/memorystore\n')
# https://github.com/googleapis/gapic-generator/issues/2232
s.replace(
[
'lib/google/cloud/redis/v1/cloud_redis_client.rb',
'lib/google/cloud/redis/v1beta1/cloud_redis_client.rb'
],
'\n\n(\\s+)class OperationsClient < Google::Longrunning::OperationsClient',
'\n\n\\1# @private\n\\1class OperationsClient < Google::Longrunning::OperationsClient')
# https://github.com/googleapis/gapic-generator/issues/2242
def escape_braces(match):
expr = re.compile('^([^`]*(`[^`]*`[^`]*)*)([^`#\\$\\\\])\\{([\\w,]+)\\}')
content = match.group(0)
while True:
content, count = expr.subn('\\1\\3\\\\\\\\{\\4}', content)
if count == 0:
return content
s.replace(
'lib/google/cloud/**/*.rb',
'\n(\\s+)#[^\n]*[^\n#\\$\\\\]\\{[\\w,]+\\}',
escape_braces)
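# For illustration (not one of the generated fixes): a YARD comment line such as
#   # Returns projects/{project_id}/locations/{location_id}
# is rewritten by escape_braces to
#   # Returns projects/\\{project_id}/locations/\\{location_id}
# so YARD does not treat `{...}` as a link reference, while `#{...}` and
# backtick-quoted spans are left untouched.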
# https://github.com/googleapis/gapic-generator/issues/2243
s.replace(
'lib/google/cloud/redis/*/*_client.rb',
'(\n\\s+class \\w+Client\n)(\\s+)(attr_reader :\\w+_stub)',
'\\1\\2# @private\n\\2\\3')
# https://github.com/googleapis/gapic-generator/issues/2279
s.replace(
'lib/**/*.rb',
'\\A(((#[^\n]*)?\n)*# (Copyright \\d+|Generated by the protocol buffer compiler)[^\n]+\n(#[^\n]*\n)*\n)([^\n])',
'\\1\n\\6')
# https://github.com/googleapis/gapic-generator/issues/2323
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://github\\.com/GoogleCloudPlatform/google-cloud-ruby',
'https://github.com/googleapis/google-cloud-ruby'
)
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://googlecloudplatform\\.github\\.io/google-cloud-ruby',
'https://googleapis.github.io/google-cloud-ruby'
)
# https://github.com/googleapis/gapic-generator/issues/2393
s.replace(
'google-cloud-redis.gemspec',
'gem.add_development_dependency "rubocop".*$',
'gem.add_development_dependency "rubocop", "~> 0.64.0"'
)
s.replace(
'google-cloud-redis.gemspec',
'gem.add_dependency "google-gax", "~> 1\\.[\\d\\.]+"',
"\n".join([
'gem.add_dependency "google-gax", "~> 1.7"',
' gem.add_dependency "googleapis-common-protos", ">= 1.3.9", "< 2.0"'
])
)
s.replace(
'google-cloud-redis.gemspec',
'"README.md", "LICENSE"',
'"README.md", "AUTHENTICATION.md", "LICENSE"'
)
s.replace(
'.yardopts',
'README.md\n',
'README.md\nAUTHENTICATION.md\nLICENSE\n'
)
# https://github.com/googleapis/google-cloud-ruby/issues/3058
s.replace(
'google-cloud-redis.gemspec',
'\nGem::Specification.new do',
'require File.expand_path("../lib/google/cloud/redis/version", __FILE__)\n\nGem::Specification.new do'
)
s.replace(
'google-cloud-redis.gemspec',
'(gem.version\s+=\s+).\d+.\d+.\d.*$',
'\\1Google::Cloud::Redis::VERSION'
)
for version in ['v1', 'v1beta1']:
s.replace(
f'lib/google/cloud/redis/{version}/*_client.rb',
f'(require \".*credentials\"\n)\n',
f'\\1require "google/cloud/redis/version"\n\n'
)
s.replace(
f'lib/google/cloud/redis/{version}/*_client.rb',
'Gem.loaded_specs\[.*\]\.version\.version',
'Google::Cloud::Redis::VERSION'
)
# Fix links for devsite migration
for file in ['lib/**/*.rb', '*.md']:
s.replace(
file,
'https://googleapis.github.io/google-cloud-ruby/#/docs/google-cloud-logging/latest/google/cloud/logging/logger',
'https://googleapis.dev/ruby/google-cloud-logging/latest'
)
s.replace(
'*.md',
'https://googleapis.github.io/google-cloud-ruby/#/docs/.*/authentication',
'https://googleapis.dev/ruby/google-cloud-redis/latest/file.AUTHENTICATION.html'
)
s.replace(
'lib/**/*.rb',
'https://googleapis.github.io/google-cloud-ruby/#/docs/.*/authentication',
'https://googleapis.dev/ruby/google-cloud-redis/latest/file.AUTHENTICATION.html'
)
s.replace(
'README.md',
'github.io/google-cloud-ruby/#/docs/google-cloud-redis/latest/.*$',
'dev/ruby/google-cloud-redis/latest'
)
| |
import distutils
import distutils.spawn
import inspect
import logging
import pathlib
import subprocess
import tempfile
import time
import types
import warnings
from typing import Optional, List
from shlex import quote
from ray.tune.error import TuneError
from ray.util.annotations import PublicAPI
from ray.util.debug import log_once
from ray.util.ml_utils.cloud import (
S3_PREFIX,
GS_PREFIX,
HDFS_PREFIX,
ALLOWED_REMOTE_PREFIXES,
)
logger = logging.getLogger(__name__)
noop_template = ": {target}" # noop in bash
def noop(*args):
return
def get_sync_client(sync_function, delete_function=None) -> Optional["SyncClient"]:
"""Returns a sync client.
Args:
sync_function (Optional[str|function]): Sync function.
delete_function (Optional[str|function]): Delete function. Must be
the same type as sync_function if it is provided.
Raises:
ValueError if sync_function or delete_function are malformed.
"""
if sync_function is None:
return None
if delete_function and type(sync_function) != type(delete_function):
raise ValueError("Sync and delete functions must be of same type.")
if isinstance(sync_function, types.FunctionType):
delete_function = delete_function or noop
client_cls = FunctionBasedClient
elif isinstance(sync_function, str):
delete_function = delete_function or noop_template
client_cls = CommandBasedClient
else:
raise ValueError(
"Sync function {} must be string or function".format(sync_function)
)
return client_cls(sync_function, sync_function, delete_function)
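# Illustrative sketch (not part of this module): how `get_sync_client` picks a
# client class. The rsync command string and the `copy` helper below are
# hypothetical placeholders, not real Tune defaults.
def _example_get_sync_client():
    # A string template containing {source}/{target} yields a CommandBasedClient.
    command_client = get_sync_client("rsync -a {source} {target} {options}")

    # A plain function yields a FunctionBasedClient; accepting an `exclude`
    # argument avoids the legacy-signature warning.
    def copy(source, target, exclude=None):
        return None  # stand-in for a real copy implementation

    function_client = get_sync_client(copy)
    return command_client, function_client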
def get_cloud_sync_client(remote_path) -> "CommandBasedClient":
"""Returns a CommandBasedClient that can sync to/from remote storage.
Args:
remote_path (str): Path to remote storage (S3, GS or HDFS).
Raises:
ValueError if malformed remote_dir.
"""
if remote_path.startswith(S3_PREFIX):
if not distutils.spawn.find_executable("aws"):
raise ValueError(
"Upload uri starting with '{}' requires awscli tool"
" to be installed".format(S3_PREFIX)
)
sync_up_template = (
"aws s3 sync {source} {target} " "--only-show-errors {options}"
)
sync_down_template = sync_up_template
delete_template = (
"aws s3 rm {target} --recursive " "--only-show-errors {options}"
)
exclude_template = "--exclude '{pattern}'"
elif remote_path.startswith(GS_PREFIX):
if not distutils.spawn.find_executable("gsutil"):
raise ValueError(
"Upload uri starting with '{}' requires gsutil tool"
" to be installed".format(GS_PREFIX)
)
sync_up_template = "gsutil rsync -r {options} {source} {target}"
sync_down_template = sync_up_template
delete_template = "gsutil rm -r {options} {target}"
exclude_template = "-x '{regex_pattern}'"
elif remote_path.startswith(HDFS_PREFIX):
if not distutils.spawn.find_executable("hdfs"):
raise ValueError(
"Upload uri starting with '{}' requires hdfs tool"
" to be installed".format(HDFS_PREFIX)
)
sync_up_template = "hdfs dfs -put -f {source} {target}"
sync_down_template = "hdfs dfs -get -f {source} {target}"
delete_template = "hdfs dfs -rm -r {target}"
exclude_template = None
else:
raise ValueError(
f"Upload uri must start with one of: {ALLOWED_REMOTE_PREFIXES} "
f"(is: `{remote_path}`)"
)
return CommandBasedClient(
sync_up_template, sync_down_template, delete_template, exclude_template
)
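# Illustrative sketch (hypothetical bucket path, not from this module): building
# a cloud client requires the matching CLI tool ("aws" here) to be installed.
def _example_cloud_sync_client():
    client = get_cloud_sync_client("s3://my-bucket/ray-results")
    # sync_up/sync_down shell out to `aws s3 sync ...`; wait() raises TuneError
    # on a non-zero exit code.
    client.sync_up("/tmp/ray_results/exp", "s3://my-bucket/ray-results/exp")
    client.wait()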
@PublicAPI(stability="beta")
class SyncClient:
"""Client interface for interacting with remote storage options."""
def sync_up(self, source, target, exclude: Optional[List] = None):
"""Syncs up from source to target.
Args:
source (str): Source path.
target (str): Target path.
exclude (List[str]): Pattern of files to exclude, e.g.
``["*/checkpoint_*]`` to exclude trial checkpoints.
Returns:
True if sync initiation successful, False otherwise.
"""
raise NotImplementedError
def sync_down(self, source, target, exclude: Optional[List] = None):
"""Syncs down from source to target.
Args:
source (str): Source path.
target (str): Target path.
exclude (List[str]): Pattern of files to exclude, e.g.
``["*/checkpoint_*]`` to exclude trial checkpoints.
Returns:
True if sync initiation successful, False otherwise.
"""
raise NotImplementedError
def delete(self, target):
"""Deletes target.
Args:
target (str): Target path.
Returns:
True if delete initiation successful, False otherwise.
"""
raise NotImplementedError
def wait(self):
"""Waits for current sync to complete, if asynchronously started."""
pass
def wait_or_retry(self, max_retries: int = 3, backoff_s: int = 5):
"""Wait for current sync to complete or retries on error."""
pass
def reset(self):
"""Resets state."""
pass
def close(self):
"""Clean up hook."""
pass
def _is_legacy_sync_fn(func) -> bool:
sig = inspect.signature(func)
try:
sig.bind_partial(None, None, None)
return False
except TypeError:
return True
class FunctionBasedClient(SyncClient):
def __init__(self, sync_up_func, sync_down_func, delete_func=None):
self.sync_up_func = sync_up_func
self._sync_up_legacy = _is_legacy_sync_fn(sync_up_func)
self.sync_down_func = sync_down_func
self._sync_down_legacy = _is_legacy_sync_fn(sync_down_func)
if self._sync_up_legacy or self._sync_down_legacy:
if log_once("func_sync_up_legacy"):
warnings.warn(
"Your sync functions currently only accepts two params "
"(a `source` and a `target`). In the future, we will "
"pass an additional `exclude` parameter. Please adjust "
"your sync function accordingly."
)
self.delete_func = delete_func or noop
def sync_up(self, source, target, exclude: Optional[List] = None):
if self._sync_up_legacy:
self.sync_up_func(source, target)
else:
self.sync_up_func(source, target, exclude)
return True
def sync_down(self, source, target, exclude: Optional[List] = None):
if self._sync_down_legacy:
self.sync_down_func(source, target)
else:
self.sync_down_func(source, target, exclude)
return True
def delete(self, target):
self.delete_func(target)
return True
NOOP = FunctionBasedClient(noop, noop)
class CommandBasedClient(SyncClient):
def __init__(
self,
sync_up_template: str,
sync_down_template: str,
delete_template: Optional[str] = noop_template,
exclude_template: Optional[str] = None,
):
"""Syncs between two directories with the given command.
Arguments:
sync_up_template (str): A runnable string template; needs to
include replacement fields ``{source}``, ``{target}``, and
``{options}``.
sync_down_template (str): A runnable string template; needs to
include replacement fields ``{source}``, ``{target}``, and
``{options}``.
delete_template (Optional[str]): A runnable string template; needs
to include replacement field ``{target}``. Noop by default.
exclude_template (Optional[str]): A pattern with possible
replacement fields ``{pattern}`` and ``{regex_pattern}``.
Will replace ``{options}`` in the sync up/down templates
if files/directories to exclude are passed.
"""
self._validate_sync_string(sync_up_template)
self._validate_sync_string(sync_down_template)
self._validate_exclude_template(exclude_template)
self.sync_up_template = sync_up_template
self.sync_down_template = sync_down_template
self.delete_template = delete_template
self.exclude_template = exclude_template
self.logfile = None
self._closed = False
self.cmd_process = None
# Keep track of last command for retry
self._last_cmd = None
def set_logdir(self, logdir):
"""Sets the directory to log sync execution output in.
Args:
logdir (str): Log directory.
"""
self.logfile = tempfile.NamedTemporaryFile(
prefix="log_sync_out", dir=logdir, suffix=".log", delete=False
)
self._closed = False
def _get_logfile(self):
if self._closed:
raise RuntimeError(
"[internalerror] The client has been closed. "
"Please report this stacktrace + your cluster configuration "
"on Github!"
)
else:
return self.logfile
def _start_process(self, cmd: str) -> subprocess.Popen:
return subprocess.Popen(
cmd, shell=True, stderr=subprocess.PIPE, stdout=self._get_logfile()
)
def sync_up(self, source, target, exclude: Optional[List] = None):
return self._execute(self.sync_up_template, source, target, exclude)
def sync_down(self, source, target, exclude: Optional[List] = None):
# Just in case some command line sync client expects that local
# directory exists.
pathlib.Path(target).mkdir(parents=True, exist_ok=True)
return self._execute(self.sync_down_template, source, target, exclude)
def delete(self, target):
if self.is_running:
logger.warning(
f"Last sync client cmd still in progress, "
f"skipping deletion of {target}"
)
return False
final_cmd = self.delete_template.format(target=quote(target), options="")
logger.debug("Running delete: {}".format(final_cmd))
self._last_cmd = final_cmd
self.cmd_process = self._start_process(final_cmd)
return True
def wait(self):
if self.cmd_process:
_, error_msg = self.cmd_process.communicate()
error_msg = error_msg.decode("ascii")
code = self.cmd_process.returncode
args = self.cmd_process.args
self.cmd_process = None
if code != 0:
raise TuneError(
"Sync error. Ran command: {}\n"
"Error message ({}): {}".format(args, code, error_msg)
)
def wait_or_retry(self, max_retries: int = 3, backoff_s: int = 5):
assert max_retries > 0
for i in range(max_retries - 1):
try:
self.wait()
except TuneError as e:
logger.error(
f"Caught sync error: {e}. "
f"Retrying after sleeping for {backoff_s} seconds..."
)
time.sleep(backoff_s)
self.cmd_process = self._start_process(self._last_cmd)
continue
return
self.cmd_process = None
raise TuneError(f"Failed sync even after {max_retries} retries.")
def reset(self):
if self.is_running:
logger.warning("Sync process still running but resetting anyways.")
self.cmd_process = None
self._last_cmd = None
def close(self):
if self.logfile:
logger.debug(f"Closing the logfile: {str(self.logfile)}")
self.logfile.close()
self.logfile = None
self._closed = True
@property
def is_running(self):
"""Returns whether a sync or delete process is running."""
if self.cmd_process:
self.cmd_process.poll()
return self.cmd_process.returncode is None
return False
def _execute(self, sync_template, source, target, exclude: Optional[List] = None):
"""Executes sync_template on source and target."""
if self.is_running:
logger.warning(
f"Last sync client cmd still in progress, "
f"skipping sync from {source} to {target}."
)
return False
if exclude and self.exclude_template:
options = []
if "{pattern}" in self.exclude_template:
for excl in exclude:
options.append(self.exclude_template.format(pattern=excl))
elif "{regex_pattern}" in self.exclude_template:
# This is obviously not a great way to convert to regex,
# but it will do for the moment. Todo: Improve.
def _to_regex(pattern: str) -> str:
return f"({pattern.replace('*', '.*')})"
regex_pattern = "|".join(_to_regex(excl) for excl in exclude)
options.append(
self.exclude_template.format(regex_pattern=regex_pattern)
)
option_str = " ".join(options)
else:
option_str = ""
final_cmd = sync_template.format(
source=quote(source), target=quote(target), options=option_str
)
logger.debug("Running sync: {}".format(final_cmd))
self._last_cmd = final_cmd
self.cmd_process = self._start_process(final_cmd)
return True
@staticmethod
def _validate_sync_string(sync_string):
if not isinstance(sync_string, str):
raise ValueError("{} is not a string.".format(sync_string))
if "{source}" not in sync_string:
raise ValueError("Sync template missing `{source}`: " f"{sync_string}.")
if "{target}" not in sync_string:
raise ValueError("Sync template missing `{target}`: " f"{sync_string}.")
@staticmethod
def _validate_exclude_template(exclude_template):
if exclude_template:
if (
"{pattern}" not in exclude_template
and "{regex_pattern}" not in exclude_template
):
raise ValueError(
"Neither `{pattern}` nor `{regex_pattern}` found in "
f"exclude string `{exclude_template}`"
)
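# Illustrative sketch (commands and paths are hypothetical): a custom
# CommandBasedClient whose exclude patterns are substituted into {options}.
def _example_command_based_client():
    client = CommandBasedClient(
        sync_up_template="rsync -a {options} {source} {target}",
        sync_down_template="rsync -a {options} {source} {target}",
        delete_template="rm -rf {target}",
        exclude_template="--exclude '{pattern}'",
    )
    # At sync time, ["*/checkpoint_*"] becomes "--exclude '*/checkpoint_*'".
    client.sync_up("/tmp/exp", "backup:/exp", exclude=["*/checkpoint_*"])
    client.wait()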
| |
from django.db.models.signals import post_save, post_init, pre_delete
from django.db.models import Q
from vacuous.backends import load_backend
from vacuous.exceptions import FileDoesNotExist, BranchDoesNotExist
from vacuous.signals import post_sync
_adapters = set()
def iter_adapters(flavor=None):
from django.db.models.loading import get_models
# force models to be loaded
get_models()
print "loaded adapters", _adapters
for adapter in _adapters:
if flavor is None or adapter.flavor == flavor:
yield adapter
class AdapterDescriptor(object):
def __init__(self, adapter_cls):
self.adapter_cls = adapter_cls
def __get__(self, instance, model):
if instance is None:
return self.adapter_cls
return self.adapter_cls(instance)
class AdapterBase(type):
_required_properties = (
('flavor', False), ('repo', False), ('branch', False),
('path', True), ('revision', True), ('data', True),
)
def __new__(cls, name, bases, attrs):
newcls = super(AdapterBase, cls).__new__(cls, name, bases, attrs)
if newcls.__module__ != cls.__module__:
for name, writable in cls._required_properties:
if hasattr(newcls, name):
continue
getter_name, setter_name = 'get_%s' % name, 'set_%s' % name
getter, setter = getattr(newcls, getter_name), getattr(newcls, setter_name, None)
assert getter != getattr(Adapter, getter_name), "Adapter subclasses must provide a `%s` property or a %s() method" % (name, getter_name)
if writable:
assert setter != getattr(Adapter, setter_name), "Adapter subclasses must provide a `%s` property or a %s() method" % (name, setter_name)
setattr(newcls, name, property(getter, setter))
_adapters.add(newcls)
return newcls
def register(cls, model, descriptor='vacuous'):
post_save.connect(cls.post_save, sender=model)
pre_delete.connect(cls.pre_delete, sender=model)
post_init.connect(cls.post_init, sender=model)
cls.models.add(model)
if descriptor is not None:
setattr(model, descriptor, AdapterDescriptor(cls))
def update_state(cls, obj):
adapter = cls(obj)
setattr(obj, adapter.stateattr, adapter.path)
def post_init(cls, sender, **kwargs):
cls.update_state(kwargs['instance'])
def post_save(cls, sender, **kwargs):
obj, created = kwargs['instance'], kwargs['created']
adapter = cls(obj)
if not adapter.is_active():
return
backend = adapter.get_backend()
old_path, old_data = getattr(obj, adapter.stateattr, None), None
new_path, new_data = adapter.path, adapter.data
renamed = old_path and old_path != new_path
if old_path:
try:
old_data = backend.read(old_path, branch=adapter.branch)
except (FileDoesNotExist, BranchDoesNotExist):
pass
if not new_path:
if old_path:
backend.delete(old_path)
elif old_data != new_data:
adapter.write()
if renamed:
backend.delete(old_path)
elif renamed:
backend.rename(old_path, new_path)
cls.update_state(obj)
def pre_delete(cls, sender, **kwargs):
adapter = cls(kwargs['instance'])
if adapter.is_active():
adapter.delete()
def get_paths_q(cls, paths):
return Q(**{"%s__in" % cls.path_attr: paths})
def filter(cls, queryset, paths=None, branch=None):
raise NotImplementedError()
def iter_objects(cls, paths=None, branch=None):
for model in cls.models:
for obj in cls.filter(model.objects.all(), paths=paths, branch=branch):
yield obj
def proxy(cls, attr):
return property(
lambda self: getattr(self.obj, attr),
lambda self, value: setattr(self.obj, attr, value),
)
class Adapter(object):
__metaclass__ = AdapterBase
models = set()
stateattr = '_vacuous_state'
proxies = {}
encoding = 'utf-8'
def __init__(self, obj):
if type(obj) not in type(self).models:
raise TypeError("%s is not registered for type %s" % (type(self), type(obj)))
self.obj = obj
## repo properties
def get_flavor(self):
return self.flavor
def get_repo(self):
return self.repo
def get_branch(self):
return self.branch
def get_encoding(self):
return self.encoding
def get_path(self):
return self.path
def set_path(self, path):
self.path = path
def get_revision(self):
return self.revision
def set_revision(self, revision):
self.revision = revision
def get_data(self):
return self.data
def set_data(self, data):
self.data = data
## utility methods ##
def is_active(self):
return self.flavor and self.repo and self.path
def get_backend(self, cached=True):
if not self.flavor or not self.path:
return None
return load_backend(self.flavor, self.repo)
def read(self, revision=None):
if revision is None:
revision = self.revision
return self.get_backend().read(self.path, revision=revision)
def write(self, data=None):
if data is None:
data = self.data
self.get_backend().write(self.path, data)
def delete(self):
self.get_backend().delete(self.path)
def load(self, revision):
backend = self.get_backend()
self.set_data(backend.read(self.path, revision=revision))
def sync(self, commit):
backend = self.get_backend()
self.load(commit.revision)
self.obj.save()
post_sync.send_robust(
sender=type(self.obj),
adapter=self,
instance=self.obj,
commit=commit,
)
def history(self):
return self.get_backend().history(path=self.path)
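# Illustrative sketch (kept as a comment so no dummy adapter gets registered in
# `_adapters` at import time; the Page model and its fields are hypothetical):
#
#   class PageAdapter(Adapter):
#       flavor = 'git'
#       repo = 'pages'
#       branch = 'master'
#
#       def get_path(self):
#           return 'pages/%s.html' % self.obj.slug
#
#       def set_path(self, path):
#           self.obj.slug = path[len('pages/'):-len('.html')]
#
#       def get_data(self):
#           return self.obj.body
#
#       def set_data(self, data):
#           self.obj.body = data
#
#       def get_revision(self):
#           return self.obj.revision
#
#       def set_revision(self, revision):
#           self.obj.revision = revision
#
#   PageAdapter.register(Page)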
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for xarray datasets, naming and metadata.
Note on metadata conventions:
When we store data onto xarray.Dataset objects, we are (currently) a little
sloppy about coordinate metadata: we store only a single array for each set of
coordinate values, even though components of our velocity fields are typically
staggered. This is convenient for quick-and-dirty analytics, but means that
variables at the "same" coordinates location may actually be dislocated by any
offset within the unit cell.
"""
import functools
from typing import Any, Dict, Mapping, Optional, Tuple, Union
import jax
import jax.numpy as jnp
from jax_cfd.base import grids
import numpy as np
import pandas
import xarray
Array = grids.Array
GridArray = grids.GridArray
GridVariable = grids.GridVariable
# pytype complains about valid operations with xarray (e.g., see b/153704639),
# so it isn't worth the trouble of running it.
# pytype: skip-file
#
# xarray `Dataset` names for coordinates and attributes.
#
XR_VELOCITY_NAMES = ('u', 'v', 'w')
XR_SCALAR_NAMES = ('c',)
XR_SPATIAL_DIMS = ('x', 'y', 'z')
XR_WAVENUMBER_DIMS = ('kx', 'ky', 'kz')
XR_SAMPLE_NAME = 'sample'
XR_TIME_NAME = 'time'
XR_OFFSET_NAME = 'offset'
XR_SAVE_GRID_SIZE_ATTR_NAME = 'save_grid_size'
XR_DOMAIN_SIZE_NAME = 'domain_size'
XR_NDIM_ATTR_NAME = 'ndim'
XR_STABLE_TIME_STEP_ATTR_NAME = 'stable_time_step'
def velocity_trajectory_to_xarray(
trajectory: Tuple[Union[Array, GridArray, GridVariable], ...],
grid: grids.Grid = None,
time: np.ndarray = None,
attrs: Dict[str, Any] = None,
samples: bool = False,
prefix_name: str = '',
) -> xarray.Dataset:
"""Convert a trajectory of velocities to an xarray `Dataset`."""
dimension = len(trajectory)
if grid is not None:
dimension = grid.ndim
dims = (((XR_SAMPLE_NAME,) if samples else ())
+ (XR_TIME_NAME,)
+ XR_SPATIAL_DIMS[:dimension])
data_vars = {}
for component in range(dimension):
name = XR_VELOCITY_NAMES[component]
data = trajectory[component]
if isinstance(data, GridArray) or isinstance(data, GridVariable):
data = data.data
var_attrs = {}
if grid is not None:
var_attrs[XR_OFFSET_NAME] = grid.cell_faces[component]
data_vars[prefix_name + name] = xarray.Variable(dims, data, var_attrs)
for component in range(dimension, len(trajectory)):
name = XR_SCALAR_NAMES[component - dimension]
data = trajectory[component]
var_attrs = {}
if isinstance(data, GridArray) or isinstance(data, GridVariable):
var_attrs[XR_OFFSET_NAME] = data.offset
data = data.data
data_vars[prefix_name + name] = xarray.Variable(dims, data, var_attrs)
if samples:
num_samples = next(iter(data_vars.values())).shape[0]
sample_ids = np.arange(num_samples)
else:
sample_ids = None
coords = construct_coords(grid, time, sample_ids)
return xarray.Dataset(data_vars, coords, attrs)
def construct_coords(
grid: Optional[grids.Grid] = None,
times: Optional[np.ndarray] = None,
sample_ids: Optional[np.ndarray] = None,
) -> Mapping[str, np.ndarray]:
"""Create coordinate arrays."""
coords = {}
if grid is not None:
axes = grid.axes(grid.cell_center)
coords.update({dim: ax for dim, ax in zip(XR_SPATIAL_DIMS, axes)})
if times is not None:
coords[XR_TIME_NAME] = times
if sample_ids is not None:
coords[XR_SAMPLE_NAME] = sample_ids
return coords
def grid_from_attrs(dataset_attrs) -> grids.Grid:
"""Constructs a `Grid` object from dataset attributes."""
grid_size = dataset_attrs[XR_SAVE_GRID_SIZE_ATTR_NAME]
ndim = dataset_attrs[XR_NDIM_ATTR_NAME]
grid_shape = (grid_size,) * ndim
if XR_DOMAIN_SIZE_NAME in dataset_attrs:
domain_size = dataset_attrs[XR_DOMAIN_SIZE_NAME]
elif 'domain_size_multiple' in dataset_attrs:
# TODO(shoyer): remove this legacy case, once we no longer use datasets
# generated prior to 2020-09-18
domain_size = 2 * np.pi * dataset_attrs['domain_size_multiple']
else:
raise ValueError(
f'could not figure out domain size from attrs:\n{dataset_attrs}')
grid_domain = [(0, domain_size)] * ndim
grid = grids.Grid(grid_shape, domain=grid_domain)
return grid
def vorticity_2d(ds: xarray.Dataset) -> xarray.DataArray:
"""Calculate vorticity on a 2D dataset."""
# Vorticity is calculated from staggered velocities at offset=(1, 1).
dy = ds.y[1] - ds.y[0]
dx = ds.x[1] - ds.x[0]
dv_dx = (ds.v.roll(x=-1, roll_coords=False) - ds.v) / dx
du_dy = (ds.u.roll(y=-1, roll_coords=False) - ds.u) / dy
return (dv_dx - du_dy).rename('vorticity')
def enstrophy_2d(ds: xarray.Dataset) -> xarray.DataArray:
"""Calculate entrosphy over a 2D dataset."""
return (vorticity_2d(ds) ** 2 / 2).rename('enstrophy')
def magnitude(
u: xarray.DataArray,
v: Optional[xarray.DataArray] = None,
w: Optional[xarray.DataArray] = None,
) -> xarray.DataArray:
"""Calculate the magnitude of a velocity field."""
total = sum((c * c.conj()).real for c in [u, v, w] if c is not None)
return total ** 0.5
def speed(ds: xarray.Dataset) -> xarray.DataArray:
"""Calculate speed at each point in a velocity field."""
args = [ds[k] for k in XR_VELOCITY_NAMES if k in ds]
return magnitude(*args).rename('speed')
def kinetic_energy(ds: xarray.Dataset) -> xarray.DataArray:
"""Calculate kinetic energy at each point in a velocity field."""
return (speed(ds) ** 2 / 2).rename('kinetic_energy')
def fourier_transform(array: xarray.DataArray) -> xarray.DataArray:
"""Calculate the fourier transform of an array, with labeled coordinates."""
# TODO(shoyer): consider switching to use xrft? https://github.com/xgcm/xrft
dims = [dim for dim in XR_SPATIAL_DIMS if dim in array.dims]
axes = [-1, -2, -3][:len(dims)]
result = xarray.apply_ufunc(
functools.partial(np.fft.fftn, axes=axes), array,
input_core_dims=[dims],
output_core_dims=[['k' + d for d in dims]],
output_sizes={'k' + d: array.sizes[d] for d in dims},
output_dtypes=[np.complex128],
dask='parallelized')
for d in dims:
step = float(array.coords[d][1] - array.coords[d][0])
freqs = 2 * np.pi * np.fft.fftfreq(array.sizes[d], step)
result.coords['k' + d] = freqs
# Ensure frequencies are in ascending order (equivalent to fftshift)
rolls = {'k' + d: array.sizes[d] // 2 for d in dims}
return result.roll(rolls, roll_coords=True)
def periodic_correlate(u, v):
"""Periodic correlation of arrays `u`, `v`, implemented using the FFT."""
return np.fft.ifft(np.fft.fft(u).conj() * np.fft.fft(v)).real
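def _example_periodic_autocorrelation():
  # Illustrative sketch (not part of the library): for a periodic signal the
  # zero-lag value equals sum(u * u) by Parseval's theorem.
  x = np.linspace(0, 2 * np.pi, 64, endpoint=False)
  u = np.sin(x)
  corr = periodic_correlate(u, u)
  return corr  # corr[0] == np.sum(u * u) up to floating point error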
def spatial_autocorrelation(array, spatial_axis='x'):
"""Computes spatial autocorrelation of `array` along `spatial_axis`."""
spatial_axis_size = array.sizes[spatial_axis]
out_axis_name = 'd' + spatial_axis
full_result = xarray.apply_ufunc(
lambda x: periodic_correlate(x, x) / spatial_axis_size, array,
input_core_dims=[[spatial_axis]],
output_core_dims=[[out_axis_name]])
# we only report the unique half of the autocorrelation.
num_unique_displacements = spatial_axis_size // 2
result = full_result.isel({out_axis_name: slice(0, num_unique_displacements)})
displacement_coords = array.coords[spatial_axis][:num_unique_displacements]
result.coords[out_axis_name] = (out_axis_name, displacement_coords)
return result
@functools.partial(jax.jit, static_argnums=(0,), backend='cpu')
def _jax_numpy_add_at_zeros(shape, indices, values):
result = jnp.zeros(shape, dtype=values.dtype)
# equivalent to np.add.at(result, (..., indices), array), but much faster
return result.at[..., indices].add(values)
def _binned_sum_numpy(
array: np.ndarray,
indices: np.ndarray,
num_bins: int,
) -> np.ndarray:
"""NumPy helper function for summing over bins."""
mask = np.logical_not(np.isnan(indices))
int_indices = indices[mask].astype(int)
shape = array.shape[:-indices.ndim] + (num_bins,)
result = _jax_numpy_add_at_zeros(shape, int_indices, array[..., mask])
return np.asarray(result)
def groupby_bins_sum(
array: xarray.DataArray,
group: xarray.DataArray,
bins: np.ndarray,
labels: np.ndarray,
) -> xarray.DataArray:
"""Faster equivalent of Xarray's groupby_bins(...).sum()."""
# TODO(shoyer): remove this in favor of groupby_bin() once xarray's
# implementation is improved: https://github.com/pydata/xarray/issues/4473
bin_name = group.name + '_bins'
indices = group.copy(
data=pandas.cut(np.ravel(group), bins, labels=False).reshape(group.shape)
)
result = xarray.apply_ufunc(
_binned_sum_numpy, array, indices,
input_core_dims=[indices.dims, indices.dims],
output_core_dims=[[bin_name]],
output_dtypes=[array.dtype],
output_sizes={bin_name: labels.size},
kwargs={'num_bins': bins.size - 1},
dask='parallelized',
)
result[bin_name] = labels
return result
def _isotropize_binsum(ndim, energy):
"""Calculate energy spectrum summing over bins in wavenumber space."""
wavenumbers = [energy[name] for name in XR_WAVENUMBER_DIMS[:ndim]]
k_spacing = max(float(k[1] - k[0]) for k in wavenumbers)
k_max = min(float(w.max()) for w in wavenumbers) - 0.5 * k_spacing
k = magnitude(*wavenumbers).rename('k')
bounds = k_spacing * (np.arange(1, round(k_max / k_spacing) + 2) - 0.5)
labels = k_spacing * np.arange(1, round(k_max / k_spacing) + 1)
binned = groupby_bins_sum(energy, k, bounds, labels)
spectrum = binned.rename(k_bins='k')
return spectrum
def _isotropize_interpolation_2d(
energy, interpolation_method, num_quadrature_points,
):
"""Caclulate energy spectrum of a 2D signal with interpolation."""
# Calculate even spaced discrete levels for wavenumber magnitude
wavenumbers = [energy[name] for name in XR_WAVENUMBER_DIMS[:2]]
k_spacing = max(float(k[1] - k[0]) for k in wavenumbers)
k_max = min(float(w.max()) for w in wavenumbers) - 0.5 * k_spacing
k_values = k_spacing * np.arange(1, round(k_max / k_spacing) + 1)
k = xarray.DataArray(k_values, dims='k', coords={'k': k_values})
angle_values = np.linspace(
0, 2 * np.pi, num=num_quadrature_points, endpoint=False)
angle = xarray.DataArray(angle_values, dims='angle')
# Sample the spectrum at each point on the boundary of the circle with
# radius k
kx = k * np.cos(angle)
ky = k * np.sin(angle)
# Interpolation on log(energy), which is much smoother in wavenumber space
# than the energy itself (which decays quite rapidly)
density = np.exp(
np.log(energy).interp(kx=kx, ky=ky, method=interpolation_method)
)
# Integrate over the edge of each circle
spectrum = 2 * np.pi * k * density.mean('angle')
return spectrum
def isotropize(
array: xarray.DataArray,
method: Optional[str] = None,
interpolation_method: str = 'linear',
num_quadrature_points: int = 100,
) -> xarray.DataArray:
"""Isotropize an ND spectrum by averaging over all angles.
Args:
array: array to isotropically average, with one or more dimensions
corresponding to wavenumbers.
method: either "interpolation" or "binsum".
interpolation_method: either "linear" or "nearest". Only used if using
method="interpolation".
num_quadrature_points: number of points to use when integrating over
wavenumbers with method="interpolation".
Returns:
Energy spectra as a function of wavenumber magnitude.
"""
ndim = sum(dim in array.dims for dim in XR_WAVENUMBER_DIMS)
if ndim == 0:
raise ValueError(f'no frequency dimensions found: {array.dims}')
if method is None:
method = 'interpolation' if ndim == 2 else 'binsum'
if method == 'interpolation':
if ndim != 2:
raise ValueError('interpolation not yet supported for non-2D inputs')
# TODO(shoyer): switch to more accurate algorithms for both 1D and 3D, too:
# - 1D can simply add up the energy at positive and negative frequencies
# - 3D can efficiently integrate over all angles using Lebedev quadrature:
# https://en.wikipedia.org/wiki/Lebedev_quadrature
return _isotropize_interpolation_2d(
array, interpolation_method, num_quadrature_points)
elif method == 'binsum':
# NOTE(shoyer): I believe this function is equivalent to
# xrft.isotropize(), but is faster & more efficient because we
# use groupby_bins_sum(). See https://github.com/xgcm/xrft/issues/9
return _isotropize_binsum(ndim, array)
else:
raise ValueError(f'invalid method: {method}')
def energy_spectrum(ds: xarray.Dataset) -> xarray.DataArray:
"""Calculate the kinetic energy spectra at each wavenumber.
Args:
ds: dataset with `u`, `v` and/or `w` velocity components and corresponding
spatial dimensions.
Returns:
Energy spectra as a function of wavenumber instead of space.
"""
ndim = sum(dim in ds.dims for dim in 'xyz')
velocity_components = list(XR_VELOCITY_NAMES[:ndim])
fourier_ds = ds[velocity_components].map(fourier_transform)
return kinetic_energy(fourier_ds)
def isotropic_energy_spectrum(
ds: xarray.Dataset,
average_dims: Tuple[str, ...] = (),
) -> xarray.DataArray:
"""Calculate the energy spectra at each scalar wavenumber.
Args:
ds: dataset with `u`, `v` and/or `w` velocity components and corresponding
spatial dimensions.
average_dims: dimensions to average over before isotropic averaging.
Returns:
Energy spectra as a function of wavenumber magnitude, without spatial
dimensions.
"""
return isotropize(energy_spectrum(ds).mean(average_dims))
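def _example_isotropic_energy_spectrum():
  # Illustrative sketch (not part of the library; values are arbitrary): build a
  # small periodic 2D velocity dataset and reduce it to a 1D spectrum. The 2D
  # path uses interpolation, which relies on scipy being available.
  n = 32
  x = 2 * np.pi * np.arange(n) / n
  rng = np.random.RandomState(0)
  ds = xarray.Dataset(
      {'u': (('x', 'y'), rng.randn(n, n)),
       'v': (('x', 'y'), rng.randn(n, n))},
      coords={'x': x, 'y': x})
  return isotropic_energy_spectrum(ds)  # DataArray indexed by wavenumber `k`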
def velocity_spatial_correlation(
ds: xarray.Dataset,
axis: str
) -> xarray.Dataset:
"""Computes velocity correlation along `axis` for all velocity components."""
ndim = sum(dim in ds.dims for dim in 'xyz')
velocity_components = list(XR_VELOCITY_NAMES[:ndim])
correlation_fn = lambda x: spatial_autocorrelation(x, axis)
correlations = ds[velocity_components].map(correlation_fn)
name_mapping = {u: '_'.join([u, axis, 'correlation'])
for u in velocity_components}
return correlations.rename(name_mapping)
def normalize(array: xarray.DataArray, state_dims: Tuple[str, ...]):
"""Returns `array` with slices along `state_dims` normalized to unity."""
norm = np.sqrt((array ** 2).sum(state_dims))
return array / norm
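def _example_normalize():
  # Illustrative sketch (not part of the library): give each column of a small
  # array unit L2 norm along the 'x' dimension.
  data = xarray.DataArray(np.arange(6.0).reshape(3, 2), dims=('x', 'state'))
  return normalize(data, ('x',))  # (result ** 2).sum('x') equals 1.0 per state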
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
<your converter>
[extends] converter.Base
[extends] transformer.Base
[extends] gast.NodeTransformer
[uses] transformer.SourceInfo
[uses] converter.EntityContext
[uses] converter.ProgramContext
[uses] transformer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
while <program_ctx has more entities to convert>:
entity, source_info = <get next entity from program_ctx>
entity_ctx = EntityContext(program_ctx, source_info)
for <each ConverterClass>:
converter = ConverterClass(entity_ctx)
# May update entity_ctx and program_ctx
entity = converter.visit(entity)
<add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from enum import Enum
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import naming
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import live_values
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.autograph.pyct.static_analysis import type_info
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
class ProgramContext(object):
"""ProgramContext keeps track of converting function hierarchies.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
recursive: bool, whether to recursively convert any functions that the
decorator function may call.
autograph_decorators: Tuple[Callable, ...], decorator functions that belong
to AutoGraph. These require special treatment.
dependency_cache: Dict[Any, ast.AST], the original entities mapped to their
converted AST
additional_imports: Set[Any], additional entities which for any reason
cannot be attached after loading and need to be explicitly imported
in the generated code
name_map: Dict[str, str], map of original entity name to the name of
their converted counterparts
autograph_module: Module, a reference to the autograph module. This
needs to be specified by the caller to avoid circular dependencies.
uncompiled_modules: Set[Tuple[str, ...]], with each tuple representing the
fully qualified name of a package containing functions that will not be
compiled.
required_imports: str, containing an import statement on each line. These
are all the imports necessary for the compiled code to run, in addition
to the closures of each entity, which are attached dynamically.
"""
def __init__(
self,
recursive,
autograph_decorators,
partial_types,
autograph_module,
uncompiled_modules,
):
self.recursive = recursive
self.autograph_decorators = autograph_decorators
self.partial_types = partial_types if partial_types else ()
self.autograph_module = autograph_module
self.uncompiled_modules = uncompiled_modules
# Required to output dependencies in discovery order, which should match
# the reverse dependency order.
self.dependency_cache = collections.OrderedDict()
self.additional_imports = set()
self.name_map = {}
@property
def required_imports(self):
"""Returns a block containing all imports required by the converted code."""
# TODO(mdan): Check that these don't clobber one another.
return '\n'.join(config.COMPILED_IMPORT_STATEMENTS +
tuple(self.additional_imports))
def new_namer(self, namespace):
return naming.Namer(namespace, self.recursive, self.name_map,
self.partial_types)
def update_name_map(self, namer):
"""Updates renamed_calls based on the recent activity from the namer.
Whenever we convert a new entity, any references to other entities are being
renamed to match their soon-to-be-converted counterparts. The namer keeps
track of these renames. When conversion is complete, we copy those renames
so that when those referenced entities are being converted, their new name
matches.
Args:
namer: naming.Namer
Raises:
ValueError: when an entity was renamed twice and to different names.
"""
# TODO(mdan): Have call_trees do this directly.
# This is done so indirectly, via the namer, for historic reasons. But
# now we can have the converter that does the rename record the new name
# as well and skip this step altogether.
for o, name in namer.renamed_calls.items():
if o in self.name_map:
if self.name_map[o] != name:
raise ValueError(
'Calls to %s were converted using multiple names (%s). This is '
'possible when an entity with one of these names already '
'existed. To fix, avoid using any of these names.' %
(o, (name, self.name_map[o])))
else:
self.name_map[o] = name
def add_to_cache(self, original_entity, converted_ast):
self.dependency_cache[original_entity] = converted_ast
class EntityContext(object):
"""Tracks the conversion of a single entity.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
namer: Namer
info: transformer.EntityInfo
program: ProgramContext
"""
def __init__(self, namer, entity_info, program_ctx):
self.namer = namer
self.info = entity_info
self.program = program_ctx
class Base(transformer.Base):
"""All converters should inherit from this class.
Attributes:
ctx: EntityContext
"""
def __init__(self, ctx):
super(Base, self).__init__(ctx.info)
self.ctx = ctx # Keeping this short because it's used frequently.
self._used = False
self._ast_depth = 0
def get_definition_directive(self, node, directive, arg, default):
"""Returns the unique directive for a symbol, or a default if none exist.
See lang/directives.py for details on directives.
Args:
node: ast.AST
directive: Callable[..., Any]
arg: str
default: Any
Raises:
ValueError: if conflicting annotations have been found
"""
defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
if not defs:
return default
# TODO(mdan): Simplify this.
arg_values = []
for def_ in defs:
if (directive not in def_.directives or
arg not in def_.directives[directive]):
continue
arg_value = def_.directives[directive][arg]
for prev_value in arg_values:
if not ast_util.matches(arg_value, prev_value):
qn = anno.getanno(node, anno.Basic.QN)
raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' %
(qn, directive.__name__, arg,
compiler.ast_to_source(arg_value).strip(),
compiler.ast_to_source(prev_value).strip()))
arg_values.append(arg_value)
if not arg_values:
return default
arg_value, = arg_values
return arg_value
def visit(self, node):
if not self._ast_depth:
if self._used:
raise ValueError('converter objects cannot be reused')
self._used = True
self._ast_depth += 1
try:
return super(Base, self).visit(node)
finally:
self._ast_depth -= 1
class AnnotatedDef(reaching_definitions.Definition):
def __init__(self):
super(AnnotatedDef, self).__init__()
self.directives = {}
class AgAnno(Enum):
"""Annotation labels specific to AutoGraph. See anno.py."""
DIRECTIVES = 'User directives associated with the annotated statement.'
def __repr__(self):
return self.name
def standard_analysis(node, context, is_initial=False):
"""Performs a complete static analysis of the given code.
Args:
node: ast.AST
context: converter.EntityContext
is_initial: bool, whether this is the initial analysis done on the input
source code
Returns:
ast.AST, same as node, with the static analysis annotations added
"""
# TODO(mdan): Clear static analysis here.
# TODO(mdan): Consider not running all analyses every time.
# TODO(mdan): Don't return a node because it's modified by reference.
graphs = cfg.build(node)
node = qual_names.resolve(node)
node = activity.resolve(node, context.info, None)
node = reaching_definitions.resolve(node, context.info, graphs, AnnotatedDef)
node = liveness.resolve(node, context.info, graphs)
node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
node = type_info.resolve(node, context.info)
# This second call allows resolving first-order class attributes.
node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
if is_initial:
anno.dup(
node,
{
anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
},
)
return node
def apply_(node, context, converter_module):
"""Applies a converter to an AST.
Args:
node: ast.AST
context: converter.EntityContext
converter_module: converter.Base
Returns:
ast.AST, the result of applying converter to node
"""
node = standard_analysis(node, context)
node = converter_module.transform(node, context)
return node
| |
"""Support for Notion."""
from __future__ import annotations
import asyncio
from datetime import timedelta
from typing import Any
from aionotion import async_get_client
from aionotion.errors import InvalidCredentialsError, NotionError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client,
config_validation as cv,
device_registry as dr,
)
from homeassistant.helpers.entity import EntityDescription
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import DATA_COORDINATOR, DOMAIN, LOGGER
PLATFORMS = ["binary_sensor", "sensor"]
ATTR_SYSTEM_MODE = "system_mode"
ATTR_SYSTEM_NAME = "system_name"
DEFAULT_ATTRIBUTION = "Data provided by Notion"
DEFAULT_SCAN_INTERVAL = timedelta(minutes=1)
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Notion as a config entry."""
hass.data.setdefault(DOMAIN, {DATA_COORDINATOR: {}})
if not entry.unique_id:
hass.config_entries.async_update_entry(
entry, unique_id=entry.data[CONF_USERNAME]
)
session = aiohttp_client.async_get_clientsession(hass)
try:
client = await async_get_client(
entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD], session=session
)
except InvalidCredentialsError:
LOGGER.error("Invalid username and/or password")
return False
except NotionError as err:
LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady from err
async def async_update() -> dict[str, dict[str, Any]]:
"""Get the latest data from the Notion API."""
data: dict[str, dict[str, Any]] = {"bridges": {}, "sensors": {}, "tasks": {}}
tasks = {
"bridges": client.bridge.async_all(),
"sensors": client.sensor.async_all(),
"tasks": client.task.async_all(),
}
results = await asyncio.gather(*tasks.values(), return_exceptions=True)
for attr, result in zip(tasks, results):
if isinstance(result, NotionError):
raise UpdateFailed(
f"There was a Notion error while updating {attr}: {result}"
)
if isinstance(result, Exception):
raise UpdateFailed(
f"There was an unknown error while updating {attr}: {result}"
)
for item in result:
if attr == "bridges" and item["id"] not in data["bridges"]:
# If a new bridge is discovered, register it:
hass.async_create_task(async_register_new_bridge(hass, item, entry))
data[attr][item["id"]] = item
return data
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][
entry.entry_id
] = DataUpdateCoordinator(
hass,
LOGGER,
name=entry.data[CONF_USERNAME],
update_interval=DEFAULT_SCAN_INTERVAL,
update_method=async_update,
)
await coordinator.async_config_entry_first_refresh()
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a Notion config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN][DATA_COORDINATOR].pop(entry.entry_id)
return unload_ok
async def async_register_new_bridge(
hass: HomeAssistant, bridge: dict, entry: ConfigEntry
) -> None:
"""Register a new bridge."""
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, bridge["hardware_id"])},
manufacturer="Silicon Labs",
model=bridge["hardware_revision"],
name=bridge["name"] or bridge["id"],
sw_version=bridge["firmware_version"]["wifi"],
)
class NotionEntity(CoordinatorEntity):
"""Define a base Notion entity."""
def __init__(
self,
coordinator: DataUpdateCoordinator,
task_id: str,
sensor_id: str,
bridge_id: str,
system_id: str,
description: EntityDescription,
) -> None:
"""Initialize the entity."""
super().__init__(coordinator)
bridge = self.coordinator.data["bridges"].get(bridge_id, {})
sensor = self.coordinator.data["sensors"][sensor_id]
self._attr_device_info = {
"identifiers": {(DOMAIN, sensor["hardware_id"])},
"manufacturer": "Silicon Labs",
"model": sensor["hardware_revision"],
"name": sensor["name"],
"sw_version": sensor["firmware_version"],
"via_device": (DOMAIN, bridge.get("hardware_id")),
}
self._attr_extra_state_attributes = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._attr_name = f'{sensor["name"]}: {description.name}'
self._attr_unique_id = (
f'{sensor_id}_{coordinator.data["tasks"][task_id]["task_type"]}'
)
self._bridge_id = bridge_id
self._sensor_id = sensor_id
self._system_id = system_id
self._task_id = task_id
self.entity_description = description
@property
def available(self) -> bool:
"""Return True if entity is available."""
return (
self.coordinator.last_update_success
and self._task_id in self.coordinator.data["tasks"]
)
async def _async_update_bridge_id(self) -> None:
"""Update the entity's bridge ID if it has changed.
Sensors can move to other bridges based on signal strength, etc.
"""
sensor = self.coordinator.data["sensors"][self._sensor_id]
# If the sensor's bridge ID is the same as what we had before or if it points
# to a bridge that doesn't exist (which can happen due to a Notion API bug),
# return immediately:
if (
self._bridge_id == sensor["bridge"]["id"]
or sensor["bridge"]["id"] not in self.coordinator.data["bridges"]
):
return
self._bridge_id = sensor["bridge"]["id"]
device_registry = await dr.async_get_registry(self.hass)
this_device = device_registry.async_get_device(
{(DOMAIN, sensor["hardware_id"])}
)
bridge = self.coordinator.data["bridges"][self._bridge_id]
bridge_device = device_registry.async_get_device(
{(DOMAIN, bridge["hardware_id"])}
)
if not bridge_device or not this_device:
return
device_registry.async_update_device(
this_device.id, via_device_id=bridge_device.id
)
@callback
def _async_update_from_latest_data(self) -> None:
"""Update the entity from the latest data."""
raise NotImplementedError
@callback
def _handle_coordinator_update(self) -> None:
"""Respond to a DataUpdateCoordinator update."""
if self._task_id in self.coordinator.data["tasks"]:
self.hass.async_create_task(self._async_update_bridge_id())
self._async_update_from_latest_data()
self.async_write_ha_state()
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
self._async_update_from_latest_data()
| |
import re
import base64
import json
import urllib2
import urllib
from django.shortcuts import render,HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.core.mail import EmailMultiAlternatives, send_mail, get_connection
from django.utils.html import strip_tags
from django.template.loader import render_to_string
from django.db.models import Max, Count, Sum
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import user_passes_test
from django.views.decorators.csrf import csrf_exempt
import logging
# custom imports
from python_quizzup import settings
from pyquiz.models import Questions, Choices, LeaderBoard, QuizHistory, CustomUser as User, UserAnswers, UserBadges, Badges, GCMRegistrations
from pyquiz import utils
import config
logger = logging.getLogger(__name__)
def home(request):
context = {}
logger.debug("this is a debug message!")
print request.META['HTTP_USER_AGENT']
return render(request,'pyquiz/home.html', context)
@login_required
def index(request):
context = {}
latest_week = Questions.objects.all().values('week_id').order_by('-week_id')
if latest_week:
latest_week = latest_week[0]
print latest_week
last_quiz = LeaderBoard.objects.filter(user_id = request.user.id).order_by('-week_id')
print last_quiz
context['week_id'] = latest_week['week_id'] if not last_quiz or last_quiz[0].week_id != latest_week['week_id'] else ''
context['other_weeks'] = []
quiz_id_history = [ quiz.week_id for quiz in QuizHistory.objects.filter(user_id=request.user.id)]
for quiz_id in xrange(3,latest_week['week_id']):
if quiz_id not in quiz_id_history:
context['other_weeks'].append(quiz_id)
badges = Badges.objects.all()
user_badges_obj = UserBadges.objects.filter(user_id=request.user.id)
user_badges = [ item.badge_id for item in user_badges_obj ]
context['user_badges_count'] = len(user_badges_obj)
context['badges'] = {}
for badge in badges:
context['badges'][badge.badge_id] = {'badge_details':badge}
if badge in user_badges:
context['badges'][badge.badge_id]['unlocked'] = True
# context['week_id'] = '' #TO TEST THE NO ACTIVE QUIZ LOGIC
return render(request,'pyquiz/index.html', context)
@login_required
def quiz(request, week_id):
print week_id
if request.method == "GET":
last_seen_quiz = QuizHistory.objects.filter(user_id = request.user.id, week_id=week_id)
if last_seen_quiz:
return render(request, 'pyquiz/404.html', {})
latest_week = Questions.objects.all().values('week_id').order_by('-week_id')[0]
print latest_week
# if not (last_quiz and week_id != last_quiz[0].week_id+1): #TEST INVALID WEEKID LOGIC
if request.method == "GET":
quiz_obj = QuizHistory(user_id=request.user, week_id=week_id).save()
questions = Questions.objects.filter(week_id = int(week_id)).order_by('?')
print questions
questions_set = []
for question in questions:
question.question = question.question.replace('<code>','<div class="callout callout-info"><pre>').replace('</code>','</pre></div>')
questions_set.append({'question':question, 'choices':Choices.objects.get(question_id = question.id)})
print questions_set
return render(request,'pyquiz/quiz.html',{'questions_set':questions_set})
else:
print request.POST
score = 0
for field, value in request.POST.items():
if not re.match('(csrfmiddlewaretoken)|(t_(\d+))|(question_(\d+))|(timeout_(\d+))', field):
# print value,Choices.objects.get(question_id = field).answer
# print value == Choices.objects.get(question_id = field).answer
user_answer_obj = UserAnswers(user_id=request.user, question=request.POST['question_' + field], user_answer=value, week_id=week_id)
if value == Choices.objects.get(question_id = field).answer:
user_answer_obj.is_correct = True
print score
score += 15 + min(int(request.POST['t_' + field]), 10)*1.5
print score
else:
user_answer_obj.is_correct = False
user_answer_obj.save()
LeaderBoard(user_id = request.user, week_id = week_id, points = score).save()
badge_id = None
if score>=100 and score<200:
badge_id = 6
elif score>=200 and score<250:
badge_id = 7
elif score>=250:
badge_id = 8
if badge_id:
user_badges_obj = UserBadges.objects.filter(user_id=request.user.id)
user_badges = [ item.badge_id for item in user_badges_obj ]
if badge_id not in user_badges:
UserBadges(user_id=request.user, badge_id=Badges.objects.get(badge_id=badge_id)).save()
return HttpResponseRedirect(reverse('leaderboard', args=('/weekly/' + week_id,)))
def login_user(request):
context = {'error':{}}
if request.method == 'POST':
print request.POST
user = authenticate(email=request.POST.get('username'), password=request.POST.get('password'))
print user
if user is not None:
# the password verified for the user
if user.is_active:
print("User is valid, active and authenticated")
login(request, user)
whats_new_modal = '' if request.META['HTTP_USER_AGENT'] in config.ALLOWED_APP_USER_AGENTS else '#whats-new-modal'
return HttpResponseRedirect(request.GET.get('next',reverse('index')) + whats_new_modal)
else:
print("The password is valid, but the account has been disabled!")
context['error']['general'] = 'The password is valid, but the account has been disabled!'
else:
# the authentication system was unable to verify the username and password
print("The username and password were incorrect.")
context['error']['general'] = 'The username and password were incorrect.'
return render(request,'pyquiz/login.html',context)
def get_leaderboard_stats():
last_quiz = LeaderBoard.objects.all().aggregate(Max('week_id'))
context = {}
leaderboard = {}
if last_quiz['week_id__max']:
leaderboard_old = LeaderBoard.objects.raw('select id,user_id_id,SUM(points) as points from pyquiz_leaderboard where week_id<%s group by user_id_id order by points desc', [last_quiz['week_id__max']])
leaderboard_new = LeaderBoard.objects.raw('select id,user_id_id,SUM(points) as points from pyquiz_leaderboard group by user_id_id order by points desc')
if leaderboard_old:
if leaderboard_new:
leaderboard_old_map = {item.user_id:{'points':item.points,'rank':rank+1} for rank, item in enumerate(leaderboard_old)}
len_leaderboard_old_map = len(list(leaderboard_new))
for rank,item in enumerate(leaderboard_new):
if not leaderboard.get(item.user_id):
leaderboard[item.user_id] = {'username':item.user_id.email,'points':item.points,'rank':rank + 1,'previous_rank':leaderboard_old_map.get(item.user_id,{'rank':len_leaderboard_old_map})['rank'], 'first_name':item.user_id.first_name, 'last_name':item.user_id.last_name}
leaderboard[item.user_id]['rank_diff'] = leaderboard[item.user_id]['previous_rank'] - leaderboard[item.user_id]['rank']
else:
context['hide_status'] = True
leaderboard_objs = leaderboard_old
leaderboard = {item.user_id:{'username':item.user_id.email,'points':item.points,'rank':rank+1, 'first_name':item.user_id.first_name, 'last_name':item.user_id.last_name} for rank, item in enumerate(leaderboard_objs)}
else:
context['hide_status'] = True
leaderboard_objs = leaderboard_new
leaderboard = {item.user_id:{'username':item.user_id.email,'points':item.points,'rank':rank+1, 'first_name':item.user_id.first_name, 'last_name':item.user_id.last_name} for rank, item in enumerate(leaderboard_objs)}
return context, leaderboard
@login_required
def show_leaderboard(request, board_type='overall', week_id=1):
context = {}
leaderboard = {}
limit = int(request.GET.get('limit',0))
if board_type and board_type.lower() == 'weekly' and week_id:
leaderboard_objs = LeaderBoard.objects.filter(week_id = week_id).order_by('-points')
if limit:
leaderboard_objs = leaderboard_objs[:limit]
leaderboard = {item.user_id:{'username':item.user_id.email,'points':item.points,'rank':rank+1, 'first_name':item.user_id.first_name, 'last_name':item.user_id.last_name} for rank, item in enumerate(leaderboard_objs)}
context['weekly'] = True
context['hide_status'] = True
elif board_type and board_type.lower() == 'monthly':
#leaderboard_objs = LeaderBoard.objects.all().values('user_id').annotate(points=Sum('points')).order_by('-points')
leaderboard_objs = LeaderBoard.objects.raw('select id,user_id_id,sum(points) as points from (select * from pyquiz_leaderboard where week_id > %(week_id)s ) b group by user_id_id order by points desc'%{'week_id':int(week_id)-4})
if limit:
leaderboard_objs = list(leaderboard_objs)[:limit]
leaderboard = {item.user_id:{'username':item.user_id.email,'points':item.points,'rank':rank+1, 'first_name':item.user_id.first_name, 'last_name':item.user_id.last_name} for rank, item in enumerate(leaderboard_objs)}
context['weekly'] = True
context['hide_status'] = True
else:
if limit:
leaderboard_objs = LeaderBoard.objects.raw('select id,user_id_id,SUM(points) as points from pyquiz_leaderboard group by user_id_id order by points desc')
leaderboard_objs = list(leaderboard_objs)[:limit]
leaderboard = {item.user_id:{'username':item.user_id.email,'points':item.points,'rank':rank+1, 'first_name':item.user_id.first_name, 'last_name':item.user_id.last_name} for rank, item in enumerate(leaderboard_objs)}
context['weekly'] = True
context['hide_status'] = True
else:
extra_context, leaderboard = get_leaderboard_stats()
context.update(extra_context)
context['leaderboard'] = leaderboard
print context
print leaderboard
if request.is_ajax():
leaderboard_json = {}
for key,value in context['leaderboard'].iteritems():
value['points'] = int(value['points'])
leaderboard_json[value['rank']] = value
context['leaderboard'] = leaderboard_json
return HttpResponse(json.dumps(context),mimetype="application/javascript")
return render(request, 'pyquiz/leaderboard.html', context)
def register(request):
context = {'error':{}}
if request.method == "POST":
print request.POST
context['post_data'] = request.POST
if User.objects.filter(email=request.POST['username']).exists():
context['error']['username'] = 'Username Already taken :('
else:
new_user = User.objects.create_user(request.POST['username'].split("@")[0], request.POST['username'], request.POST['password'], \
first_name=request.POST['first_name'], last_name=request.POST['last_name'], role=request.POST['role'])
new_user.is_active = 0
utils.send_mail_via_gmail('pyquiz/register-mail.html', {'domain':settings.DOMAIN, 'email_id':base64.b64encode(request.POST['username'])},\
'PyQuiz:Welcome Aboard!', [request.POST['username']] \
)
context['success'] = True
print context
return render(request,'pyquiz/register.html',context)
@login_required
def edit_profile(request):
context = {}
if request.method == "POST":
print request.POST
context['post_data'] = request.POST
user_obj = User.objects.get(id=request.user.id)
user_obj.first_name = request.POST['first_name']
user_obj.last_name = request.POST['last_name']
user_obj.confirm_password = request.POST['confirm_password']
user_obj.role = request.POST['role']
user_obj.save()
context['success'] = True
context['post_data'] = user_obj.__dict__
else:
context['post_data'] = User.objects.get(id=request.user.id)
print context
return render(request,'pyquiz/edit-profile.html',context)
def verify_password(request, email_id):
context = {}
email_id = base64.b64decode(email_id)
u = User.objects.get(email__exact=email_id)
u.is_active = 1
u.save()
return render(request,'pyquiz/verified.html',context)
def reset_password(request, email_id):
context = {}
if request.method == "POST":
print request.POST
email_id = base64.b64decode(email_id)
print email_id
u = User.objects.get(email__exact=email_id)
u.set_password(request.POST['password'])
u.is_active = 1
u.save()
context['success'] = True
return render(request,'pyquiz/reset-password.html',context)
def forgot_password(request):
context = {}
if request.method == "POST":
print request.POST
utils.send_mail_via_gmail('pyquiz/forgot-password-mail.html', {'domain':settings.DOMAIN, 'email_id':base64.b64encode(request.POST['username'])},\
'PyQuiz:reset-password', [request.POST['username']] \
)
context['mail_sent'] = True
return render(request,'pyquiz/forgot-password.html',context)
@login_required
def show_summary(request):
context = {}
template = 'pyquiz/summary.html'
quiz_list = LeaderBoard.objects.all().filter(user_id=request.user.id)
context['quiz_list'] = [quiz.week_id for quiz in quiz_list]
print context
return render(request, template, context)
@login_required
def show_review(request,week_id):
context = {}
template = 'pyquiz/review.html'
context['quiz_answers'] = UserAnswers.objects.filter(user_id=request.user.id, week_id=week_id)
print context
return render(request, template, context)
def admin_manager(request):
context = {'data':{}}
if not request.user.is_superuser:
return render(request, 'pyquiz/404.html', {})
if request.method == "POST":
print request.POST
question_obj = Questions(question=request.POST['question'], week_id=request.POST['week_id'], timeout=request.POST['timeout'])
question_obj.save()
choices_obj = Choices(question_id=question_obj, answer=request.POST[request.POST['answer']])
for choice in ('choice_1', 'choice_2', 'choice_3', 'choice_4'):
choices_obj.__setattr__(choice, request.POST.get(choice, None))
choices_obj.save()
return HttpResponseRedirect(reverse('admin'))
else:
last_quiz = QuizHistory.objects.all().aggregate(Max('week_id'))
if last_quiz['week_id__max']:
context['data']['week_id'] = last_quiz['week_id__max'] + 1
else:
context['data']['week_id'] = 1
# context['question_number'] = Questions.objects.filter(week_id = context['week_id']).annotate(max_question_id=Count('id')) \
# TO TEST THE NO WEEK_ID PRESENT IN QUESTIONS MODEL CASE
context['data']['question_number'] = Questions.objects.filter(week_id = context['data']['week_id']).count()
context['data']['question_number'] = context['data']['question_number'] + 1 if context['data']['question_number'] else 1
print context
return render(request,'pyquiz/admin.html',context)
@login_required
@csrf_exempt
def feedback(request):
if request.method == "POST":
print request.POST
utils.send_mail_via_gmail('pyquiz/feedback-mail.html', {'username': request.POST['username'], 'message': request.POST['message']},\
'PyQuiz:Feedback', ['vivekhas3@gmail.com', 'pyquizcom@gmail.com'] \
)
return HttpResponse("Success")
@user_passes_test(lambda u: u.is_superuser)
@login_required
def generate_list(request):
users_list = User.objects.all()
    email_ids = [user.email for user in users_list]
utils.send_mail_via_gmail('pyquiz/users-list-mail.html', {},\
'PyQuiz:Quiz Ready!', email_ids \
)
return HttpResponse("Mail Sent")
@user_passes_test(lambda u: u.is_superuser)
@login_required
def update_rewards(request):
badges = { badge.badge_id:badge for badge in Badges.objects.all()}
#BADGE 1
overall_winners = LeaderBoard.objects.raw('select id,user_id_id,SUM(points) as points from pyquiz_leaderboard group by user_id_id order by points desc limit 5')
for item in overall_winners:
user_badges_obj = UserBadges.objects.filter(user_id=item.user_id_id)
user_badges = [ item.badge_id for item in user_badges_obj ]
if badges[1] not in user_badges:
UserBadges(user_id=item.user_id, badge_id=badges[1]).save()
#BADGE 2
last_quiz = LeaderBoard.objects.all().aggregate(Max('week_id'))
weekly_winner = LeaderBoard.objects.filter(week_id = last_quiz['week_id__max']).order_by('-points')[0]
user_badges_obj = UserBadges.objects.filter(user_id=weekly_winner.user_id)
user_badges = [ item.badge_id for item in user_badges_obj ]
if badges[2] not in user_badges:
UserBadges(user_id=weekly_winner.user_id, badge_id=badges[2]).save()
#BADGE 3
monthly_winner = LeaderBoard.objects.raw('select id,user_id_id,sum(points) as points from (select * from pyquiz_leaderboard order by week_id ) b group by user_id_id order by points desc limit 1')[0]
user_badges_obj = UserBadges.objects.filter(user_id=monthly_winner.user_id_id)
user_badges = [ item.badge_id for item in user_badges_obj ]
if badges[3] not in user_badges:
UserBadges(user_id=monthly_winner.user_id, badge_id=badges[3]).save()
#BADGE 4
overall_winner = LeaderBoard.objects.raw('select id,user_id_id,SUM(points) as points from pyquiz_leaderboard group by user_id_id order by points desc limit 1')[0]
user_badges_obj = UserBadges.objects.filter(user_id=overall_winner.user_id_id)
user_badges = [ item.badge_id for item in user_badges_obj ]
if badges[4] not in user_badges:
UserBadges(user_id=overall_winner.user_id, badge_id=badges[4]).save()
#BADGE 5
if badges[5] not in user_badges:
overall_second_winner = LeaderBoard.objects.raw('select id,user_id_id,SUM(points) as points from pyquiz_leaderboard group by user_id_id order by points desc limit 1, 1')[0]
if overall_winner.points - overall_second_winner.points >50:
UserBadges(user_id=overall_winner.user_id, badge_id=badges[5]).save()
return HttpResponse("Rewards updated")
def push_message_to_gcm(request):
"""
"""
registration_ids = [user.registration_id for user in GCMRegistrations.objects.all()]
json_data = {"data" : {"message":"panni", "title":"PyQuiz"}, "registration_ids": registration_ids}
url = 'https://android.googleapis.com/gcm/send'
myKey = "key=" + config.API_KEY
data = json.dumps(json_data)
headers = {'Content-Type': 'application/json', 'Authorization': myKey}
req = urllib2.Request(url, data, headers)
f = urllib2.urlopen(req)
response = json.loads(f.read())
print response
return HttpResponse("success")
def save_gcm_id(request, registration_id):
"""
"""
if not GCMRegistrations.objects.filter(registration_id=registration_id):
GCMRegistrations(registration_id=registration_id).save()
data = json.dumps({"success":1})
data = '%s(%s);' % (request.REQUEST['callback'], data)
return HttpResponse(data, "text/javascript")
def logout_user(request):
logout(request)
return HttpResponseRedirect(settings.LOGIN_URL)
def page_not_found(request):
template = 'pyquiz/404_static.html'
if request.user.is_authenticated():
template = 'pyquiz/404.html'
return render(request, template, {})
def internal_error(request):
return render(request, 'pyquiz/500.html', {})
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dispatches tests, either sharding or replicating them.
Performs the following steps:
* Create a test collection factory, using the given tests
- If sharding: test collection factory returns the same shared test collection
to all test runners
  - If replicating: test collection factory returns a unique test collection to
each test runner, with the same set of tests in each.
* Create a test runner for each device.
* Run each test runner in its own thread, grabbing tests from the test
collection until there are no tests left.
"""
# TODO(jbudorick) Deprecate and remove this class after any relevant parts have
# been ported to the new environment / test instance model.
import logging
import threading
from pylib import android_commands
from pylib import constants
from pylib.base import base_test_result
from pylib.base import test_collection
from pylib.device import device_errors
from pylib.utils import reraiser_thread
from pylib.utils import watchdog_timer
DEFAULT_TIMEOUT = 7 * 60 # seven minutes
class _ThreadSafeCounter(object):
"""A threadsafe counter."""
def __init__(self):
self._lock = threading.Lock()
self._value = 0
def GetAndIncrement(self):
"""Get the current value and increment it atomically.
Returns:
The value before incrementing.
"""
with self._lock:
pre_increment = self._value
self._value += 1
return pre_increment
class _Test(object):
"""Holds a test with additional metadata."""
def __init__(self, test, tries=0):
"""Initializes the _Test object.
Args:
test: The test.
tries: Number of tries so far.
"""
self.test = test
self.tries = tries
def _RunTestsFromQueue(runner, collection, out_results, watcher,
num_retries, tag_results_with_device=False):
"""Runs tests from the collection until empty using the given runner.
Adds TestRunResults objects to the out_results list and may add tests to the
out_retry list.
Args:
runner: A TestRunner object used to run the tests.
collection: A TestCollection from which to get _Test objects to run.
out_results: A list to add TestRunResults to.
watcher: A watchdog_timer.WatchdogTimer object, used as a shared timeout.
num_retries: Number of retries for a test.
tag_results_with_device: If True, appends the name of the device on which
the test was run to the test name. Used when replicating to identify
which device ran each copy of the test, and to ensure each copy of the
test is recorded separately.
"""
def TagTestRunResults(test_run_results):
"""Tags all results with the last 4 digits of the device id.
Used when replicating tests to distinguish the same tests run on different
devices. We use a set to store test results, so the hash (generated from
name and tag) must be unique to be considered different results.
"""
new_test_run_results = base_test_result.TestRunResults()
for test_result in test_run_results.GetAll():
test_result.SetName('%s_%s' % (runner.device_serial[-4:],
test_result.GetName()))
new_test_run_results.AddResult(test_result)
return new_test_run_results
for test in collection:
watcher.Reset()
try:
if runner.device_serial not in android_commands.GetAttachedDevices():
# Device is unresponsive, stop handling tests on this device.
msg = 'Device %s is unresponsive.' % runner.device_serial
logging.warning(msg)
raise device_errors.DeviceUnreachableError(msg)
result, retry = runner.RunTest(test.test)
if tag_results_with_device:
result = TagTestRunResults(result)
test.tries += 1
if retry and test.tries <= num_retries:
# Retry non-passing results, only record passing results.
pass_results = base_test_result.TestRunResults()
pass_results.AddResults(result.GetPass())
out_results.append(pass_results)
logging.warning('Will retry test %s, try #%s.', retry, test.tries)
collection.add(_Test(test=retry, tries=test.tries))
else:
# All tests passed or retry limit reached. Either way, record results.
out_results.append(result)
except:
# An unhandleable exception, ensure tests get run by another device and
# reraise this exception on the main thread.
collection.add(test)
raise
finally:
# Retries count as separate tasks so always mark the popped test as done.
collection.test_completed()
def _SetUp(runner_factory, device, out_runners, threadsafe_counter):
"""Creates a test runner for each device and calls SetUp() in parallel.
Note: if a device is unresponsive the corresponding TestRunner will not be
added to out_runners.
Args:
runner_factory: Callable that takes a device and index and returns a
TestRunner object.
device: The device serial number to set up.
out_runners: List to add the successfully set up TestRunner object.
threadsafe_counter: A _ThreadSafeCounter object used to get shard indices.
"""
try:
index = threadsafe_counter.GetAndIncrement()
logging.warning('Creating shard %s for device %s.', index, device)
runner = runner_factory(device, index)
runner.SetUp()
out_runners.append(runner)
except (device_errors.DeviceUnreachableError,
# TODO(jbudorick) Remove this once the underlying implementations
# for the above are switched or wrapped.
android_commands.errors.DeviceUnresponsiveError) as e:
logging.warning('Failed to create shard for %s: [%s]', device, e)
def _RunAllTests(runners, test_collection_factory, num_retries, timeout=None,
tag_results_with_device=False):
"""Run all tests using the given TestRunners.
Args:
runners: A list of TestRunner objects.
test_collection_factory: A callable to generate a TestCollection object for
each test runner.
num_retries: Number of retries for a test.
timeout: Watchdog timeout in seconds.
tag_results_with_device: If True, appends the name of the device on which
the test was run to the test name. Used when replicating to identify
which device ran each copy of the test, and to ensure each copy of the
test is recorded separately.
Returns:
A tuple of (TestRunResults object, exit code)
"""
logging.warning('Running tests with %s test runners.' % (len(runners)))
results = []
exit_code = 0
run_results = base_test_result.TestRunResults()
watcher = watchdog_timer.WatchdogTimer(timeout)
test_collections = [test_collection_factory() for _ in runners]
threads = [
reraiser_thread.ReraiserThread(
_RunTestsFromQueue,
[r, tc, results, watcher, num_retries, tag_results_with_device],
name=r.device_serial[-4:])
for r, tc in zip(runners, test_collections)]
workers = reraiser_thread.ReraiserThreadGroup(threads)
workers.StartAll()
# Catch DeviceUnreachableErrors and set a warning exit code
try:
workers.JoinAll(watcher)
except (device_errors.DeviceUnreachableError,
# TODO(jbudorick) Remove this once the underlying implementations
# for the above are switched or wrapped.
android_commands.errors.DeviceUnresponsiveError) as e:
logging.error(e)
if not all((len(tc) == 0 for tc in test_collections)):
logging.error('Only ran %d tests (all devices are likely offline).' %
len(results))
for tc in test_collections:
run_results.AddResults(base_test_result.BaseTestResult(
t, base_test_result.ResultType.UNKNOWN) for t in tc.test_names())
for r in results:
run_results.AddTestRunResults(r)
if not run_results.DidRunPass():
exit_code = constants.ERROR_EXIT_CODE
return (run_results, exit_code)
def _CreateRunners(runner_factory, devices, timeout=None):
"""Creates a test runner for each device and calls SetUp() in parallel.
Note: if a device is unresponsive the corresponding TestRunner will not be
included in the returned list.
Args:
runner_factory: Callable that takes a device and index and returns a
TestRunner object.
devices: List of device serial numbers as strings.
timeout: Watchdog timeout in seconds, defaults to the default timeout.
Returns:
A list of TestRunner objects.
"""
logging.warning('Creating %s test runners.' % len(devices))
runners = []
counter = _ThreadSafeCounter()
threads = reraiser_thread.ReraiserThreadGroup(
[reraiser_thread.ReraiserThread(_SetUp,
[runner_factory, d, runners, counter],
name=d[-4:])
for d in devices])
threads.StartAll()
threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))
return runners
def _TearDownRunners(runners, timeout=None):
"""Calls TearDown() for each test runner in parallel.
Args:
runners: A list of TestRunner objects.
timeout: Watchdog timeout in seconds, defaults to the default timeout.
"""
threads = reraiser_thread.ReraiserThreadGroup(
[reraiser_thread.ReraiserThread(r.TearDown, name=r.device_serial[-4:])
for r in runners])
threads.StartAll()
threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))
def ApplyMaxPerRun(tests, max_per_run):
"""Rearrange the tests so that no group contains more than max_per_run tests.
Args:
    tests: List of tests to run; string entries may contain multiple
      colon-separated test names.
    max_per_run: Maximum number of tests to include in any single group.
Returns:
A list of tests with no more than max_per_run per run.
"""
tests_expanded = []
for test_group in tests:
if type(test_group) != str:
# Do not split test objects which are not strings.
tests_expanded.append(test_group)
else:
test_split = test_group.split(':')
for i in range(0, len(test_split), max_per_run):
tests_expanded.append(':'.join(test_split[i:i+max_per_run]))
return tests_expanded
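# Illustrative example of the regrouping above (hypothetical test names):
#
#   >>> ApplyMaxPerRun(['TestA.m1:TestA.m2:TestA.m3'], 2)
#   ['TestA.m1:TestA.m2', 'TestA.m3']
#
# Non-string entries are passed through unchanged.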
def RunTests(tests, runner_factory, devices, shard=True,
test_timeout=DEFAULT_TIMEOUT, setup_timeout=DEFAULT_TIMEOUT,
num_retries=2, max_per_run=256):
"""Run all tests on attached devices, retrying tests that don't pass.
Args:
tests: List of tests to run.
runner_factory: Callable that takes a device and index and returns a
TestRunner object.
devices: List of attached devices.
shard: True if we should shard, False if we should replicate tests.
- Sharding tests will distribute tests across all test runners through a
shared test collection.
- Replicating tests will copy all tests to each test runner through a
unique test collection for each test runner.
test_timeout: Watchdog timeout in seconds for running tests.
setup_timeout: Watchdog timeout in seconds for creating and cleaning up
test runners.
num_retries: Number of retries for a test.
max_per_run: Maximum number of tests to run in any group.
Returns:
A tuple of (base_test_result.TestRunResults object, exit code).
"""
if not tests:
logging.critical('No tests to run.')
return (base_test_result.TestRunResults(), constants.ERROR_EXIT_CODE)
tests_expanded = ApplyMaxPerRun(tests, max_per_run)
if shard:
# Generate a shared TestCollection object for all test runners, so they
# draw from a common pool of tests.
shared_test_collection = test_collection.TestCollection(
[_Test(t) for t in tests_expanded])
test_collection_factory = lambda: shared_test_collection
tag_results_with_device = False
log_string = 'sharded across devices'
else:
# Generate a unique TestCollection object for each test runner, but use
# the same set of tests.
test_collection_factory = lambda: test_collection.TestCollection(
[_Test(t) for t in tests_expanded])
tag_results_with_device = True
log_string = 'replicated on each device'
logging.info('Will run %d tests (%s): %s',
len(tests_expanded), log_string, str(tests_expanded))
runners = _CreateRunners(runner_factory, devices, setup_timeout)
try:
return _RunAllTests(runners, test_collection_factory,
num_retries, test_timeout, tag_results_with_device)
finally:
try:
_TearDownRunners(runners, setup_timeout)
except (device_errors.DeviceUnreachableError,
# TODO(jbudorick) Remove this once the underlying implementations
# for the above are switched or wrapped.
android_commands.errors.DeviceUnresponsiveError) as e:
logging.warning('Device unresponsive during TearDown: [%s]', e)
except Exception as e:
logging.error('Unexpected exception caught during TearDown: %s' % str(e))
"""Test event helpers."""
# pylint: disable=protected-access,too-many-public-methods
# pylint: disable=too-few-public-methods
import unittest
from datetime import datetime, timedelta
from astral import Astral
import homeassistant.core as ha
from homeassistant.helpers.event import (
track_point_in_utc_time,
track_point_in_time,
track_utc_time_change,
track_time_change,
track_state_change,
track_sunrise,
track_sunset,
)
from homeassistant.components import sun
import homeassistant.util.dt as dt_util
from tests.common import get_test_home_assistant
class TestEventHelpers(unittest.TestCase):
"""Test the Home Assistant event helpers."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_track_point_in_time(self):
"""Test track point in time."""
before_birthday = datetime(1985, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
birthday_paulus = datetime(1986, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
after_birthday = datetime(1987, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
runs = []
track_point_in_utc_time(
self.hass, lambda x: runs.append(1), birthday_paulus)
self._send_time_changed(before_birthday)
self.hass.pool.block_till_done()
self.assertEqual(0, len(runs))
self._send_time_changed(birthday_paulus)
self.hass.pool.block_till_done()
self.assertEqual(1, len(runs))
# A point in time tracker will only fire once, this should do nothing
self._send_time_changed(birthday_paulus)
self.hass.pool.block_till_done()
self.assertEqual(1, len(runs))
track_point_in_time(
self.hass, lambda x: runs.append(1), birthday_paulus)
self._send_time_changed(after_birthday)
self.hass.pool.block_till_done()
self.assertEqual(2, len(runs))
def test_track_time_change(self):
"""Test tracking time change."""
wildcard_runs = []
specific_runs = []
track_time_change(self.hass, lambda x: wildcard_runs.append(1))
track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), second=[0, 30])
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(1, len(wildcard_runs))
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 15))
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(2, len(wildcard_runs))
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 30))
self.hass.pool.block_till_done()
self.assertEqual(2, len(specific_runs))
self.assertEqual(3, len(wildcard_runs))
def test_track_state_change(self):
"""Test track_state_change."""
# 2 lists to track how often our callbacks get called
specific_runs = []
wildcard_runs = []
track_state_change(
self.hass, 'light.Bowl', lambda a, b, c: specific_runs.append(1),
'on', 'off')
track_state_change(
self.hass, 'light.Bowl',
lambda _, old_s, new_s: wildcard_runs.append((old_s, new_s)),
ha.MATCH_ALL, ha.MATCH_ALL)
# Adding state to state machine
self.hass.states.set("light.Bowl", "on")
self.hass.pool.block_till_done()
self.assertEqual(0, len(specific_runs))
self.assertEqual(1, len(wildcard_runs))
self.assertIsNone(wildcard_runs[-1][0])
self.assertIsNotNone(wildcard_runs[-1][1])
        # Setting the same state should not trigger a state change/listener
self.hass.states.set('light.Bowl', 'on')
self.hass.pool.block_till_done()
self.assertEqual(0, len(specific_runs))
self.assertEqual(1, len(wildcard_runs))
# State change off -> on
self.hass.states.set('light.Bowl', 'off')
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(2, len(wildcard_runs))
# State change off -> off
self.hass.states.set('light.Bowl', 'off', {"some_attr": 1})
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(3, len(wildcard_runs))
# State change off -> on
self.hass.states.set('light.Bowl', 'on')
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(4, len(wildcard_runs))
self.hass.states.remove('light.bowl')
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(5, len(wildcard_runs))
self.assertIsNotNone(wildcard_runs[-1][0])
self.assertIsNone(wildcard_runs[-1][1])
def test_track_sunrise(self):
"""Test track the sunrise."""
latitude = 32.87336
longitude = 117.22743
# Setup sun component
self.hass.config.latitude = latitude
self.hass.config.longitude = longitude
sun.setup(self.hass, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
# Get next sunrise/sunset
astral = Astral()
utc_now = dt_util.utcnow()
mod = -1
while True:
next_rising = (astral.sunrise_utc(utc_now +
timedelta(days=mod), latitude, longitude))
if next_rising > utc_now:
break
mod += 1
# Track sunrise
runs = []
track_sunrise(self.hass, lambda: runs.append(1))
offset_runs = []
offset = timedelta(minutes=30)
track_sunrise(self.hass, lambda: offset_runs.append(1), offset)
# run tests
self._send_time_changed(next_rising - offset)
self.hass.pool.block_till_done()
self.assertEqual(0, len(runs))
self.assertEqual(0, len(offset_runs))
self._send_time_changed(next_rising)
self.hass.pool.block_till_done()
self.assertEqual(1, len(runs))
self.assertEqual(0, len(offset_runs))
self._send_time_changed(next_rising + offset)
self.hass.pool.block_till_done()
self.assertEqual(2, len(runs))
self.assertEqual(1, len(offset_runs))
def test_track_sunset(self):
"""Test track the sunset."""
latitude = 32.87336
longitude = 117.22743
# Setup sun component
self.hass.config.latitude = latitude
self.hass.config.longitude = longitude
sun.setup(self.hass, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
# Get next sunrise/sunset
astral = Astral()
utc_now = dt_util.utcnow()
mod = -1
while True:
next_setting = (astral.sunset_utc(utc_now +
timedelta(days=mod), latitude, longitude))
if next_setting > utc_now:
break
mod += 1
# Track sunset
runs = []
track_sunset(self.hass, lambda: runs.append(1))
offset_runs = []
offset = timedelta(minutes=30)
track_sunset(self.hass, lambda: offset_runs.append(1), offset)
# Run tests
self._send_time_changed(next_setting - offset)
self.hass.pool.block_till_done()
self.assertEqual(0, len(runs))
self.assertEqual(0, len(offset_runs))
self._send_time_changed(next_setting)
self.hass.pool.block_till_done()
self.assertEqual(1, len(runs))
self.assertEqual(0, len(offset_runs))
self._send_time_changed(next_setting + offset)
self.hass.pool.block_till_done()
self.assertEqual(2, len(runs))
self.assertEqual(1, len(offset_runs))
def _send_time_changed(self, now):
"""Send a time changed event."""
self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: now})
def test_periodic_task_minute(self):
"""Test periodic tasks per minute."""
specific_runs = []
track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), minute='/5')
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 24, 12, 3, 0))
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 24, 12, 5, 0))
self.hass.pool.block_till_done()
self.assertEqual(2, len(specific_runs))
def test_periodic_task_hour(self):
"""Test periodic tasks per hour."""
specific_runs = []
track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), hour='/2')
self._send_time_changed(datetime(2014, 5, 24, 22, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 24, 23, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 24, 0, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(2, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 25, 1, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(2, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 25, 2, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(3, len(specific_runs))
def test_periodic_task_day(self):
"""Test periodic tasks per day."""
specific_runs = []
track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), day='/2')
self._send_time_changed(datetime(2014, 5, 2, 0, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 3, 12, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 4, 0, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(2, len(specific_runs))
def test_periodic_task_year(self):
"""Test periodic tasks per year."""
specific_runs = []
track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), year='/2')
self._send_time_changed(datetime(2014, 5, 2, 0, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2015, 5, 2, 0, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2016, 5, 2, 0, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(2, len(specific_runs))
def test_periodic_task_wrong_input(self):
"""Test periodic tasks with wrong input."""
specific_runs = []
track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), year='/two')
self._send_time_changed(datetime(2014, 5, 2, 0, 0, 0))
self.hass.pool.block_till_done()
self.assertEqual(0, len(specific_runs))
import sys
import os
import time
import threading
current_path = os.path.dirname(os.path.abspath(__file__))
if __name__ == "__main__":
python_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir))
root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
sys.path.append(root_path)
if sys.platform == "win32":
win32_lib = os.path.abspath( os.path.join(python_path, 'lib', 'win32'))
sys.path.append(win32_lib)
elif sys.platform.startswith("linux"):
linux_lib = os.path.abspath( os.path.join(python_path, 'lib', 'linux'))
sys.path.append(linux_lib)
from gae_proxy.local.config import config
import simple_http_client
from xlog import getLogger
xlog = getLogger("gae_proxy")
max_timeout = 5
class CheckNetwork(object):
def __init__(self, type="IPv4"):
self.type = type
self.urls = []
self._checking_lock = threading.Lock()
self._checking_num = 0
self.network_stat = "unknown"
self.last_check_time = 0
self.continue_fail_count = 0
if config.PROXY_ENABLE:
if config.PROXY_USER:
self.proxy = "%s://%s:%s@%s:%d" % \
(config.PROXY_TYPE, config.PROXY_USER, config.PROXY_PASSWD, config.PROXY_HOST, config.PROXY_PORT)
else:
self.proxy = "%s://%s:%d" % \
(config.PROXY_TYPE, config.PROXY_HOST, config.PROXY_PORT)
else:
self.proxy = None
self.http_client = simple_http_client.Client(self.proxy, timeout=10)
def report_ok(self):
self.network_stat = "OK"
self.last_check_time = time.time()
self.continue_fail_count = 0
def report_fail(self):
self.continue_fail_count += 1
# don't record last_check_time here, it's not a real check
# last_check_time = time.time()
if self.continue_fail_count > 10:
# don't set network_stat to "unknown", wait for check
# network_stat = "unknown"
xlog.debug("report_connect_fail %s continue_fail_count:%d",
self.type, self.continue_fail_count)
self.triger_check_network(True)
def get_stat(self):
if config.check_local_network_rules == "force_fail":
return "Fail"
elif config.check_local_network_rules == "force_ok":
return "OK"
else:
return self.network_stat
def is_ok(self):
if config.check_local_network_rules == "normal":
return self.network_stat == "OK"
elif config.check_local_network_rules == "force_fail":
return False
elif config.check_local_network_rules == "force_ok":
return True
else:
return self.network_stat == "OK"
def _test_host(self, url):
try:
header = {
"user-agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36",
"accept": "application/json, text/javascript, */*; q=0.01",
"accept-encoding": "gzip, deflate, sdch",
"accept-language": 'en-US,en;q=0.8,ja;q=0.6,zh-CN;q=0.4,zh;q=0.2',
"connection": "keep-alive"
}
response = self.http_client.request("HEAD", url, header, "", read_payload=False)
if response:
return True
except Exception as e:
if __name__ == "__main__":
xlog.exception("test %s e:%r", url, e)
return False
def _simple_check_worker(self):
time_now = time.time()
self._checking_lock.acquire()
self._checking_num += 1
self._checking_lock.release()
network_ok = False
for url in self.urls:
if self._test_host(url):
network_ok = True
break
else:
if __name__ == "__main__":
xlog.warn("test %s fail", url)
time.sleep(1)
if network_ok:
self.last_check_time = time.time()
self.report_ok()
xlog.debug("network %s is ok, cost:%d ms", self.type, 1000 * (time.time() - time_now))
else:
xlog.warn("network %s fail", self.type)
self.network_stat = "Fail"
self.last_check_time = time.time()
self._checking_lock.acquire()
self._checking_num -= 1
self._checking_lock.release()
def triger_check_network(self, fail=False, force=False):
time_now = time.time()
if not force:
if self._checking_num > 0:
return
if fail or self.network_stat != "OK":
# Fail or unknown
if time_now - self.last_check_time < 3:
return
else:
if time_now - self.last_check_time < 10:
return
self.last_check_time = time_now
threading.Thread(target=self._simple_check_worker).start()
IPv4 = CheckNetwork("IPv4")
IPv4.urls = [
"https://www.microsoft.com",
"https://www.apple.com",
"https://code.jquery.com",
"https://cdn.bootcss.com",
"https://cdnjs.cloudflare.com"]
IPv4.triger_check_network()
IPv6 = CheckNetwork("IPv6")
IPv6.urls = ["https://ipv6.vm3.test-ipv6.com",
"http://[2001:470:1:18::115]",
"http://ipv6.lookup.test-ipv6.com",
"http://v6.myip.la"
]
IPv6.triger_check_network()
def report_ok(ip):
if "." in ip:
IPv4.report_ok()
else:
IPv6.report_ok()
def report_fail(ip):
if "." in ip:
IPv4.report_fail()
else:
IPv6.report_fail()
def is_ok(ip=None):
if not ip:
return IPv4.is_ok() or IPv6.is_ok()
elif "." in ip:
return IPv4.is_ok()
else:
return IPv6.is_ok()
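# A minimal usage sketch of the module-level helpers above (the module name and
# addresses are illustrative; callers report the outcome of their own
# connection attempts and query is_ok() before retrying):
#
#   import check_local_network
#   if check_local_network.is_ok("93.184.216.34"):
#       pass  # network looks usable, try the connection
#   check_local_network.report_ok("93.184.216.34")         # an IPv4 result
#   check_local_network.report_fail("2606:2800:220:1::1")  # an IPv6 result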
if __name__ == "__main__":
#print(IPv6._test_host("http://[2804:10:4068::202:82]"))
IPv4._test_host("https://www.baidu.com")
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A setup module for the GRPC Python package."""
import os
import os.path
import shutil
import sys
from distutils import core as _core
from distutils import extension as _extension
import setuptools
from setuptools.command import egg_info
# Redirect the manifest template from MANIFEST.in to PYTHON-MANIFEST.in.
egg_info.manifest_maker.template = 'PYTHON-MANIFEST.in'
PYTHON_STEM = './src/python/grpcio'
CORE_INCLUDE = ('./include', '.',)
BORINGSSL_INCLUDE = ('./third_party/boringssl/include',)
ZLIB_INCLUDE = ('./third_party/zlib',)
# Ensure we're in the proper directory whether or not we're being used by pip.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.abspath(PYTHON_STEM))
# Break import-style to ensure we can actually find our in-repo dependencies.
import commands
import grpc_core_dependencies
LICENSE = '3-clause BSD'
# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
# to have been generated by building first *with* Cython support.
BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)
# Environment variable to determine whether or not to enable coverage analysis
# in Cython modules.
ENABLE_CYTHON_TRACING = os.environ.get(
'GRPC_PYTHON_ENABLE_CYTHON_TRACING', False)
# Environment variable to determine whether or not to include the test files in
# the installation.
INSTALL_TESTS = os.environ.get('GRPC_PYTHON_INSTALL_TESTS', False)
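# For example (illustrative invocation only; exact usage depends on the build
# environment), a from-source build that regenerates the Cython extension and
# ships the test files could be driven with:
#
#   GRPC_PYTHON_BUILD_WITH_CYTHON=1 GRPC_PYTHON_INSTALL_TESTS=1 pip install .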
CYTHON_EXTENSION_PACKAGE_NAMES = ()
CYTHON_EXTENSION_MODULE_NAMES = ('grpc._cython.cygrpc',)
EXTENSION_INCLUDE_DIRECTORIES = (
(PYTHON_STEM,) + CORE_INCLUDE + BORINGSSL_INCLUDE + ZLIB_INCLUDE)
EXTENSION_LIBRARIES = ('m',)
if not "darwin" in sys.platform:
EXTENSION_LIBRARIES += ('rt',)
DEFINE_MACROS = (('OPENSSL_NO_ASM', 1),)
def cython_extensions(package_names, module_names, include_dirs, libraries,
define_macros, build_with_cython=False):
if ENABLE_CYTHON_TRACING:
define_macros = define_macros + [('CYTHON_TRACE_NOGIL', 1)]
file_extension = 'pyx' if build_with_cython else 'c'
module_files = [os.path.join(PYTHON_STEM,
name.replace('.', '/') + '.' + file_extension)
for name in module_names]
extensions = [
_extension.Extension(
name=module_name,
sources=[module_file] + grpc_core_dependencies.CORE_SOURCE_FILES,
include_dirs=include_dirs, libraries=libraries,
define_macros=define_macros,
) for (module_name, module_file) in zip(module_names, module_files)
]
if build_with_cython:
import Cython.Build
return Cython.Build.cythonize(
extensions,
include_path=include_dirs,
compiler_directives={'linetrace': bool(ENABLE_CYTHON_TRACING)})
else:
return extensions
CYTHON_EXTENSION_MODULES = cython_extensions(
list(CYTHON_EXTENSION_PACKAGE_NAMES), list(CYTHON_EXTENSION_MODULE_NAMES),
list(EXTENSION_INCLUDE_DIRECTORIES), list(EXTENSION_LIBRARIES),
list(DEFINE_MACROS), bool(BUILD_WITH_CYTHON))
PACKAGE_DIRECTORIES = {
'': PYTHON_STEM,
}
INSTALL_REQUIRES = (
'six>=1.10',
'enum34>=1.0.4',
'futures>=2.2.0',
# TODO(atash): eventually split the grpcio package into a metapackage
# depending on protobuf and the runtime component (independent of protobuf)
'protobuf>=3.0.0a3',
)
SETUP_REQUIRES = (
'sphinx>=1.3',
) + INSTALL_REQUIRES
COMMAND_CLASS = {
'install': commands.Install,
'doc': commands.SphinxDocumentation,
'build_proto_modules': commands.BuildProtoModules,
'build_project_metadata': commands.BuildProjectMetadata,
'build_py': commands.BuildPy,
'build_ext': commands.BuildExt,
'gather': commands.Gather,
'run_interop': commands.RunInterop,
'bdist_egg_grpc_custom': commands.BdistEggCustomName,
}
# Ensure that package data is copied over before any commands have been run:
credentials_dir = os.path.join(PYTHON_STEM, 'grpc/_adapter/credentials')
try:
os.mkdir(credentials_dir)
except OSError:
pass
shutil.copyfile('etc/roots.pem', os.path.join(credentials_dir, 'roots.pem'))
TEST_PACKAGE_DATA = {
'tests.interop': [
'credentials/ca.pem',
'credentials/server1.key',
'credentials/server1.pem',
],
'tests.protoc_plugin': [
'protoc_plugin_test.proto',
],
'tests.unit': [
'credentials/ca.pem',
'credentials/server1.key',
'credentials/server1.pem',
],
'grpc._adapter': [
'credentials/roots.pem'
],
}
TESTS_REQUIRE = (
'oauth2client>=1.4.7',
'protobuf>=3.0.0a3',
'coverage>=4.0',
) + INSTALL_REQUIRES
TEST_SUITE = 'tests'
TEST_LOADER = 'tests:Loader'
TEST_RUNNER = 'tests:Runner'
PACKAGE_DATA = {}
if INSTALL_TESTS:
PACKAGE_DATA = dict(PACKAGE_DATA, **TEST_PACKAGE_DATA)
PACKAGES = setuptools.find_packages(PYTHON_STEM)
else:
PACKAGES = setuptools.find_packages(
PYTHON_STEM, exclude=['tests', 'tests.*'])
setuptools.setup(
name='grpcio',
version='0.12.0b6',
license=LICENSE,
ext_modules=CYTHON_EXTENSION_MODULES,
packages=list(PACKAGES),
package_dir=PACKAGE_DIRECTORIES,
package_data=PACKAGE_DATA,
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
cmdclass=COMMAND_CLASS,
tests_require=TESTS_REQUIRE,
test_suite=TEST_SUITE,
test_loader=TEST_LOADER,
test_runner=TEST_RUNNER,
)
#
# Sub-module containing nested samplers
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import pints
import numpy as np
try:
from scipy.special import logsumexp
except ImportError: # pragma: no cover
# Older versions
from scipy.misc import logsumexp
class NestedSampler(pints.TunableMethod):
"""
Abstract base class for nested samplers.
Parameters
----------
log_prior : pints.LogPrior
A logprior to draw proposal samples from.
"""
def __init__(self, log_prior):
# Store logprior
if not isinstance(log_prior, pints.LogPrior):
raise ValueError('Given log_prior must extend pints.LogPrior')
# prior accessed by subclasses to do prior sampling in ask() step
self._log_prior = log_prior
# Current value of the threshold log-likelihood value
self._running_log_likelihood = -float('inf')
self._proposed = None
# Initialise active point containers
self._n_active_points = 400
self._n_parameters = self._log_prior.n_parameters()
self._m_active = np.zeros((self._n_active_points,
self._n_parameters + 1))
self._min_index = None
self._accept_count = 0
self._n_evals = 0
def active_points(self):
"""
Returns the active points from nested sampling run.
"""
return self._m_active
def ask(self):
"""
Proposes new point at which to evaluate log-likelihood.
"""
raise NotImplementedError
def _initialise_active_points(self, m_initial, v_fx):
"""
Sets initial active points matrix.
"""
for i, fx in enumerate(v_fx):
self._m_active[i, self._n_parameters] = fx
self._m_active[:, :-1] = m_initial
self._min_index = np.argmin(self._m_active[:, self._n_parameters])
self._set_running_log_likelihood(
self._m_active[self._min_index, self._n_parameters])
def in_initial_phase(self):
"""
For methods that need an initial phase (see
:meth:`needs_initial_phase()`), this method returns ``True`` if the
method is currently configured to be in its initial phase. For other
        methods a ``NotImplementedError`` is raised.
"""
raise NotImplementedError
def min_index(self):
""" Returns index of sample with lowest log-likelihood. """
return self._min_index
def n_active_points(self):
"""
Returns the number of active points that will be used in next run.
"""
return self._n_active_points
def n_hyper_parameters(self):
""" See :meth:`TunableMethod.n_hyper_parameters()`. """
raise NotImplementedError
def name(self):
""" Name of sampler """
raise NotImplementedError
def needs_sensitivities(self):
"""
Determines whether sampler uses sensitivities of the solution.
"""
return self._needs_sensitivities
def needs_initial_phase(self):
"""
Returns ``True`` if this method needs an initial phase, for example
ellipsoidal nested sampling has a period of running rejection
sampling before it starts to fit ellipsoids to points.
"""
return False
def running_log_likelihood(self):
"""
Returns current value of the threshold log-likelihood value.
"""
return self._running_log_likelihood
def set_n_active_points(self, active_points):
"""
Sets the number of active points for the next run.
"""
active_points = int(active_points)
if active_points <= 5:
raise ValueError('Number of active points must be greater than 5.')
self._n_active_points = active_points
self._m_active = np.zeros((self._n_active_points,
self._n_parameters + 1))
def set_hyper_parameters(self, x):
"""
See :meth:`TunableMethod.set_hyper_parameters()`.
"""
raise NotImplementedError
def set_initial_phase(self, in_initial_phase):
"""
For methods that need an initial phase (see
:meth:`needs_initial_phase()`), this method toggles the initial phase
        algorithm. For other methods a ``NotImplementedError`` is raised.
"""
raise NotImplementedError
def _set_running_log_likelihood(self, running_log_likelihood):
"""
Updates the current value of the threshold log-likelihood value.
"""
self._running_log_likelihood = running_log_likelihood
def tell(self, fx):
"""
If a single evaluation is provided as arguments, a single point is
accepted and returned if its likelihood exceeds the current threshold;
otherwise None is returned.
If multiple evaluations are provided as arguments (for example, if
running the algorithm in parallel), None is returned if no points
have likelihood exceeding threshold; if a single point passes the
threshold, it is returned; if multiple points pass, one is selected
uniformly at random and returned and the others are stored for later
use.
In all cases, two objects are returned: the proposed point (which may
be None) and an array of other points that also pass the threshold
(which is empty for single evaluation mode but may be non-empty for
multiple evaluation mode).
"""
# for serial evaluation just return point or None and an empty array
if np.isscalar(fx):
self._n_evals += 1
if np.isnan(fx) or fx < self._running_log_likelihood:
return None, np.array([[]])
else:
proposed = self._proposed
fx_temp = fx
winners = np.array([[]])
# if running in parallel, then fx will be a sequence
else:
a_len = len(fx)
self._n_evals += a_len
results = []
for i in range(a_len):
if np.isnan(fx[i]) or fx[i] < self._running_log_likelihood:
results.append(None)
else:
results.append(fx[i])
n_non_none = sum(x is not None for x in results)
# if none pass threshold return None and an empty array
if n_non_none == 0:
return None, np.array([[]])
# if one passes then return it and an empty array
elif n_non_none == 1:
fx_temp = next(item for item in results if item is not None)
index = results.index(fx_temp)
proposed = self._proposed[index]
winners = np.array([[]])
# if more than a single point passes select at random from multiple
# non-nones and return it and an array of the other points whose
# likelihood exceeds threshold
else:
                fx_short = [i for i in results if i is not None]
idex = [results.index(i) for i in fx_short]
proposed_short = [self._proposed[i] for i in idex]
fx_temp = np.random.choice(fx_short)
index_temp = results.index(fx_temp)
proposed = self._proposed[index_temp]
index1 = fx_short.index(fx_temp)
del proposed_short[index1]
fx_short.remove(fx_temp)
winners = np.transpose(
np.vstack([np.transpose(proposed_short), fx_short]))
self._m_active[self._min_index, :] = np.concatenate(
(proposed, np.array([fx_temp])))
self._min_index = np.argmin(
self._m_active[:, self._n_parameters])
self._set_running_log_likelihood(
np.min(self._m_active[:, self._n_parameters]))
self._accept_count += 1
return proposed, winners
class NestedController(object):
"""
Uses nested sampling to sample from a posterior distribution.
Parameters
----------
log_likelihood : pints.LogPDF
A :class:`LogPDF` function that evaluates points in the parameter
space.
log_prior : pints.LogPrior
A :class:`LogPrior` function on the same parameter space.
References
----------
.. [1] "Nested Sampling for General Bayesian Computation", John Skilling,
Bayesian Analysis 1:4 (2006).
https://doi.org/10.1214/06-BA127
.. [2] "Multimodal nested sampling: an efficient and robust alternative
to Markov chain Monte Carlo methods for astronomical data analyses"
F. Feroz and M. P. Hobson, 2008, Mon. Not. R. Astron. Soc.
"""
def __init__(self, log_likelihood, log_prior, method=None):
# Store log_likelihood and log_prior
# if not isinstance(log_likelihood, pints.LogLikelihood):
if not isinstance(log_likelihood, pints.LogPDF):
raise ValueError(
'Given log_likelihood must extend pints.LogLikelihood')
self._log_likelihood = log_likelihood
# Store function
if not isinstance(log_prior, pints.LogPrior):
raise ValueError('Given log_prior must extend pints.LogPrior')
self._log_prior = log_prior
# Get dimension
self._n_parameters = self._log_likelihood.n_parameters()
if self._n_parameters != self._log_prior.n_parameters():
raise ValueError(
'Given log_likelihood and log_prior must have same number of'
' parameters.')
# Logging
self._log_to_screen = True
self._log_filename = None
self._log_csv = False
# By default do serial evaluation
self._parallel = False
self._n_workers = 1
self.set_parallel()
# Parameters common to all routines
# Total number of iterations
self._iterations = 1000
# Total number of posterior samples
self._posterior_samples = 1000
# Convergence criterion in log-evidence
self._marginal_log_likelihood_threshold = 0.5
# Initial marginal difference
        self._diff = float('-inf')
# By default use ellipsoidal sampling
if method is None:
method = pints.NestedEllipsoidSampler
else:
try:
ok = issubclass(method, pints.NestedSampler)
except TypeError: # Not a class
ok = False
if not ok:
raise ValueError(
'Given method must extend pints.NestedSampler.'
)
self._sampler = method(log_prior=self._log_prior)
# Check if sensitivities are required
self._needs_sensitivities = self._sampler.needs_sensitivities()
# Performance metrics
self._time = None
# :meth:`run` can only be called once
self._has_run = False
def active_points(self):
"""
Returns the active points from nested sampling.
"""
return self._sampler.active_points()
def _diff_marginal_likelihood(self, i, d):
"""
Calculates difference in marginal likelihood between current and
previous iterations.
"""
v_temp = np.concatenate((
self._v_log_Z[0:(i - 1)],
[np.max(self._sampler._m_active[:, d])]
))
w_temp = np.concatenate((self._w[0:(i - 1)], [self._X[i]]))
self._diff = (
+ logsumexp(self._v_log_Z[0:(i - 1)], b=self._w[0:(i - 1)])
- logsumexp(v_temp, b=w_temp)
)
def effective_sample_size(self):
r"""
Calculates the effective sample size of posterior samples from a
nested sampling run using the formula:
.. math::
            ESS = \exp\left(-\sum_{i=1}^{m} p_i \log p_i\right),
in other words, the information. Given by eqn. (39) in [1]_.
"""
self._log_vP = (self._m_samples_all[:, self._n_parameters]
- self._log_Z + np.log(self._w))
return np.exp(-np.sum(self._vP * self._log_vP))
def inactive_points(self):
"""
Returns the inactive points from nested sampling.
"""
return self._m_inactive
def _initialise_callable(self):
"""
Initialises sensitivities if they are needed; otherwise, returns
a callable log likelihood.
"""
f = self._log_likelihood
if self._needs_sensitivities:
f = f.evaluateS1
return f
def _initialise_evaluator(self, f):
"""
Initialises parallel runners, if desired.
"""
# Create evaluator object
if self._parallel:
# Use at most n_workers workers
n_workers = self._n_workers
evaluator = pints.ParallelEvaluator(
f, n_workers=n_workers)
else:
evaluator = pints.SequentialEvaluator(f)
return evaluator
def _initialise_logger(self):
"""
Initialises logger.
"""
# Start logging
self._logging = self._log_to_screen or self._log_filename
if self._logging:
if self._log_to_screen:
# Show current settings
print('Running ' + self._sampler.name())
print('Number of active points: ' +
str(self._n_active_points))
print('Total number of iterations: ' + str(self._iterations))
print('Total number of posterior samples: ' + str(
self._posterior_samples))
# Set up logger
self._logger = pints.Logger()
if not self._log_to_screen:
self._logger.set_stream(None)
if self._log_filename:
self._logger.set_filename(
self._log_filename, csv=self._log_csv)
# Add fields to log
self._logger.add_counter('Iter.', max_value=self._iterations)
self._logger.add_counter('Eval.', max_value=self._iterations * 10)
self._logger.add_time('Time m:s')
self._logger.add_float('Delta_log(z)')
self._logger.add_float('Acceptance rate')
def _initial_points(self):
"""
Generates initial active points.
"""
m_initial = self._log_prior.sample(self._n_active_points)
v_fx = np.zeros(self._n_active_points)
for i in range(0, self._n_active_points):
# Calculate likelihood
v_fx[i] = self._evaluator.evaluate([m_initial[i, :]])[0]
self._sampler._n_evals += 1
# Show progress
if self._logging and i >= self._next_message:
# Log state
self._logger.log(0, self._sampler._n_evals,
self._timer.time(), self._diff, 1.0)
# Choose next logging point
if i > self._message_warm_up:
self._next_message = self._message_interval * (
1 + i // self._message_interval)
self._next_message = 0
return v_fx, m_initial
def iterations(self):
"""
Returns the total number of iterations that will be performed in the
next run.
"""
return self._iterations
def log_likelihood_vector(self):
"""
Returns vector of log likelihoods for each of the stacked
``[m_active, m_inactive]`` points.
"""
return self._m_samples_all[:, -1]
def marginal_log_likelihood(self):
"""
Calculates the marginal log likelihood of nested sampling run.
"""
# Include active particles in sample
m_active = self._sampler.active_points()
self._v_log_Z[self._iterations] = logsumexp(m_active[:,
self._n_parameters])
self._w[self._iterations:] = float(self._X[self._iterations]) / float(
self._sampler.n_active_points())
self._m_samples_all = np.vstack((self._m_inactive, m_active))
# Determine log evidence
log_Z = logsumexp(self._v_log_Z,
b=self._w[0:(self._iterations + 1)])
self._log_Z_called = True
return log_Z
def marginal_log_likelihood_standard_deviation(self):
"""
Calculates standard deviation in marginal log likelihood as in [2]_.
"""
if not self._log_Z_called:
self.marginal_log_likelihood()
log_L_minus_Z = self._v_log_Z - self._log_Z
log_Z_sd = logsumexp(log_L_minus_Z,
b=self._w[0:(self._iterations + 1)] *
log_L_minus_Z)
log_Z_sd = np.sqrt(log_Z_sd / self._sampler.n_active_points())
return log_Z_sd
def marginal_log_likelihood_threshold(self):
"""
Returns threshold for determining convergence in estimate of marginal
log likelihood which leads to early termination of the algorithm.
"""
return self._marginal_log_likelihood_threshold
def n_posterior_samples(self):
"""
Returns the number of posterior samples that will be returned (see
:meth:`set_n_posterior_samples()`).
"""
return self._posterior_samples
def parallel(self):
"""
Returns the number of parallel worker processes this routine will be
run on, or ``False`` if parallelisation is disabled.
"""
return self._n_workers if self._parallel else False
def posterior_samples(self):
"""
Returns posterior samples generated during run of nested
sampling object.
"""
return self._m_posterior_samples
def prior_space(self):
"""
Returns a vector of X samples which approximates the proportion
of prior space compressed.
"""
return self._X
def run(self):
"""
Runs the nested sampling routine and returns a tuple of the posterior
samples and an estimate of the marginal likelihood.
"""
# Can only run once for each controller instance
if self._has_run:
raise RuntimeError("Controller is valid for single use only")
self._has_run = True
# Choose method to evaluate
f = self._initialise_callable()
# Set parallel
self._evaluator = self._initialise_evaluator(f)
# Set number of active points
self._n_active_points = self._sampler.n_active_points()
# Start timing
self._timer = pints.Timer()
# Set up progress reporting
self._next_message = 0
self._message_warm_up = 0
self._message_interval = 20
self._initialise_logger()
d = self._n_parameters
v_fx, m_initial = self._initial_points()
self._sampler._initialise_active_points(m_initial, v_fx)
# store all inactive points, along with their respective
# log-likelihoods (hence, d+1)
self._m_inactive = np.zeros((self._iterations, d + 1))
# store weights
self._w = np.zeros(self._n_active_points + self._iterations)
# store X values (defined in [1])
self._X = np.zeros(self._iterations + 1)
self._X[0] = 1
# log marginal likelihood holder
self._v_log_Z = np.zeros(self._iterations + 1)
# Run!
self._X[0] = 1.0
self._i_message = 0
i_winners = 0
m_previous_winners = []
for i in range(0, self._iterations):
i_iter_complete = 0
self._i = i
a_min_index = self._sampler.min_index()
self._X[i + 1] = np.exp(-(i + 1) / self._n_active_points)
if i > 0:
self._w[i] = 0.5 * (self._X[i - 1] - self._X[i + 1])
else:
self._w[i] = self._X[i] - self._X[i + 1]
self._v_log_Z[i] = self._sampler.running_log_likelihood()
self._m_inactive[i, :] = self._sampler._m_active[a_min_index, :]
# check whether previous winners exceed threshold
if i_winners > 0:
m_previous_winners = m_previous_winners[(
m_previous_winners[:, self._n_parameters] >
self._sampler.running_log_likelihood()), :]
if m_previous_winners.shape[0] > 0:
index = np.random.choice(m_previous_winners.shape[0],
1, replace=False)
proposed = m_previous_winners[index, :self._n_parameters]
fx_temp = m_previous_winners[index, self._n_parameters]
m_previous_winners = np.delete(m_previous_winners,
index, 0)
self._sampler._m_active[self._sampler._min_index, :] = (
np.concatenate((proposed[0], fx_temp))
)
self._sampler._min_index = np.argmin(
self._sampler._m_active[:, self._n_parameters])
self._sampler._set_running_log_likelihood(
np.min(self._sampler._m_active[:, self._n_parameters])
)
self._sampler._accept_count += 1
i_iter_complete = 1
if i_iter_complete == 0:
# Propose new samples
proposed = self._sampler.ask(self._n_workers)
# Evaluate their fit
if self._n_workers > 1:
log_likelihood = self._evaluator.evaluate(proposed)
else:
log_likelihood = self._evaluator.evaluate([proposed])[0]
sample, winners = self._sampler.tell(log_likelihood)
while sample is None:
proposed = self._sampler.ask(self._n_workers)
if self._n_workers > 1:
log_likelihood = ( # pragma: no cover
self._evaluator.evaluate(proposed))
else:
log_likelihood = self._evaluator.evaluate(
[proposed])[0]
sample, winners = self._sampler.tell(log_likelihood)
if winners.size > 0:
if i_winners == 0:
m_previous_winners = winners
i_winners = 1
else:
m_previous_winners = [m_previous_winners, winners]
m_previous_winners = np.concatenate(m_previous_winners)
# Check whether within convergence threshold
if i > 2:
self._diff_marginal_likelihood(i, d)
if (np.abs(self._diff) <
self._marginal_log_likelihood_threshold):
if self._log_to_screen:
print( # pragma: no cover
'Convergence obtained with Delta_z = ' +
str(self._diff))
# shorten arrays according to current iteration
self._iterations = i
self._v_log_Z = self._v_log_Z[0:(self._iterations + 1)]
self._w = self._w[0:(
self._n_active_points + self._iterations)]
self._X = self._X[0:(self._iterations + 1)]
self._m_inactive = self._m_inactive[0:self._iterations, :]
break
# Show progress
self._update_logger()
# Calculate log_evidence and uncertainty
self._log_Z = self.marginal_log_likelihood()
self._log_Z_sd = self.marginal_log_likelihood_standard_deviation()
# Draw samples from posterior
n = self._posterior_samples
self._m_posterior_samples = self.sample_from_posterior(n)
# Stop timer
self._time = self._timer.time()
return self._m_posterior_samples
def sample_from_posterior(self, posterior_samples):
"""
Draws posterior samples based on nested sampling run using importance
sampling. This function is automatically called in
``NestedController.run()`` but can also be called afterwards to obtain
new posterior samples.
"""
if posterior_samples < 1:
raise ValueError('Number of posterior samples must be positive.')
# Calculate probabilities (can this be used to calculate effective
# sample size as in importance sampling?) of each particle
self._vP = np.exp(self._m_samples_all[:, self._n_parameters]
- self._log_Z) * self._w
# Draw posterior samples
m_theta = self._m_samples_all[:, :-1]
vIndex = np.random.choice(
range(0, self._iterations + self._sampler.n_active_points()),
size=posterior_samples, p=self._vP)
m_posterior_samples = m_theta[vIndex, :]
return m_posterior_samples
def set_iterations(self, iterations):
"""
Sets the total number of iterations to be performed in the next run.
"""
iterations = int(iterations)
if iterations < 0:
raise ValueError('Number of iterations cannot be negative.')
self._iterations = iterations
def set_log_to_file(self, filename=None, csv=False):
"""
Enables logging to file when a filename is passed in, disables it if
``filename`` is ``False`` or ``None``.
The argument ``csv`` can be set to ``True`` to write the file in comma
separated value (CSV) format. By default, the file contents will be
similar to the output on screen.
"""
if filename:
self._log_filename = str(filename)
self._log_csv = True if csv else False
else:
self._log_filename = None
self._log_csv = False
def set_log_to_screen(self, enabled):
"""
Enables or disables logging to screen.
"""
self._log_to_screen = True if enabled else False
def set_marginal_log_likelihood_threshold(self, threshold):
"""
        Sets the threshold on the change in the marginal log-likelihood
        estimate below which the run is terminated early (convergence).
"""
if threshold <= 0:
raise ValueError('Convergence threshold must be positive.')
self._marginal_log_likelihood_threshold = threshold
def set_parallel(self, parallel=False):
"""
Enables/disables parallel evaluation.
If ``parallel=True``, the method will run using a number of worker
processes equal to the detected cpu core count. The number of workers
can be set explicitly by setting ``parallel`` to an integer greater
than 0.
Parallelisation can be disabled by setting ``parallel`` to ``0`` or
``False``.
"""
if parallel is True:
self._parallel = True
self._n_workers = pints.ParallelEvaluator.cpu_count()
elif parallel >= 1:
self._parallel = True
self._n_workers = int(parallel)
else:
self._parallel = False
self._n_workers = 1
def set_n_posterior_samples(self, posterior_samples):
"""
Sets the number of posterior samples to generate from points proposed
by the nested sampling algorithm.
"""
posterior_samples = int(posterior_samples)
if posterior_samples < 1:
raise ValueError(
'Number of posterior samples must be greater than zero.')
self._posterior_samples = posterior_samples
def time(self):
"""
Returns the time needed for the last run, in seconds, or ``None`` if
the controller hasn't run yet.
"""
return self._time
def _update_logger(self):
"""
Updates logger if necessary.
"""
# print(self._i_message)
# print(self._next_message)
if self._logging:
self._i_message += 1
if self._i_message >= self._next_message:
# Log state
self._logger.log(self._i_message, self._sampler._n_evals,
self._timer.time(), self._diff,
float(self._sampler._accept_count /
(self._sampler._n_evals -
self._sampler._n_active_points)))
# Choose next logging point
if self._i_message > self._message_warm_up:
self._next_message = self._message_interval * (
1 + self._i_message // self._message_interval)
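
# Hedged usage sketch (illustrative only, not part of the library): how a
# controller like the one above is typically driven. The constructor arguments
# are an assumption; run(), set_iterations(), set_n_posterior_samples(),
# set_parallel() and marginal_log_likelihood() are taken from the code above.
def _example_nested_sampling_run(log_likelihood, log_prior):
    controller = NestedController(log_likelihood, log_prior)
    controller.set_iterations(1000)
    controller.set_n_posterior_samples(500)
    controller.set_parallel(True)
    samples = controller.run()
    # The evidence estimate is computed during run() and queried afterwards.
    return samples, controller.marginal_log_likelihood()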
| |
"""
BlogInterface.py -- General blogging interface
Ken Kinder
2004-08-06
"""
import urllib2, unittest, urlparse, xmlrpclib, base64, time, sha, random, socket, validation
from BlogDiscovery import BlogDiscovery
from urllib import basejoin
from HTMLParser import HTMLParser
from xml.dom import minidom
from util import *
from xml.sax.saxutils import escape
class BlogInterfaceError(Exception):
pass
class BadLoginError(BlogInterfaceError):
pass
class BlogInterface(object):
"""
Finds out everything there is to know about a blog.
"""
available_interfaces = {}
def __init__(self, api, service, blog_name, api_username,
api_password, api_blog_id, api_url, url, passed=True):
"""
Call from_discovery or from_data -- do not call this method directly
"""
if passed:
raise BlogInterfaceError, 'Do not directly construct BlogInterface. Use from_discovery or from_data'
self.api = api
self.service = service
self.blog_name = blog_name
self.api_username = api_username
self.api_password = api_password
self.api_blog_id = api_blog_id
self.api_url = api_url
self.url = url
def verify_login(self):
"""
Returns True if login is usable.
"""
return True
def do_post(self, title, body, is_draft=False):
"""
Posts to BLOG.
"""
raise BlogInterfaceError, 'This blog cannot be posted to'
def from_discovery(cls, blog_discovery):
"""
Creates appropriate BlogInterface object from BlogDiscovery object
"""
override_apis = []
if blog_discovery.service == blog_discovery.SRV_WORDPRESS:
override_apis.append(blog_discovery.API_METAWEBLOG)
for api in blog_discovery.preferred_apis + blog_discovery.supported_apis:
i = cls.from_data(
api, blog_discovery.service, blog_discovery.title,
blog_discovery.username, blog_discovery.password,
blog_discovery.api_blog_ids.get(api, 0),
blog_discovery.api_urls[api], blog_discovery.url)
if i:
return i
from_discovery = classmethod(from_discovery)
def from_data(cls, api, service, blog_name, api_username,
api_password, api_blog_id, api_url, url):
"""
Creates appropriate BlogInterface object from arguments passed.
Returns None if no interface can be found.
"""
for interface_api, interface_class in cls.interfaces.items():
if interface_api == api:
return interface_class(
api, service, blog_name, api_username,
api_password, api_blog_id, api_url, url, False)
from_data = classmethod(from_data)
class AtomInterface(BlogInterface):
def get_nonce(cls):
return sha.sha(str(random.random())).hexdigest()
get_nonce = classmethod(get_nonce)
def do_post(self, title, body, is_draft=False):
body = body.replace('\n', ' ')
body = body.replace('\r', ' ')
#content = '<div xmlns="http://www.w3.org/1999/xhtml">%s</div>' % body
content = escape(body)
title = escape(title)
atomlink = self.api_url
issued = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
        # Put WSSE password digest together
nonce = AtomInterface.get_nonce()
base64_nonce = base64.encodestring(nonce).replace("\n", "")
password_digest = base64.encodestring(sha.sha('%s%s%s' % (nonce, issued, self.api_password)).digest()).replace("\n", "")
#
# Put XML body together
body = """<?xml version="1.0" encoding="UTF-8" ?>
<entry xmlns="http://purl.org/atom/ns#">
<generator url="http://www.daikini.com/source/atomexamples/python/">Daikini Software Python Atom Example</generator>
<title mode="escaped" type="text/html">""" + title + """</title>
<issued>""" + issued + """</issued>
<content type="text/html" mode="escaped">""" + content + """</content>
</entry>"""
# Finally put request together
req = urllib2.Request(url=atomlink,data=body)
req.add_header("Content-type", "application/atom+xml")
#req.add_header('Cookie', 'ljfastserver=1')
req.add_header("Authorization", 'WSSE profile="UsernameToken"')
req.add_header("X-WSSE", 'UsernameToken Username="%s", PasswordDigest="%s", Created="%s", Nonce="%s"' % (self.api_username, password_digest, issued, base64_nonce))
req.add_header("User-Agent", USER_AGENT)
try:
urllib2.urlopen(req)
except urllib2.HTTPError, val:
if val.code != 201:
raise
def verify_login(self):
pass
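
# Hedged illustration (not part of this module's public API): the WSSE password
# digest built in AtomInterface.do_post() above is base64(sha1(nonce + created +
# password)); the argument values would normally come from get_nonce() and the
# stored credentials.
def _example_wsse_digest(nonce, created, password):
    return base64.encodestring(
        sha.sha('%s%s%s' % (nonce, created, password)).digest()).replace("\n", "")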
class BloggerInterface(BlogInterface):
CLIENT_ID = 'zoto.com'
APP_KEY = 'ffffffa420ffffff9efffffffbffffffa701ffffffdd6b5affffffb7ffffffff3b10ffffffb3fffffffd4affffffe32a5d147effffffc5166f'
def do_post(self, title, body, is_draft=False):
try:
body = body.replace('\n', ' ')
body = body.replace('\r', ' ')
auth_dict = {'username': self.api_username, 'password': self.api_password,
'appkey': self.APP_KEY, 'blogID': str(self.api_blog_id)}
post_dict = {'title': title, 'body': body, 'blogID': str(self.api_blog_id),
'postOptions': {'title': title, 'convertLineBreaks': False}}
if is_draft:
actions_dict = {'doPublish': False, 'makeDraft': True}
else:
actions_dict = {'doPublish': True, 'makeDraft': False}
sp = xmlrpclib.ServerProxy(self.api_url)
try:
post_id = sp.blogger2.newPost(auth_dict, post_dict, actions_dict)
except:
#
# Handle the stupid crap socket error from WordPress
try:
post_id = sp.blogger.newPost(
self.APP_KEY, str(self.api_blog_id), self.api_username,
self.api_password, body, not is_draft)
except socket.timeout:
if self.service == 'WordPress':
pass
else:
raise
except xmlrpclib.Fault, val:
raise BlogInterfaceError, val
def verify_login(self):
print 'Blogger Interface - Verify Login'
sp = xmlrpclib.ServerProxy(self.api_url)
try:
if sp.blogger.getUserInfo(self.APP_KEY, self.api_username, self.api_password):
return True
else:
return False
except xmlrpclib.Fault, val:
if 'UserNotAuthorizedException' in str(val) or \
'Invalid login' in str(val) or \
'Wrong username/password combination' in str(val):
raise BadLoginError, val
else:
raise BlogInterfaceError, val
class LJBloggerInterface(BloggerInterface):
def do_post(self, title, body, is_draft=False):
if title:
body = "<title>%s</title>%s" % (title.replace('&', '&').replace('<', '<').replace('>', '>'), body)
return BloggerInterface.do_post(self, title, body, is_draft)
class MetaWebInterface(BlogInterface):
def do_post(self, title, body, is_draft=False):
try:
body = body.replace('\n', ' ')
body = body.replace('\r', ' ')
sp = xmlrpclib.ServerProxy(self.api_url)
            # if blog_id isn't set (no RSD info in the template), we can
            # probably use the blogger API to fetch it from the user's blog.
if not self.api_blog_id:
response = sp.blogger.getUsersBlogs('zotohere', self.api_username, self.api_password)
for blogs in response:
# check both with and without an extra / on the end
if blogs['url'] == self.url or blogs['url']+"/" == self.url:
self.api_blog_id = blogs['blogid']
return sp.metaWeblog.newPost(
self.api_blog_id, self.api_username, self.api_password,
{'title': title, 'description': body}, True)
except xmlrpclib.Fault, val:
raise BlogInterfaceError, val
def verify_login(self):
pass
BlogInterface.interfaces = {
BlogDiscovery.API_ATOM: AtomInterface,
BlogDiscovery.API_BLOGGER: BloggerInterface,
BlogDiscovery.API_LJBLOGGER: LJBloggerInterface,
BlogDiscovery.API_METAWEBLOG: MetaWebInterface
}
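
# Hedged usage sketch (illustrative only): building an interface with
# from_data() and posting an entry. The service, credentials and URLs below
# are made up.
def _example_post(title, body):
    iface = BlogInterface.from_data(
        BlogDiscovery.API_METAWEBLOG, 'WordPress', 'Example Blog',
        'someuser', 'somepassword', 1,
        'http://blog.example.com/xmlrpc.php', 'http://blog.example.com/')
    if iface is not None:
        iface.do_post(title, body, is_draft=False)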
| |
#!/usr/bin/env python
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from _adb import Adb
from _benchresult import BenchResult
from _hardware import HardwareException, Hardware
from argparse import ArgumentParser
from multiprocessing import Queue
from threading import Thread, Timer
import collections
import glob
import math
import re
import subprocess
import sys
import time
__argparse = ArgumentParser(description="""
Executes the skpbench binary with various configs and skps.
Also monitors the output in order to filter out and re-run results that have an
unacceptable stddev.
""")
__argparse.add_argument('skpbench',
help="path to the skpbench binary")
__argparse.add_argument('--adb',
action='store_true', help="execute skpbench over adb")
__argparse.add_argument('--adb_binary', default='adb',
help="The name of the adb binary to use.")
__argparse.add_argument('-s', '--device-serial',
help="if using adb, ID of the specific device to target "
"(only required if more than 1 device is attached)")
__argparse.add_argument('-m', '--max-stddev',
type=float, default=4,
help="initial max allowable relative standard deviation")
__argparse.add_argument('-x', '--suffix',
help="suffix to append on config (e.g. '_before', '_after')")
__argparse.add_argument('-w','--write-path',
help="directory to save .png proofs to disk.")
__argparse.add_argument('-v','--verbosity',
type=int, default=1, help="level of verbosity (0=none to 5=debug)")
__argparse.add_argument('-d', '--duration',
type=int, help="number of milliseconds to run each benchmark")
__argparse.add_argument('-l', '--sample-ms',
type=int, help="duration of a sample (minimum)")
__argparse.add_argument('--force',
action='store_true',
help="perform benchmarking on unrecognized Android devices")
__argparse.add_argument('--gpu',
action='store_true',
help="perform timing on the gpu clock instead of cpu (gpu work only)")
__argparse.add_argument('--fps',
action='store_true', help="use fps instead of ms")
__argparse.add_argument('--pr',
help="comma- or space-separated list of GPU path renderers, including: "
"[[~]all [~]default [~]dashline [~]nvpr [~]msaa [~]aaconvex "
"[~]aalinearizing [~]small [~]tess]")
__argparse.add_argument('--cc',
action='store_true', help="allow coverage counting shortcuts to render paths")
__argparse.add_argument('--nocache',
action='store_true', help="disable caching of path mask textures")
__argparse.add_argument('-c', '--config',
default='gl', help="comma- or space-separated list of GPU configs")
__argparse.add_argument('-a', '--resultsfile',
help="optional file to append results into")
__argparse.add_argument('--ddl',
action='store_true', help="record the skp into DDLs before rendering")
__argparse.add_argument('--ddlNumAdditionalThreads',
type=int, default=0,
help="number of DDL recording threads in addition to main one")
__argparse.add_argument('--ddlTilingWidthHeight',
type=int, default=0, help="number of tiles along one edge when in DDL mode")
__argparse.add_argument('--gpuThreads',
type=int, default=-1,
help="Create this many extra threads to assist with GPU work, including"
" software path rendering. Defaults to two.")
__argparse.add_argument('srcs',
nargs='+',
help=".skp files or directories to expand for .skp files, and/or .svg files")
FLAGS = __argparse.parse_args()
if FLAGS.adb:
import _adb_path as _path
_path.init(FLAGS.device_serial, FLAGS.adb_binary)
else:
import _os_path as _path
def dump_commandline_if_verbose(commandline):
if FLAGS.verbosity >= 5:
quoted = ['\'%s\'' % re.sub(r'([\\\'])', r'\\\1', x) for x in commandline]
print(' '.join(quoted), file=sys.stderr)
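
# Hedged illustration (not part of skpbench): the quoting used in
# dump_commandline_if_verbose() above wraps each argument in single quotes and
# backslash-escapes any embedded quotes or backslashes.
def _example_shell_quote(arg):
  return '\'%s\'' % re.sub(r'([\\\'])', r'\\\1', arg)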
class StddevException(Exception):
pass
class Message:
READLINE = 0,
POLL_HARDWARE = 1,
EXIT = 2
def __init__(self, message, value=None):
self.message = message
self.value = value
class SubprocessMonitor(Thread):
def __init__(self, queue, proc):
self._queue = queue
self._proc = proc
Thread.__init__(self)
def run(self):
"""Runs on the background thread."""
for line in iter(self._proc.stdout.readline, b''):
self._queue.put(Message(Message.READLINE, line.decode('utf-8').rstrip()))
self._queue.put(Message(Message.EXIT))
class SKPBench:
ARGV = [FLAGS.skpbench, '--verbosity', str(FLAGS.verbosity)]
if FLAGS.duration:
ARGV.extend(['--duration', str(FLAGS.duration)])
if FLAGS.sample_ms:
ARGV.extend(['--sampleMs', str(FLAGS.sample_ms)])
if FLAGS.gpu:
ARGV.extend(['--gpuClock', 'true'])
if FLAGS.fps:
ARGV.extend(['--fps', 'true'])
if FLAGS.pr:
ARGV.extend(['--pr'] + re.split(r'[ ,]', FLAGS.pr))
if FLAGS.cc:
ARGV.extend(['--cc', 'true'])
if FLAGS.nocache:
ARGV.extend(['--cachePathMasks', 'false'])
if FLAGS.gpuThreads != -1:
ARGV.extend(['--gpuThreads', str(FLAGS.gpuThreads)])
# DDL parameters
if FLAGS.ddl:
ARGV.extend(['--ddl', 'true'])
if FLAGS.ddlNumAdditionalThreads:
ARGV.extend(['--ddlNumAdditionalThreads',
str(FLAGS.ddlNumAdditionalThreads)])
if FLAGS.ddlTilingWidthHeight:
ARGV.extend(['--ddlTilingWidthHeight', str(FLAGS.ddlTilingWidthHeight)])
if FLAGS.adb:
if FLAGS.device_serial is None:
ARGV[:0] = [FLAGS.adb_binary, 'shell']
else:
ARGV[:0] = [FLAGS.adb_binary, '-s', FLAGS.device_serial, 'shell']
@classmethod
def get_header(cls, outfile=sys.stdout):
commandline = cls.ARGV + ['--duration', '0']
dump_commandline_if_verbose(commandline)
out = subprocess.check_output(commandline, stderr=subprocess.STDOUT)
return out.rstrip()
@classmethod
def run_warmup(cls, warmup_time, config):
if not warmup_time:
return
print('running %i second warmup...' % warmup_time, file=sys.stderr)
commandline = cls.ARGV + ['--duration', str(warmup_time * 1000),
'--config', config,
'--src', 'warmup']
dump_commandline_if_verbose(commandline)
output = subprocess.check_output(commandline, stderr=subprocess.STDOUT)
# validate the warmup run output.
for line in output.decode('utf-8').split('\n'):
match = BenchResult.match(line.rstrip())
if match and match.bench == 'warmup':
return
raise Exception('Invalid warmup output:\n%s' % output)
def __init__(self, src, config, max_stddev, best_result=None):
self.src = src
self.config = config
self.max_stddev = max_stddev
self.best_result = best_result
self._queue = Queue()
self._proc = None
self._monitor = None
self._hw_poll_timer = None
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
if self._proc:
self.terminate()
if self._hw_poll_timer:
self._hw_poll_timer.cancel()
def execute(self, hardware):
hardware.sanity_check()
self._schedule_hardware_poll()
commandline = self.ARGV + ['--config', self.config,
'--src', self.src,
'--suppressHeader', 'true']
if FLAGS.write_path:
pngfile = _path.join(FLAGS.write_path, self.config,
_path.basename(self.src) + '.png')
commandline.extend(['--png', pngfile])
dump_commandline_if_verbose(commandline)
self._proc = subprocess.Popen(commandline, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self._monitor = SubprocessMonitor(self._queue, self._proc)
self._monitor.start()
while True:
message = self._queue.get()
if message.message == Message.READLINE:
result = BenchResult.match(message.value)
if result:
hardware.sanity_check()
self._process_result(result)
elif hardware.filter_line(message.value):
print(message.value, file=sys.stderr)
continue
if message.message == Message.POLL_HARDWARE:
hardware.sanity_check()
self._schedule_hardware_poll()
continue
if message.message == Message.EXIT:
self._monitor.join()
self._proc.wait()
if self._proc.returncode != 0:
raise Exception("skpbench exited with nonzero exit code %i" %
self._proc.returncode)
self._proc = None
break
def _schedule_hardware_poll(self):
if self._hw_poll_timer:
self._hw_poll_timer.cancel()
self._hw_poll_timer = \
Timer(1, lambda: self._queue.put(Message(Message.POLL_HARDWARE)))
self._hw_poll_timer.start()
def _process_result(self, result):
if not self.best_result or result.stddev <= self.best_result.stddev:
self.best_result = result
elif FLAGS.verbosity >= 2:
print("reusing previous result for %s/%s with lower stddev "
"(%s%% instead of %s%%)." %
(result.config, result.bench, self.best_result.stddev,
result.stddev), file=sys.stderr)
if self.max_stddev and self.best_result.stddev > self.max_stddev:
raise StddevException()
def terminate(self):
if self._proc:
self._proc.terminate()
self._monitor.join()
self._proc.wait()
self._proc = None
def emit_result(line, resultsfile=None):
print(line)
sys.stdout.flush()
if resultsfile:
print(line, file=resultsfile)
resultsfile.flush()
def run_benchmarks(configs, srcs, hardware, resultsfile=None):
hasheader = False
benches = collections.deque([(src, config, FLAGS.max_stddev)
for src in srcs
for config in configs])
while benches:
try:
with hardware:
SKPBench.run_warmup(hardware.warmup_time, configs[0])
if not hasheader:
emit_result(SKPBench.get_header(), resultsfile)
hasheader = True
while benches:
benchargs = benches.popleft()
with SKPBench(*benchargs) as skpbench:
try:
skpbench.execute(hardware)
if skpbench.best_result:
emit_result(skpbench.best_result.format(FLAGS.suffix),
resultsfile)
else:
print("WARNING: no result for %s with config %s" %
(skpbench.src, skpbench.config), file=sys.stderr)
except StddevException:
retry_max_stddev = skpbench.max_stddev * math.sqrt(2)
if FLAGS.verbosity >= 1:
print("stddev is too high for %s/%s (%s%%, max=%.2f%%), "
"re-queuing with max=%.2f%%." %
(skpbench.best_result.config, skpbench.best_result.bench,
skpbench.best_result.stddev, skpbench.max_stddev,
retry_max_stddev),
file=sys.stderr)
benches.append((skpbench.src, skpbench.config, retry_max_stddev,
skpbench.best_result))
except HardwareException as exception:
skpbench.terminate()
if FLAGS.verbosity >= 4:
hardware.print_debug_diagnostics()
if FLAGS.verbosity >= 1:
print("%s; rebooting and taking a %i second nap..." %
(exception.message, exception.sleeptime), file=sys.stderr)
benches.appendleft(benchargs) # retry the same bench next time.
raise # wake hw up from benchmarking mode before the nap.
except HardwareException as exception:
time.sleep(exception.sleeptime)
def main():
# Delimiter is ',' or ' ', skip if nested inside parens (e.g. gpu(a=b,c=d)).
DELIMITER = r'[, ](?!(?:[^(]*\([^)]*\))*[^()]*\))'
configs = re.split(DELIMITER, FLAGS.config)
srcs = _path.find_skps(FLAGS.srcs)
assert srcs
if FLAGS.adb:
adb = Adb(FLAGS.device_serial, FLAGS.adb_binary,
echo=(FLAGS.verbosity >= 5))
model = adb.check('getprop ro.product.model').strip()
if model == 'Pixel C':
from _hardware_pixel_c import HardwarePixelC
hardware = HardwarePixelC(adb)
elif model == 'Pixel':
from _hardware_pixel import HardwarePixel
hardware = HardwarePixel(adb)
elif model == 'Pixel 2':
from _hardware_pixel2 import HardwarePixel2
hardware = HardwarePixel2(adb)
elif model == 'Nexus 6P':
from _hardware_nexus_6p import HardwareNexus6P
hardware = HardwareNexus6P(adb)
elif FLAGS.force:
from _hardware_android import HardwareAndroid
print("WARNING: %s: don't know how to monitor this hardware; results "
"may be unreliable." % model, file=sys.stderr)
hardware = HardwareAndroid(adb)
else:
raise Exception("%s: don't know how to monitor this hardware. "
"Use --force to bypass this warning." % model)
else:
hardware = Hardware()
if FLAGS.resultsfile:
with open(FLAGS.resultsfile, mode='a+') as resultsfile:
run_benchmarks(configs, srcs, hardware, resultsfile=resultsfile)
else:
run_benchmarks(configs, srcs, hardware)
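
# Hedged sanity example (illustrative only): the DELIMITER regex above splits on
# commas and spaces but leaves delimiters inside parentheses alone, so a config
# list like the made-up one below stays grouped correctly.
def _example_split_configs():
  delimiter = r'[, ](?!(?:[^(]*\([^)]*\))*[^()]*\))'
  # Returns ['gl', 'vk', 'gles(key=a,other=b)'].
  return re.split(delimiter, 'gl,vk gles(key=a,other=b)')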
if __name__ == '__main__':
main()
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client Libraries for Rackspace Resources."""
import hashlib
import random
import time
from glanceclient import client as gc
from oslo_config import cfg
from oslo_log import log as logging
from six.moves.urllib import parse
from swiftclient import utils as swiftclient_utils
from troveclient import client as tc
from heat.common import exception
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine.clients import client_plugin
from heat.engine.clients.os import cinder
from heat.engine.clients.os import glance
from heat.engine.clients.os import nova
from heat.engine.clients.os import swift
from heat.engine.clients.os import trove
LOG = logging.getLogger(__name__)
try:
import pyrax
except ImportError:
pyrax = None
class RackspaceClientPlugin(client_plugin.ClientPlugin):
pyrax = None
def _get_client(self, name):
if self.pyrax is None:
self._authenticate()
return self.pyrax.get_client(
name, cfg.CONF.region_name_for_services)
def _authenticate(self):
"""Create an authenticated client context."""
self.pyrax = pyrax.create_context("rackspace")
self.pyrax.auth_endpoint = self.context.auth_url
LOG.info(_LI("Authenticating username: %s"),
self.context.username)
tenant = self.context.tenant_id
tenant_name = self.context.tenant
self.pyrax.auth_with_token(self.context.auth_token,
tenant_id=tenant,
tenant_name=tenant_name)
if not self.pyrax.authenticated:
LOG.warn(_LW("Pyrax Authentication Failed."))
raise exception.AuthorizationFailure()
LOG.info(_LI("User %s authenticated successfully."),
self.context.username)
class RackspaceAutoScaleClient(RackspaceClientPlugin):
def _create(self):
"""Rackspace Auto Scale client."""
return self._get_client("autoscale")
class RackspaceCloudLBClient(RackspaceClientPlugin):
def _create(self):
"""Rackspace cloud loadbalancer client."""
return self._get_client("load_balancer")
class RackspaceCloudDNSClient(RackspaceClientPlugin):
def _create(self):
"""Rackspace cloud dns client."""
return self._get_client("dns")
class RackspaceNovaClient(nova.NovaClientPlugin,
RackspaceClientPlugin):
def _create(self):
"""Rackspace cloudservers client."""
client = self._get_client("compute")
if not client:
client = super(RackspaceNovaClient, self)._create()
return client
class RackspaceCloudNetworksClient(RackspaceClientPlugin):
def _create(self):
"""Rackspace cloud networks client.
Though pyrax "fixed" the network client bugs that were introduced
in 1.8, it still doesn't work for contexts because of caching of the
nova client.
"""
if not self.pyrax:
self._authenticate()
# need special handling now since the contextual
# pyrax doesn't handle "networks" not being in
# the catalog
ep = pyrax._get_service_endpoint(
self.pyrax, "compute", region=cfg.CONF.region_name_for_services)
cls = pyrax._client_classes['compute:network']
client = cls(self.pyrax,
region_name=cfg.CONF.region_name_for_services,
management_url=ep)
return client
class RackspaceTroveClient(trove.TroveClientPlugin):
"""Rackspace trove client.
Since the pyrax module uses its own client implementation for Cloud
Databases, we have to skip pyrax on this one and override the super
implementation to account for custom service type and regionalized
management url.
"""
def _create(self):
service_type = "rax:database"
con = self.context
endpoint_type = self._get_client_option('trove', 'endpoint_type')
args = {
'service_type': service_type,
'auth_url': con.auth_url,
'proxy_token': con.auth_token,
'username': None,
'password': None,
'cacert': self._get_client_option('trove', 'ca_file'),
'insecure': self._get_client_option('trove', 'insecure'),
'endpoint_type': endpoint_type
}
client = tc.Client('1.0', **args)
region = cfg.CONF.region_name_for_services
management_url = self.url_for(service_type=service_type,
endpoint_type=endpoint_type,
region_name=region)
client.client.auth_token = con.auth_token
client.client.management_url = management_url
return client
class RackspaceCinderClient(cinder.CinderClientPlugin):
def _create(self):
"""Override the region for the cinder client."""
client = super(RackspaceCinderClient, self)._create()
management_url = self.url_for(
service_type='volume',
region_name=cfg.CONF.region_name_for_services)
client.client.management_url = management_url
return client
class RackspaceSwiftClient(swift.SwiftClientPlugin):
def is_valid_temp_url_path(self, path):
"""Return True if path is a valid Swift TempURL path, False otherwise.
A Swift TempURL path must:
- Be five parts, ['', 'v1', 'account', 'container', 'object']
- Be a v1 request
- Have account, container, and object values
- Have an object value with more than just '/'s
:param path: The TempURL path
:type path: string
"""
parts = path.split('/', 4)
return bool(len(parts) == 5 and
not parts[0] and
parts[1] == 'v1' and
parts[2] and
parts[3] and
parts[4].strip('/'))
def get_temp_url(self, container_name, obj_name, timeout=None,
method='PUT'):
"""Return a Swift TempURL."""
def tenant_uuid():
access = self.context.auth_token_info['access']
for role in access['user']['roles']:
if role['name'] == 'object-store:default':
return role['tenantId']
key_header = 'x-account-meta-temp-url-key'
if key_header in self.client().head_account():
key = self.client().head_account()[key_header]
else:
key = hashlib.sha224(str(random.getrandbits(256))).hexdigest()[:32]
self.client().post_account({key_header: key})
path = '/v1/%s/%s/%s' % (tenant_uuid(), container_name, obj_name)
if timeout is None:
timeout = swift.MAX_EPOCH - 60 - time.time()
tempurl = swiftclient_utils.generate_temp_url(path, timeout, key,
method)
sw_url = parse.urlparse(self.client().url)
return '%s://%s%s' % (sw_url.scheme, sw_url.netloc, tempurl)
class RackspaceGlanceClient(glance.GlanceClientPlugin):
def _create(self):
con = self.context
endpoint_type = self._get_client_option('glance', 'endpoint_type')
endpoint = self.url_for(
service_type='image',
endpoint_type=endpoint_type,
region_name=cfg.CONF.region_name_for_services)
# Rackspace service catalog includes a tenant scoped glance
# endpoint so we have to munge the url a bit
glance_url = parse.urlparse(endpoint)
# remove the tenant and following from the url
endpoint = "%s://%s" % (glance_url.scheme, glance_url.hostname)
args = {
'auth_url': con.auth_url,
'service_type': 'image',
'project_id': con.tenant,
'token': self.auth_token,
'endpoint_type': endpoint_type,
'ca_file': self._get_client_option('glance', 'ca_file'),
'cert_file': self._get_client_option('glance', 'cert_file'),
'key_file': self._get_client_option('glance', 'key_file'),
'insecure': self._get_client_option('glance', 'insecure')
}
return gc.Client('2', endpoint, **args)
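
# Hedged illustration (not part of heat): the endpoint munging done in
# RackspaceGlanceClient._create() above keeps only scheme and hostname, so a
# tenant-scoped catalog URL (the one below is made up) loses its path.
def _example_strip_tenant_from_endpoint():
    glance_url = parse.urlparse('https://images.example.com/v2/1234567')
    # Returns 'https://images.example.com'.
    return "%s://%s" % (glance_url.scheme, glance_url.hostname)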
| |
"""
Support for Apple TV.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.apple_tv/
"""
import asyncio
import logging
import hashlib
import aiohttp
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_STOP, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_TURN_ON,
SUPPORT_TURN_OFF, MediaPlayerDevice, PLATFORM_SCHEMA, MEDIA_TYPE_MUSIC,
MEDIA_TYPE_VIDEO, MEDIA_TYPE_TVSHOW)
from homeassistant.const import (
STATE_IDLE, STATE_PAUSED, STATE_PLAYING, STATE_STANDBY, CONF_HOST,
STATE_OFF, CONF_NAME)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
REQUIREMENTS = ['pyatv==0.1.4']
_LOGGER = logging.getLogger(__name__)
CONF_LOGIN_ID = 'login_id'
CONF_START_OFF = 'start_off'
DEFAULT_NAME = 'Apple TV'
DATA_APPLE_TV = 'apple_tv'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_LOGIN_ID): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_START_OFF, default=False): cv.boolean
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Setup the Apple TV platform."""
import pyatv
if discovery_info is not None:
name = discovery_info['name']
host = discovery_info['host']
login_id = discovery_info['hsgid']
start_off = False
else:
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
login_id = config.get(CONF_LOGIN_ID)
start_off = config.get(CONF_START_OFF)
if DATA_APPLE_TV not in hass.data:
hass.data[DATA_APPLE_TV] = []
if host in hass.data[DATA_APPLE_TV]:
return False
hass.data[DATA_APPLE_TV].append(host)
details = pyatv.AppleTVDevice(name, host, login_id)
session = async_get_clientsession(hass)
atv = pyatv.connect_to_apple_tv(details, hass.loop, session=session)
entity = AppleTvDevice(atv, name, start_off)
yield from async_add_entities([entity], update_before_add=True)
class AppleTvDevice(MediaPlayerDevice):
"""Representation of an Apple TV device."""
def __init__(self, atv, name, is_off):
"""Initialize the Apple TV device."""
self._atv = atv
self._name = name
self._is_off = is_off
self._playing = None
self._artwork_hash = None
@callback
def _set_power_off(self, is_off):
self._playing = None
self._artwork_hash = None
self._is_off = is_off
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._is_off:
return STATE_OFF
if self._playing is not None:
from pyatv import const
state = self._playing.play_state
if state == const.PLAY_STATE_NO_MEDIA:
return STATE_IDLE
elif state == const.PLAY_STATE_PLAYING or \
state == const.PLAY_STATE_LOADING:
return STATE_PLAYING
elif state == const.PLAY_STATE_PAUSED or \
state == const.PLAY_STATE_FAST_FORWARD or \
state == const.PLAY_STATE_FAST_BACKWARD:
# Catch fast forward/backward here so "play" is default action
return STATE_PAUSED
else:
return STATE_STANDBY # Bad or unknown state?
@asyncio.coroutine
def async_update(self):
"""Retrieve latest state."""
if self._is_off:
return
from pyatv import exceptions
try:
playing = yield from self._atv.metadata.playing()
if self._has_playing_media_changed(playing):
base = str(playing.title) + str(playing.artist) + \
str(playing.album) + str(playing.total_time)
self._artwork_hash = hashlib.md5(
base.encode('utf-8')).hexdigest()
self._playing = playing
except exceptions.AuthenticationError as ex:
_LOGGER.warning('%s (bad login id?)', str(ex))
except aiohttp.errors.ClientOSError as ex:
_LOGGER.error('failed to connect to Apple TV (%s)', str(ex))
except asyncio.TimeoutError:
_LOGGER.warning('timed out while connecting to Apple TV')
def _has_playing_media_changed(self, new_playing):
if self._playing is None:
return True
old_playing = self._playing
return new_playing.media_type != old_playing.media_type or \
new_playing.title != old_playing.title
@property
def media_content_type(self):
"""Content type of current playing media."""
if self._playing is not None:
from pyatv import const
media_type = self._playing.media_type
if media_type == const.MEDIA_TYPE_VIDEO:
return MEDIA_TYPE_VIDEO
elif media_type == const.MEDIA_TYPE_MUSIC:
return MEDIA_TYPE_MUSIC
elif media_type == const.MEDIA_TYPE_TV:
return MEDIA_TYPE_TVSHOW
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._playing is not None:
return self._playing.total_time
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._playing is not None:
return self._playing.position
@property
def media_position_updated_at(self):
"""Last valid time of media position."""
state = self.state
if state == STATE_PLAYING or state == STATE_PAUSED:
return dt_util.utcnow()
@asyncio.coroutine
def async_play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the media player."""
yield from self._atv.remote_control.play_url(media_id, 0)
@property
def media_image_hash(self):
"""Hash value for media image."""
return self._artwork_hash
@asyncio.coroutine
def async_get_media_image(self):
"""Fetch media image of current playing image."""
return (yield from self._atv.metadata.artwork()), 'image/png'
@property
def media_title(self):
"""Title of current playing media."""
if self._playing is not None:
if self.state == STATE_IDLE:
return 'Nothing playing'
title = self._playing.title
return title if title else "No title"
@property
def supported_features(self):
"""Flag media player features that are supported."""
features = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PLAY_MEDIA
if self._playing is None or self.state == STATE_IDLE:
return features
features |= SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_SEEK | \
SUPPORT_STOP | SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK
return features
@asyncio.coroutine
def async_turn_on(self):
"""Turn the media player on."""
self._set_power_off(False)
@asyncio.coroutine
def async_turn_off(self):
"""Turn the media player off."""
self._set_power_off(True)
def async_media_play_pause(self):
"""Pause media on media player.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
state = self.state
if state == STATE_PAUSED:
return self._atv.remote_control.play()
elif state == STATE_PLAYING:
return self._atv.remote_control.pause()
def async_media_play(self):
"""Play media.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.play()
def async_media_pause(self):
"""Pause the media player.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.pause()
def async_media_next_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.next()
def async_media_previous_track(self):
"""Send previous track command.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.previous()
def async_media_seek(self, position):
"""Send seek command.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.set_position(position)
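
# Hedged sketch (illustrative only, not part of Home Assistant): how the
# artwork hash used for media_image_hash above is derived from the playing
# metadata; in practice the field values come from pyatv.
def _example_artwork_hash(title, artist, album, total_time):
    base = str(title) + str(artist) + str(album) + str(total_time)
    return hashlib.md5(base.encode('utf-8')).hexdigest()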
| |
#!/usr/local/bin/python
# encoding: utf-8
"""
*sorts the contents of all taskpaper files via workflow tags*
:Author:
David Young
:Date Created:
November 5, 2016
"""
################# GLOBAL IMPORTS ####################
import sys
import os
os.environ['TERM'] = 'vt100'
from fundamentals import tools
from fundamentals.renderer import list_of_dictionaries
from operator import itemgetter
import collections
import codecs
import textwrap
from tastic.tastic import document
class workspace():
"""
*tools for sorting, archiving and indexing tasks and maintaining the contents of all taskpaper files within a given workspace*
**Key Arguments:**
- ``log`` -- logger
- ``fileOrWorkspacePath`` -- the root path of the workspace you wish to sort the taskpaper docs within, or the path to a single taskpaper file
- ``settings`` -- the settings dictionary
**Usage:**
To setup your logger, settings and database connections, please use the ``fundamentals`` package (`see tutorial here <http://fundamentals.readthedocs.io/en/latest/#tutorial>`_).
To initiate a taskpaper workspace object, use the following:
.. code-block:: python
from tastic.workspace import workspace
ws = workspace(
log=log,
settings=settings,
fileOrWorkspacePath="/path/to/root/of/workspace"
)
or to target a single taskpaper document use instead the path to the file:
.. code-block:: python
from tastic.workspace import workspace
ws = workspace(
log=log,
settings=settings,
fileOrWorkspacePath="/path/to/doc.taskpaper"
)
"""
# Initialisation
def __init__(
self,
log,
fileOrWorkspacePath,
settings=False
):
self.log = log
log.debug("instansiating a new 'sort' object")
self.settings = settings
self.taskpaperPath = False
self.workspaceRoot = False
# xt-self-arg-tmpx
# INITIAL ACTIONS
# ARE WE DEALING WITH A WORKSPACE DIRECTORY OR SINGLE FILE
if os.path.isfile(fileOrWorkspacePath):
self.taskpaperPath = fileOrWorkspacePath
else:
self.workspaceRoot = fileOrWorkspacePath
self.taskpaperFiles = self._get_all_taskpaper_files()
return None
def sort(self):
"""
*sort the workspace or individual taskpaper document via the workflow tags found in the settings file*
**Usage:**
To sort all of the taskpaper documents in the workspace via the workflow tag set with the settings file, for example:
.. code-block:: yaml
workflowTags: "@due, @flag, @hold, @next, @someday, @wait"
use the ``sort()`` method:
.. code-block:: python
ws.sort()
"""
self.log.info('starting the ``sort`` method')
for f in self.taskpaperFiles:
self._sort_tp_file(f)
self.log.info('completed the ``sort`` method')
return None
def archive_done(
self):
"""*move done tasks from the document's 'Archive' project into an adjacent markdown tasklog file*
**Usage:**
To move the archived tasks within a workspace's taskpaper docs into ``-tasklog.md`` files use the ``archive_done()`` method:
.. code-block:: python
ws.archive_done()
"""
self.log.info('starting the ``archive_done`` method')
for f in self.taskpaperFiles:
self._archive_tp_file_done_tasks(f)
self.log.info('completed the ``archive_done`` method')
return None
def _get_all_taskpaper_files(
self):
"""*get a list of all the taskpaper filepaths in the workspace*
**Return:**
- ``taskpaperFiles`` -- a list of paths to all the taskpaper files within the workspace
"""
self.log.info('starting the ``_get_all_taskpaper_files`` method')
if self.workspaceRoot:
from fundamentals.files import recursive_directory_listing
theseFiles = recursive_directory_listing(
log=self.log,
baseFolderPath=self.workspaceRoot,
whatToList="files" # all | files | dirs
)
taskpaperFiles = []
taskpaperFiles[:] = [f for f in theseFiles if os.path.splitext(f)[
1] == ".taskpaper"]
else:
taskpaperFiles = [self.taskpaperPath]
self.log.info('completed the ``_get_all_taskpaper_files`` method')
return taskpaperFiles
def _sort_tp_file(
self,
taskpaperPath):
"""*sort individual taskpaper documents*
**Key Arguments:**
- ``taskpaperPath`` -- path to a taskpaper file
**Return:**
- None
"""
self.log.info('starting the ``_sort_tp_file`` method')
# OPEN TASKPAPER FILE
self.log.info("sorting taskpaper file %(taskpaperPath)s" % locals())
doc = document(taskpaperPath)
doc.tidy()
doc.sort_tasks(self.settings["workflowTags"])
doc.sort_projects(self.settings["workflowTags"])
doc.save()
self.log.info('completed the ``_sort_tp_file`` method')
return None
def _archive_tp_file_done_tasks(
self,
taskpaperPath):
"""* archive tp file done tasks*
**Key Arguments:**
- ``taskpaperPath`` -- path to a taskpaper file
**Return:**
- None
"""
self.log.info('starting the ``_archive_tp_file_done_tasks`` method')
self.log.info("archiving taskpaper file %(taskpaperPath)s" % locals())
taskLog = {}
mdArchiveFile = taskpaperPath.replace(".taskpaper", "-tasklog.md")
exists = os.path.exists(mdArchiveFile)
if exists:
pathToReadFile = mdArchiveFile
try:
self.log.debug("attempting to open the file %s" %
(pathToReadFile,))
readFile = codecs.open(
pathToReadFile, encoding='utf-8', mode='r')
thisData = readFile.read()
readFile.close()
except IOError, e:
message = 'could not open the file %s' % (pathToReadFile,)
self.log.critical(message)
raise IOError(message)
table = False
for l in thisData.split("\n"):
l = l.encode("utf-8")
if ":---" in l:
table = True
continue
if table == True and len(l) and l[0] == "|":
dictt = collections.OrderedDict(sorted({}.items()))
columns = l.split("|")
dictt["task"] = columns[1].strip().decode("utf-8")
dictt["completed"] = columns[2].strip().decode("utf-8")
dictt["project"] = columns[3].strip().decode("utf-8")
taskLog[dictt["task"] + dictt["completed"] +
dictt["project"]] = dictt
doc = document(taskpaperPath)
aProject = doc.get_project("Archive")
if not aProject:
return
doneTasks = aProject.tagged_tasks("@done")
for task in doneTasks:
dateCompleted = ""
project = ""
for t in task.tags:
if "done" in t:
dateCompleted = t.replace("done", "").replace(
"(", "").replace(")", "")
if "project(" in t:
project = t.replace("project", "").replace(
"(", "").replace(")", "")
dictt = collections.OrderedDict(sorted({}.items()))
notes = ""
if task.notes:
for n in task.notes:
if len(notes) and notes[-2:] != ". ":
if notes[-1] == ".":
notes += " "
else:
notes += ". "
notes += n.title
if len(notes):
notes = "<br><br>**NOTES:**<br>" + \
"<br>".join(textwrap.wrap(
notes, 120, break_long_words=True))
dictt["task"] = "<br>".join(textwrap.wrap(task.title[
2:], 120, break_long_words=True)) + notes
dictt["task"] = dictt["task"].encode("utf-8")
dictt["completed"] = dateCompleted
dictt["project"] = project
            # DECODE FIELDS BACK TO UNICODE FOR THE MARKDOWN TABLE
dictt["task"] = dictt["task"].decode("utf-8")
dictt["completed"] = dictt["completed"].decode("utf-8")
dictt["project"] = dictt["project"].decode("utf-8")
taskLog[dictt["task"] + dictt["completed"] +
dictt["project"]] = dictt
taskLog = taskLog.values()
taskLog = sorted(taskLog, key=itemgetter('task'), reverse=True)
taskLog = sorted(taskLog, key=itemgetter('project'), reverse=True)
taskLog = sorted(taskLog, key=itemgetter('completed'), reverse=True)
dataSet = list_of_dictionaries(
log=self.log,
listOfDictionaries=taskLog
)
markdownData = dataSet.markdown(filepath=None)
try:
self.log.debug("attempting to open the file %s" % (mdArchiveFile,))
writeFile = codecs.open(mdArchiveFile, encoding='utf-8', mode='w')
except IOError, e:
message = 'could not open the file %s' % (mdArchiveFile,)
self.log.critical(message)
raise IOError(message)
writeFile.write(markdownData.decode("utf-8"))
writeFile.close()
aProject.delete()
doc.save()
self.log.info('completed the ``_archive_tp_file_done_tasks`` method')
return None
# use the tab-trigger below for new method
# xt-class-method
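
# Hedged end-to-end sketch (illustrative only): sort a workspace by the
# workflow tags defined in the settings and then archive completed tasks.
# The path and settings below are made up.
def _example_sort_and_archive(log):
    settings = {"workflowTags": "@due, @flag, @hold, @next, @someday, @wait"}
    ws = workspace(
        log=log,
        settings=settings,
        fileOrWorkspacePath="/path/to/root/of/workspace"
    )
    ws.sort()
    ws.archive_done()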
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.db.sqlalchemy import api as db_api
from heat.engine import clients
from heat.engine import resource
from heat.engine import scheduler
from heat.engine.resources import nova_utils
from heat.engine.resources.neutron import neutron
if clients.neutronclient is not None:
from neutronclient.common.exceptions import NeutronClientException
class HealthMonitor(neutron.NeutronResource):
"""
A resource for managing health monitors for load balancers in Neutron.
"""
properties_schema = {
'delay': {
'Type': 'Integer', 'Required': True,
'Description': _('The minimum time in seconds between regular '
'connections of the member.')},
'type': {
'Type': 'String', 'Required': True,
'AllowedValues': ['PING', 'TCP', 'HTTP', 'HTTPS'],
'Description': _('One of predefined health monitor types.')},
'max_retries': {
'Type': 'Integer', 'Required': True,
'Description': _('Number of permissible connection failures before'
' changing the member status to INACTIVE.')},
'timeout': {
'Type': 'Integer', 'Required': True,
'Description': _('Maximum number of seconds for a monitor to '
'wait for a connection to be established before '
'it times out.')},
'admin_state_up': {
'Default': True, 'Type': 'Boolean',
'Description': _('The administrative state of the health '
'monitor.')},
'http_method': {
'Type': 'String',
'Description': _('The HTTP method used for requests by the '
'monitor of type HTTP.')},
'expected_codes': {
'Type': 'String',
'Description': _('The list of HTTP status codes expected in '
'response from the member to declare it '
'healthy.')},
'url_path': {
'Type': 'String',
'Description': _('The HTTP path used in the HTTP request used '
'by the monitor to test a member health.')},
}
update_allowed_keys = ('Properties',)
update_allowed_properties = ('delay', 'max_retries', 'timeout',
'admin_state_up', 'http_method',
'expected_codes', 'url_path')
attributes_schema = {
'admin_state_up': _('The administrative state of this health '
'monitor.'),
'delay': _('The minimum time in seconds between regular connections '
'of the member.'),
'expected_codes': _('The list of HTTP status codes expected in '
'response from the member to declare it healthy.'),
'http_method': _('The HTTP method used for requests by the monitor of '
'type HTTP.'),
'max_retries': _('Number of permissible connection failures before '
'changing the member status to INACTIVE.'),
'timeout': _('Maximum number of seconds for a monitor to wait for a '
'connection to be established before it times out.'),
'type': _('One of predefined health monitor types.'),
'url_path': _('The HTTP path used in the HTTP request used by the '
'monitor to test a member health.'),
'tenant_id': _('Tenant owning the health monitor.'),
'show': _('All attributes.'),
}
def handle_create(self):
properties = self.prepare_properties(
self.properties,
self.physical_resource_name())
health_monitor = self.neutron().create_health_monitor(
{'health_monitor': properties})['health_monitor']
self.resource_id_set(health_monitor['id'])
def _show_resource(self):
return self.neutron().show_health_monitor(
self.resource_id)['health_monitor']
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.neutron().update_health_monitor(
self.resource_id, {'health_monitor': prop_diff})
def handle_delete(self):
try:
self.neutron().delete_health_monitor(self.resource_id)
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
else:
return scheduler.TaskRunner(self._confirm_delete)()
class Pool(neutron.NeutronResource):
"""
A resource for managing load balancer pools in Neutron.
"""
vip_schema = {
'name': {
'Type': 'String',
'Description': _('Name of the vip.')},
'description': {
'Type': 'String',
'Description': _('Description of the vip.')},
'address': {
'Type': 'String',
'Description': _('IP address of the vip.')},
'connection_limit': {
'Type': 'Integer',
'Description': _('The maximum number of connections per second '
'allowed for the vip.')},
'protocol_port': {
'Type': 'Integer', 'Required': True,
'Description': _('TCP port on which to listen for client traffic '
'that is associated with the vip address.')},
'admin_state_up': {
'Default': True, 'Type': 'Boolean',
'Description': _('The administrative state of this vip.')}
}
properties_schema = {
'protocol': {
'Type': 'String', 'Required': True,
'AllowedValues': ['TCP', 'HTTP', 'HTTPS'],
'Description': _('Protocol for balancing.')},
'subnet_id': {
'Type': 'String', 'Required': True,
'Description': _('The subnet on which the members of the pool '
'will be located.')},
'lb_method': {
'Type': 'String', 'Required': True,
'AllowedValues': ['ROUND_ROBIN', 'LEAST_CONNECTIONS',
'SOURCE_IP'],
'Description': _('The algorithm used to distribute load between '
'the members of the pool.')},
'name': {
'Type': 'String',
'Description': _('Name of the pool.')},
'description': {
'Type': 'String',
'Description': _('Description of the pool.')},
'admin_state_up': {
'Default': True, 'Type': 'Boolean',
'Description': _('The administrative state of this pool.')},
'vip': {
'Type': 'Map', 'Schema': vip_schema, 'Required': True,
'Description': _('IP address and port of the pool.')},
'monitors': {
'Type': 'List',
'Description': _('List of health monitors associated with the '
'pool.')},
}
update_allowed_keys = ('Properties',)
update_allowed_properties = ('description', 'admin_state_up', 'lb_method',
'monitors')
attributes_schema = {
'admin_state_up': _('The administrative state of this pool.'),
'name': _('Name of the pool.'),
'protocol': _('Protocol to balance.'),
'subnet_id': _('The subnet on which the members of the pool '
'will be located.'),
'lb_method': _('The algorithm used to distribute load between the '
'members of the pool.'),
'description': _('Description of the pool.'),
'tenant_id': _('Tenant owning the pool.'),
'vip': _('Vip associated with the pool.'),
}
def handle_create(self):
properties = self.prepare_properties(
self.properties,
self.physical_resource_name())
vip_properties = properties.pop('vip')
monitors = properties.pop('monitors', [])
client = self.neutron()
pool = client.create_pool({'pool': properties})['pool']
self.resource_id_set(pool['id'])
for monitor in monitors:
client.associate_health_monitor(
pool['id'], {'health_monitor': {'id': monitor}})
vip_arguments = self.prepare_properties(
vip_properties,
'%s.vip' % (self.name,))
vip_arguments['protocol'] = self.properties['protocol']
vip_arguments['subnet_id'] = self.properties['subnet_id']
vip_arguments['pool_id'] = pool['id']
vip = client.create_vip({'vip': vip_arguments})['vip']
self.metadata = {'vip': vip['id']}
def _show_resource(self):
return self.neutron().show_pool(self.resource_id)['pool']
def check_create_complete(self, data):
attributes = self._show_resource()
if attributes['status'] == 'PENDING_CREATE':
return False
elif attributes['status'] == 'ACTIVE':
vip_attributes = self.neutron().show_vip(
self.metadata['vip'])['vip']
if vip_attributes['status'] == 'PENDING_CREATE':
return False
elif vip_attributes['status'] == 'ACTIVE':
return True
raise exception.Error(
'neutron reported unexpected vip resource[%s] status[%s]' %
(vip_attributes['name'], vip_attributes['status']))
raise exception.Error(
            'neutron reported unexpected pool resource[%s] status[%s]' %
(attributes['name'], attributes['status']))
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
client = self.neutron()
monitors = set(prop_diff.pop('monitors', []))
if monitors:
old_monitors = set(self.t['Properties']['monitors'])
for monitor in old_monitors - monitors:
client.disassociate_health_monitor(
self.resource_id, {'health_monitor': {'id': monitor}})
for monitor in monitors - old_monitors:
client.associate_health_monitor(
self.resource_id, {'health_monitor': {'id': monitor}})
if prop_diff:
client.update_pool(self.resource_id, {'pool': prop_diff})
def _resolve_attribute(self, name):
if name == 'vip':
return self.neutron().show_vip(self.metadata['vip'])['vip']
return super(Pool, self)._resolve_attribute(name)
def _confirm_vip_delete(self):
client = self.neutron()
while True:
try:
yield
client.show_vip(self.metadata['vip'])
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
break
self._delete_pool()
def _delete_pool(self):
try:
self.neutron().delete_pool(self.resource_id)
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
else:
return scheduler.TaskRunner(self._confirm_delete)()
def handle_delete(self):
if self.metadata:
try:
self.neutron().delete_vip(self.metadata['vip'])
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
self._delete_pool()
else:
return scheduler.TaskRunner(self._confirm_vip_delete)()
else:
self._delete_pool()
class LoadBalancer(resource.Resource):
"""
A resource to link a neutron pool with servers.
"""
properties_schema = {
'pool_id': {
'Type': 'String', 'Required': True,
'Description': _('The ID of the load balancing pool.')},
'protocol_port': {
'Type': 'Integer', 'Required': True,
'Description': _('Port number on which the servers are '
'running on the members.')},
'members': {
'Type': 'List',
'Description': _('The list of Nova server IDs load balanced.')},
}
update_allowed_keys = ('Properties',)
update_allowed_properties = ('members', 'pool_id',)
def handle_create(self):
pool = self.properties['pool_id']
client = self.neutron()
nova_client = self.nova()
protocol_port = self.properties['protocol_port']
for member in self.properties.get('members', []):
address = nova_utils.server_to_ipaddress(nova_client, member)
lb_member = client.create_member({
'member': {
'pool_id': pool,
'address': address,
'protocol_port': protocol_port}})['member']
db_api.resource_data_set(self, member, lb_member['id'])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if 'members' in prop_diff:
members = set(prop_diff['members'])
old_members = set(self.t['Properties'].get('members', []))
client = self.neutron()
for member in old_members - members:
member_id = db_api.resource_data_get(self, member)
try:
client.delete_member(member_id)
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
db_api.resource_data_delete(self, member)
pool = self.properties['pool_id']
nova_client = self.nova()
protocol_port = self.properties['protocol_port']
for member in members - old_members:
address = nova_utils.server_to_ipaddress(nova_client, member)
lb_member = client.create_member({
'member': {
'pool_id': pool,
'address': address,
'protocol_port': protocol_port}})['member']
db_api.resource_data_set(self, member, lb_member['id'])
def handle_delete(self):
client = self.neutron()
for member in self.properties.get('members', []):
member_id = db_api.resource_data_get(self, member)
try:
client.delete_member(member_id)
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
db_api.resource_data_delete(self, member)
def resource_mapping():
if clients.neutronclient is None:
return {}
return {
'OS::Neutron::HealthMonitor': HealthMonitor,
'OS::Neutron::Pool': Pool,
'OS::Neutron::LoadBalancer': LoadBalancer,
}
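
# Hedged illustration (not part of heat): the minimal set of properties the
# Pool resource above requires, written as the template fragment a user would
# supply. The identifiers are made up; 'monitors' is optional.
_EXAMPLE_POOL_SNIPPET = {
    'Type': 'OS::Neutron::Pool',
    'Properties': {
        'protocol': 'HTTP',
        'subnet_id': 'subnet-1234',
        'lb_method': 'ROUND_ROBIN',
        'vip': {'protocol_port': 80},
        'monitors': ['monitor-5678'],
    },
}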
| |
# coding=utf-8
"""
Diamond handler that checks if values are too high or too low and, if so, sends
an alert to a Sentry server.
This handler requires the Python module Raven:
http://raven.readthedocs.org/en/latest/index.html
For this handler to work, it needs a configuration similar to:
[[SentryHandler]]
# Create a new project in Sentry and copy the DSN here:
dsn = http://user:pass@hostname/id
[[[load]]]
name = Load Average
# check for load average of the last 15 minutes
path = loadavg.15
max = 8.5
[[[free_memory]]]
name = Free Memory
path = memory.MemFree
min = 66020000
"""
__author__ = 'Bruno Clermont'
__email__ = 'bruno.clermont@gmail.com'
import logging
import re
from Handler import Handler
from diamond.collector import get_hostname
from configobj import Section
try:
import raven.handlers.logging
except ImportError:
raven = None
class InvalidRule(ValueError):
"""
    Raised when a rule's configuration is invalid.
"""
pass
class BaseResult(object):
"""
Base class for a Rule minimum/maximum check result
"""
adjective = None
def __init__(self, value, threshold):
"""
@type value: float
@param value: metric value
@type threshold: float
        @param threshold: value that triggers a warning
        """
        self.value = value
        self.threshold = threshold
@property
def verbose_message(self):
"""return more complete message"""
if self.threshold is None:
return 'No threshold'
return '%.1f is %s than %.1f' % (self.value,
self.adjective,
self.threshold)
@property
def _is_error(self):
raise NotImplementedError('_is_error')
@property
def is_error(self):
"""
        In Python 2, comparisons with None silently succeed:
        >>> 1.0 > None
        True
        >>> 1.0 < None
        False
        so we just check that min/max is not None before returning _is_error
"""
if self.threshold is None:
return False
return self._is_error
def __str__(self):
name = self.__class__.__name__.lower()
if self.threshold is None:
return '%s: %.1f no threshold' % (name, self.value)
return '%.1f (%s: %.1f)' % (self.value, name, self.threshold)
class Minimum(BaseResult):
"""
Minimum result
"""
adjective = 'lower'
@property
def _is_error(self):
"""if it's too low"""
return self.value < self.threshold
class Maximum(BaseResult):
"""
Maximum result
"""
adjective = 'higher'
@property
def _is_error(self):
"""if it's too high"""
return self.value > self.threshold
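# Illustrative behaviour of the result classes above (numbers are made up):
# with a maximum threshold of 8.5, Maximum(9.2, 8.5).is_error is True and its
# verbose_message reads "9.2 is higher than 8.5", while Minimum(9.2, None)
# has is_error == False because no minimum threshold was configured.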
class Rule(object):
"""
Alert rule
"""
def __init__(self, name, path, min=None, max=None):
"""
@type name: string
@param name: rule name, used to identify this rule in Sentry
@type path: string
@param path: un-compiled regular expression of the path of the rule
        @type min: string of float/int, int or float; will be converted to float
        @param min: optional minimum value; if the metric value drops below it,
            an alert is sent to Sentry
        @type max: string of float/int, int or float; will be converted to float
        @param max: optional maximum value; if the metric value goes above it,
            an alert is sent to Sentry
"""
self.name = name
# counters that can be used to debug rule
self.counter_errors = 0
self.counter_pass = 0
# force min and max to be float
try:
self.min = float(min)
except TypeError:
self.min = None
try:
self.max = float(max)
except TypeError:
self.max = None
if self.min is None and self.max is None:
raise InvalidRule("%s: %s: both min and max are unset or invalid"
% (name, path))
if self.min is not None and self.max is not None:
if self.min > self.max:
raise InvalidRule("min %.1f is larger than max %.1f" % (
self.min, self.max))
# compile path regular expression
self.regexp = re.compile(r'(?P<prefix>.*)\.(?P<path>%s)$' % path)
def process(self, metric, handler):
"""
process a single diamond metric
@type metric: diamond.metric.Metric
@param metric: metric to process
@type handler: diamond.handler.sentry.SentryHandler
@param handler: configured Sentry graphite handler
@rtype None
"""
match = self.regexp.match(metric.path)
if match:
minimum = Minimum(metric.value, self.min)
maximum = Maximum(metric.value, self.max)
if minimum.is_error or maximum.is_error:
self.counter_errors += 1
message = "%s Warning on %s: %.1f" % (self.name,
handler.hostname,
metric.value)
culprit = "%s %s" % (handler.hostname, match.group('path'))
handler.raven_logger.error(message, extra={
'culprit': culprit,
'data': {
'metric prefix': match.group('prefix'),
'metric path': match.group('path'),
'minimum check': minimum.verbose_message,
'maximum check': maximum.verbose_message,
'metric original path': metric.path,
'metric value': metric.value,
'metric precision': metric.precision,
'metric timestamp': metric.timestamp,
'minimum threshold': self.min,
'maximum threshold': self.max,
'path regular expression': self.regexp.pattern,
'total errors': self.counter_errors,
'total pass': self.counter_pass,
'hostname': handler.hostname
}
}
)
else:
self.counter_pass += 1
def __repr__(self):
return '%s: min:%s max:%s %s' % (self.name, self.min, self.max,
self.regexp.pattern)
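# Illustrative rule matching (hypothetical metric path): a rule configured with
# path = loadavg.15 compiles to r'(?P<prefix>.*)\.(?P<path>loadavg.15)$', so a
# metric whose path is 'servers.web01.loadavg.15' matches with
# prefix == 'servers.web01' and path == 'loadavg.15'.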
class SentryHandler(Handler):
"""
    Diamond handler that checks whether a metric goes too low or too high
"""
# valid key name in rules sub-section
VALID_RULES_KEYS = ('name', 'path', 'min', 'max')
def __init__(self, config=None):
"""
@type config: configobj.ConfigObj
"""
Handler.__init__(self, config)
        if not raven:
            self.log.error('raven.handlers.logging import failed. '
                           'Handler disabled')
            self.enabled = False
            return
# init sentry/raven
self.sentry_log_handler = raven.handlers.logging.SentryHandler(
self.config['dsn'])
self.raven_logger = logging.getLogger(self.__class__.__name__)
self.raven_logger.addHandler(self.sentry_log_handler)
self.configure_sentry_errors()
self.rules = self.compile_rules()
self.hostname = get_hostname(self.config)
if not len(self.rules):
self.log.warning("No rules, this graphite handler is unused")
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(SentryHandler, self).get_default_config_help()
config.update({
'dsn': '',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(SentryHandler, self).get_default_config()
config.update({
'dsn': '',
})
return config
def compile_rules(self):
"""
Compile alert rules
@rtype list of Rules
"""
output = []
# validate configuration, skip invalid section
for key_name, section in self.config.items():
rule = self.compile_section(section)
if rule is not None:
output.append(rule)
return output
def compile_section(self, section):
"""
Validate if a section is a valid rule
@type section: configobj.Section
@param section: section to validate
@rtype Rule or None
@return None if invalid
"""
if section.__class__ != Section:
# not a section, just skip
return
# name and path are mandatory
keys = section.keys()
for key in ('name', 'path'):
if key not in keys:
self.log.warning("section %s miss key '%s' ignore", key,
section.name)
return
# just warn if invalid key in section
for key in keys:
if key not in self.VALID_RULES_KEYS:
self.log.warning("invalid key %s in section %s",
key, section.name)
# need at least a min or a max
if 'min' not in keys and 'max' not in keys:
self.log.warning("either 'min' or 'max' is defined in %s",
section.name)
return
# add rule to the list
kwargs = {
'name': section['name'],
'path': section['path']
}
for argument in ('min', 'max'):
try:
kwargs[argument] = section[argument]
except KeyError:
pass
# init rule
try:
return Rule(**kwargs)
        except InvalidRule as err:
self.log.error(str(err))
def configure_sentry_errors(self):
"""
        Configure the sentry.errors logger to use the same handlers as the
        root logger
@rtype: None
"""
sentry_errors_logger = logging.getLogger('sentry.errors')
root_logger = logging.getLogger()
for handler in root_logger.handlers:
sentry_errors_logger.addHandler(handler)
def process(self, metric):
"""
process a single metric
@type metric: diamond.metric.Metric
@param metric: metric to process
@rtype None
"""
for rule in self.rules:
rule.process(metric, self)
def __repr__(self):
return "SentryHandler '%s' %d rules" % (
self.sentry_log_handler.client.servers, len(self.rules))
from __future__ import print_function, unicode_literals, with_statement, division
import sys
from os import path, system, listdir, mkdir
from django.conf import settings
# VIEW CONSTS
LIST_VIEW = """
from %(app)s.forms import %(model)sForm
def %(lower_model)s_list(request, template='%(lower_model)s/list.html'):
d = {}
d['form'] = %(model)sForm()
if request.method == 'POST':
form = %(model)sForm(request.POST)
if form.is_valid():
item = form.save()
return JsonResponse(data={'id': item.id, 'name': str(item), 'form': %(model)sForm().as_p(), 'token': get_token(request)})
else:
d['form'] = form
return JsonResponse(data={'form': d['form'].as_p(), 'token': get_token(request)}, success=False)
d['%(lower_model)s_list'] = %(model)s.objects.all()
return render(request, template, d)
"""
DETAILS_VIEW = """
from %(app)s.forms import %(model)sForm
def %(lower_model)s_details(request, id, template='%(lower_model)s/details.html'):
d = {}
item = get_object_or_404(%(model)s, pk=id)
d['form'] = %(model)sForm(instance=item)
if request.method == 'POST':
form = %(model)sForm(request.POST, instance=item)
if form.is_valid():
item = form.save()
return JsonResponse(data={'form': %(model)sForm(instance=item).as_p(), 'token': get_token(request)})
else:
d['form'] = form
return JsonResponse(data={'form': d['form'].as_p(), 'token': get_token(request)}, success=False)
d['%(lower_model)s'] = %(model)s.objects.get(pk=id)
return render(request, template, d)
"""
DELETE_VIEW = """
def %(lower_model)s_delete(request, id):
item = %(model)s.objects.get(pk=id)
item.delete()
return JsonResponse()
"""
# MODELS CONSTS
MODEL_TEMPLATE = """
class %s(models.Model):
%s
update_date = models.DateTimeField(auto_now=True)
create_date = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['-id']
"""
IMPORT_MODEL_TEMPLATE = """from %(app)s.models import %(model)s"""
CHARFIELD_TEMPLATE = """
%(name)s = models.CharField(max_length=%(length)s, null=%(null)s, blank=%(null)s)
"""
TEXTFIELD_TEMPLATE = """
%(name)s = models.TextField(null=%(null)s, blank=%(null)s)
"""
INTEGERFIELD_TEMPLATE = """
%(name)s = models.IntegerField(null=%(null)s, default=%(default)s)
"""
DECIMALFIELD_TEMPLATE = """
%(name)s = models.DecimalField(max_digits=%(digits)s, decimal_places=%(places)s, null=%(null)s, default=%(default)s)
"""
DATETIMEFIELD_TEMPLATE = """
%(name)s = models.DateTimeField(null=%(null)s, default=%(default)s)
"""
FOREIGNFIELD_TEMPLATE = """
%(name)s = models.ForeignKey(%(foreign)s, null=%(null)s, blank=%(null)s)
"""
TEMPLATE_LIST_CONTENT = """
{%% extends "base.html" %%}
{%% block page-title %%}%(title)s{%% endblock %%}
{%% block content %%}
<h1>%(model)s list</h1><br />
<table style="border: solid 1px gray; width: 300px; text-align: center;" id="item-list">
<tr style="background-color: #DDD">
<th style="padding: 10px;">ID</th>
<th>Name</th>
<th>Action</th>
</tr>
{%% for item in %(model)s_list %%}
<tr>
<td style="padding: 10px;">{{ item.id }}</td>
<td>{{ item }}</td>
<td><a href="{%% url '%(model)s-details' item.id %%}">show</a></td>
</tr>
{%% endfor %%}
</table>
<br />
<input type="button" onclick="$('#add-form-div').toggle();" value="Add new %(model)s"><br /><br />
<div id="add-form-div" style="display: none;">
<form action="{%% url '%(model)s-list' %%}" method="POST" id="add-form">
<div id="form-fields">
{%% csrf_token %%}
{{ form }}
</div>
<input type="submit" value="Submit" />
</form>
</div>
<script type="text/javascript">
(new FormHelper('add-form')).bind_for_ajax(
function(data) {
$('#item-list').append('<td style="padding: 10px;">' + data.id + '</td><td>' + data.name + '</td><td><a href="{%% url %(model)s-list %%}' + data.id + '/">show</a></td>').hide().fadeIn();
$('#form-fields').html('');
$('#form-fields').append('<input type="hidden" value="' + data.token + '" name="csrfmiddlewaretoken">');
$('#form-fields').append(data.form);
$('#add-form-div').toggle();
},
function(data) {
$('#form-fields').html('');
$('#form-fields').append('<input type="hidden" value="' + data.token + '" name="csrfmiddlewaretoken">');
$('#form-fields').append(data.form).hide().fadeIn();
$('#add-form input[type=submit]').removeAttr('disabled');
}
);
</script>
{%% endblock %%}
"""
TEMPLATE_DETAILS_CONTENT = """
{%% extends "base.html" %%}
{%% block page-title %%}%(title)s - {{ %(model)s }} {%% endblock %%}
{%% block content %%}
<div class="item">
<h1>%(model)s - {{ %(model)s }} </h1><br />
<table style="border: solid 1px gray; width: 300px; text-align: center;" id="item-list">
<tr style="background-color: #DDD">
<th style="padding: 10px;">ID</th>
<th>Name</th>
<th>Action</th>
</tr>
<tr>
<td style="padding: 10px;">{{ %(model)s.id }}</td>
<td>{{ %(model)s }}</td>
<td><input type="button" href="{%% url '%(model)s-delete' %(model)s.id %%}" id="delete-item" value="delete" /></td>
</tr>
</table>
<br />
<br />
<br />
<input type="button" onclick="$('#add-form-div').toggle();" value="Edit %(model)s"><br /><br />
<div id="add-form-div" style="display: none;">
<form action="{%% url '%(model)s-details' %(model)s.id %%}" method="POST" id="add-form">
<div id="form-fields">
{%% csrf_token %%}
{{ form }}
</div>
<input type="submit" value="Submit" />
</form>
</div>
</div>
<script type="text/javascript">
(new FormHelper('add-form')).bind_for_ajax(
function(data) {
$('#form-fields').html('');
$('#form-fields').append('<input type="hidden" value="' + data.token + '" name="csrfmiddlewaretoken">');
$('#form-fields').append(data.form).hide().fadeIn();
$('#add-form input[type=submit]').removeAttr('disabled');
},
function(data) {
$('#form-fields').html('');
$('#form-fields').append('<input type="hidden" value="' + data.token + '" name="csrfmiddlewaretoken">');
$('#form-fields').append(data.form).hide().fadeIn();
$('#add-form input[type=submit]').removeAttr('disabled');
}
);
$('#delete-item').click(function() {
$.get($(this).attr('href'), function(data) {
$('div.item').before('<h1>Item removed</h1><br /><br />');
$('div.item').remove();
});
});
</script>
<a href="{%% url '%(model)s-list' %%}">back to list</a>
{%% endblock %%}
"""
URL_CONTENT = """
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from %(app)s import views
urlpatterns = [
url(r'^%(model)s/$', views.%(model)s_list, name='%(model)s-list'),
url(r'^%(model)s/(?P<id>\d+)/$', views.%(model)s_details, name='%(model)s-details'),
url(r'^%(model)s/(?P<id>\d+)/delete/$', views.%(model)s_delete, name='%(model)s-delete'),
]
"""
URL_EXISTS_CONTENT = """
url(r'^%(model)s/$', views.%(model)s_list, name='%(model)s-list'),
url(r'^%(model)s/(?P<id>\d+)/$', views.%(model)s_details, name='%(model)s-details'),
url(r'^%(model)s/(?P<id>\d+)/delete/$', views.%(model)s_delete, name='%(model)s-delete'),
"""
ADMIN_CONTENT = """
from %(app)s.models import %(model)s
admin.site.register(%(model)s)
"""
FORM_CONTENT = """
from %(app)s.models import %(model)s
class %(model)sForm(forms.ModelForm):
class Meta:
model = %(model)s
"""
TESTS_CONTENT = """
from %(app)s.models import %(model)s
class %(model)sTest(TestCase):
def setUp(self):
self.user = User.objects.create(username='test_user')
def tearDown(self):
self.user.delete()
def test_list(self):
response = self.client.get(reverse('%(lower_model)s-list'))
self.failUnlessEqual(response.status_code, 200)
def test_crud(self):
# Create new instance
response = self.client.post(reverse('%(lower_model)s-list'), {})
self.assertContains(response, '"success": true')
# Read instance
items = %(model)s.objects.all()
self.failUnlessEqual(items.count(), 1)
item = items[0]
response = self.client.get(reverse('%(lower_model)s-details', kwargs={'id': item.id}))
self.failUnlessEqual(response.status_code, 200)
# Update instance
response = self.client.post(reverse('%(lower_model)s-details', kwargs={'id': item.id}), {})
self.assertContains(response, '"success": true')
# Delete instance
response = self.client.post(reverse('%(lower_model)s-delete', kwargs={'id': item.id}), {})
self.assertContains(response, '"success": true')
items = %(model)s.objects.all()
self.failUnlessEqual(items.count(), 0)
"""
class Scaffold(object):
def _info(self, msg, indent=0):
print("{0} {1}".format("\t" * int(indent), msg))
def __init__(self, app, model, fields):
self.app = app
self.model = model
self.fields = fields
try:
self.SCAFFOLD_APPS_DIR = settings.SCAFFOLD_APPS_DIR
except:
self.SCAFFOLD_APPS_DIR = './'
def get_import(self, model):
for dir in listdir(self.SCAFFOLD_APPS_DIR):
if path.isdir('{0}{1}'.format(self.SCAFFOLD_APPS_DIR, dir)) \
and path.exists('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, dir)):
with open('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, dir), 'r') as fp:
# Check if model exists
for line in fp.readlines():
if 'class {0}(models.Model)'.format(model) in line:
# print "Foreign key '%s' was found in app %s..." % (model, dir)
return IMPORT_MODEL_TEMPLATE % {'app': dir, 'model': model}
return None
def is_imported(self, path, model):
with open(path, 'r') as import_file:
for line in import_file.readlines():
if 'import {0}'.format(model) in line:
# print "Foreign key '%s' was found in models.py..." % (foreign)
return True
return False
def add_global_view_imports(self, path):
# from django.shortcuts import render, redirect, get_object_or_404, get_list_or_404
import_list = list()
with open(path, 'r') as import_file:
need_import_shortcut = True
need_import_urlresolvers = True
need_import_users = True
need_import_token = True
need_import_JsonResponse = True
for line in import_file.readlines():
if 'from django.shortcuts import render, redirect, get_object_or_404' in line:
need_import_shortcut = False
if 'from django.core.urlresolvers import reverse' in line:
need_import_urlresolvers = False
if 'from django.contrib.auth.models import User, Group' in line:
need_import_users = False
if 'from django.middleware.csrf import get_token' in line:
need_import_token = False
if 'from django_common.http import JsonResponse' in line:
need_import_JsonResponse = False
if need_import_shortcut:
import_list.append(
'from django.shortcuts import render, redirect, get_object_or_404')
if need_import_urlresolvers:
import_list.append('from django.core.urlresolvers import reverse')
if need_import_users:
import_list.append('from django.contrib.auth.models import User, Group')
if need_import_token:
import_list.append('from django.middleware.csrf import get_token')
if need_import_JsonResponse:
import_list.append('from django_common.http import JsonResponse')
return import_list
def view_exists(self, path, view):
# Check if view already exists
with open(path, 'r') as view_file:
for line in view_file.readlines():
if 'def {0}('.format(view) in line:
return True
return False
def get_field(self, field):
field = field.split(':')
field_type = field[0]
if field_type.lower() == 'char':
try:
length = field[2]
except IndexError:
length = 255
try:
null = field[3]
null = 'False'
except IndexError:
null = 'True'
return CHARFIELD_TEMPLATE % {'name': field[1], 'length': length, 'null': null}
elif field_type.lower() == 'text':
try:
null = field[2]
null = 'False'
except IndexError:
null = 'True'
return TEXTFIELD_TEMPLATE % {'name': field[1], 'null': null}
elif field_type.lower() == 'int':
try:
null = field[2]
null = 'False'
except IndexError:
null = 'True'
try:
default = field[3]
except IndexError:
default = None
return INTEGERFIELD_TEMPLATE % {'name': field[1], 'null': null, 'default': default}
elif field_type.lower() == 'decimal':
try:
null = field[4]
null = 'False'
except IndexError:
null = 'True'
try:
default = field[5]
except IndexError:
default = None
return DECIMALFIELD_TEMPLATE % {
'name': field[1],
'digits': field[2],
'places': field[3],
'null': null,
'default': default,
}
elif field_type.lower() == 'datetime':
try:
null = field[2]
null = 'False'
except IndexError:
null = 'True'
try:
default = field[3]
except IndexError:
default = None
return DATETIMEFIELD_TEMPLATE % {'name': field[1], 'null': null, 'default': default}
elif field_type.lower() == 'foreign':
foreign = field[2]
name = field[1]
# Check if this foreign key is already in models.py
if foreign in ('User', 'Group'):
if not self.is_imported('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR,
self.app), foreign):
self.imports.append('\nfrom django.contrib.auth.models import User, Group\n')
return FOREIGNFIELD_TEMPLATE % {'name': name, 'foreign': foreign, 'null': 'True'}
if self.is_imported('{0}{1}/models.py'.format(
self.SCAFFOLD_APPS_DIR, self.app), foreign):
return FOREIGNFIELD_TEMPLATE % {'name': name, 'foreign': foreign, 'null': 'True'}
# Check imports
if self.get_import(foreign):
self.imports.append(self.get_import(foreign))
return FOREIGNFIELD_TEMPLATE % {'name': name, 'foreign': foreign, 'null': 'True'}
self._info('error\t{0}{1}/models.py\t{2} class not found'.format(
self.SCAFFOLD_APPS_DIR, self.app, foreign), 1)
return None
def create_app(self):
self._info(" App ")
self._info("===========")
if self.SCAFFOLD_APPS_DIR and not path.exists('{0}'.format(self.SCAFFOLD_APPS_DIR)):
raise Exception(
"SCAFFOLD_APPS_DIR {0} does not exists".format(self.SCAFFOLD_APPS_DIR))
if not path.exists('{0}{1}'.format(self.SCAFFOLD_APPS_DIR, self.app)):
system('python manage.py startapp {0}'.format(self.app))
system('mv {0} {1}{2}'.format(self.app, self.SCAFFOLD_APPS_DIR, self.app))
self._info("create\t{0}{1}".format(self.SCAFFOLD_APPS_DIR, self.app), 1)
else:
self._info("exists\t{0}{1}".format(self.SCAFFOLD_APPS_DIR, self.app), 1)
def create_views(self):
self._info(" Views ")
self._info("===========")
        # Path of the app's views.py
view_path = '{0}{1}/views.py'.format(self.SCAFFOLD_APPS_DIR, self.app)
        # Check if views.py exists
if path.exists('{0}{1}/views.py'.format(self.SCAFFOLD_APPS_DIR, self.app)):
self._info('exists\t{0}{1}/views.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
else:
with open("{0}{1}/views.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w'):
self._info('create\t{0}{1}/views.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
import_list = list()
view_list = list()
# Add global imports
import_list.append('\n'.join(imp for imp in self.add_global_view_imports(view_path)))
# Add model imports
if not self.is_imported(view_path, self.model):
import_list.append(self.get_import(self.model))
lower_model = self.model.lower()
# Check if view already exists
if not self.view_exists(view_path, "{0}_list".format(lower_model)):
view_list.append(LIST_VIEW % {
'lower_model': lower_model,
'model': self.model,
'app': self.app,
})
self._info("added \t{0}\t{1}_view".format(view_path, lower_model), 1)
else:
self._info("exists\t{0}\t{1}_view".format(view_path, lower_model), 1)
if not self.view_exists(view_path, "{0}_details".format(lower_model)):
view_list.append(DETAILS_VIEW % {
'lower_model': lower_model,
'model': self.model,
'app': self.app,
})
self._info("added \t{0}\t{1}_details".format(view_path, lower_model), 1)
else:
self._info("exists\t{0}\t{1}_details".format(view_path, lower_model), 1)
if not self.view_exists(view_path, "{0}_delete".format(lower_model)):
view_list.append(DELETE_VIEW % {
'lower_model': lower_model,
'model': self.model,
})
self._info("added \t{0}\t{1}_delete".format(view_path, lower_model), 1)
else:
self._info("exists\t{0}\t{1}_delete".format(view_path, lower_model), 1)
# Open views.py to append
with open(view_path, 'a') as view_file:
view_file.write('\n'.join([import_line for import_line in import_list]))
view_file.write(''.join([view for view in view_list]))
def create_model(self):
self._info(" Model ")
self._info("===========")
# Open models.py to read
with open('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp:
self.models_file = fp
# Check if model already exists
for line in self.models_file.readlines():
if 'class {0}'.format(self.model) in line:
self._info('exists\t{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
return
self._info('create\t{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
# Prepare fields
self.imports = []
fields = []
for field in self.fields:
new_field = self.get_field(field)
if new_field:
fields.append(new_field)
self._info('added\t{0}{1}/models.py\t{2} field'.format(
self.SCAFFOLD_APPS_DIR, self.app, field.split(':')[1]), 1)
# Open models.py to append
with open('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp:
fp.write(''.join([import_line for import_line in self.imports]))
fp.write(MODEL_TEMPLATE % (self.model, ''.join(field for field in fields)))
def create_templates(self):
self._info(" Templates ")
self._info("===========")
# Check if template dir exists
if path.exists('{0}{1}/templates/'.format(self.SCAFFOLD_APPS_DIR, self.app)):
self._info('exists\t{0}{1}/templates/'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
else:
mkdir("{0}{1}/templates/".format(self.SCAFFOLD_APPS_DIR, self.app))
self._info('create\t{0}{1}/templates/'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
# Check if model template dir exists
if path.exists('{0}{1}/templates/{2}/'.format(self.SCAFFOLD_APPS_DIR, self.app,
self.model.lower())):
self._info('exists\t{0}{1}/templates/{2}/'.format(self.SCAFFOLD_APPS_DIR, self.app,
self.model.lower()), 1)
else:
mkdir("{0}{1}/templates/{2}/".format(self.SCAFFOLD_APPS_DIR, self.app,
self.model.lower()))
self._info('create\t{0}{1}/templates/{2}/'.format(
self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1)
# Check if list.html exists
if path.exists('{0}{1}/templates/{2}/list.html'.format(self.SCAFFOLD_APPS_DIR, self.app,
self.model.lower())):
self._info('exists\t{0}{1}/templates/{2}/list.html'.format(
self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1)
else:
with open("{0}{1}/templates/{2}/list.html".format(self.SCAFFOLD_APPS_DIR, self.app,
self.model.lower()), 'w') as fp:
fp.write(TEMPLATE_LIST_CONTENT % {
'model': self.model.lower(),
'title': self.model.lower(),
})
self._info('create\t{0}{1}/templates/{2}/list.html'.format(
self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1)
# Check if details.html exists
if path.exists('{0}{1}/templates/{2}/details.html'.format(
self.SCAFFOLD_APPS_DIR, self.app, self.model.lower())):
self._info('exists\t{0}{1}/templates/{2}/details.html'.format(
self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1)
else:
with open("{0}{1}/templates/{2}/details.html".format(
self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 'w') as fp:
fp.write(TEMPLATE_DETAILS_CONTENT % {
'model': self.model.lower(),
'title': self.model.lower(),
})
self._info('create\t{0}{1}/templates/{2}/details.html'.format(
self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1)
def create_urls(self):
self._info(" URLs ")
self._info("===========")
# Check if urls.py exists
if path.exists('{0}{1}/urls.py'.format(self.SCAFFOLD_APPS_DIR, self.app)):
# If does we need to add urls
new_urls = ''
with open("{0}{1}/urls.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp:
for line in fp.readlines():
new_urls += line
if 'urlpatterns' in line:
new_urls += URL_EXISTS_CONTENT % {
'app': self.app,
'model': self.model.lower(),
}
with open("{0}{1}/urls.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp:
fp.write(new_urls)
self._info('update\t{0}{1}/urls.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
else:
with open("{0}{1}/urls.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp:
fp.write(URL_CONTENT % {
'app': self.app,
'model': self.model.lower(),
})
self._info('create\t{0}{1}/urls.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
def create_admin(self):
self._info(" Admin ")
self._info("===========")
# Check if admin.py exists
if path.exists('{0}{1}/admin.py'.format(self.SCAFFOLD_APPS_DIR, self.app)):
self._info('exists\t{0}{1}/admin.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
else:
with open("{0}{1}/admin.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp:
fp.write("from django.contrib import admin\n")
                self._info('create\t{0}{1}/admin.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
# Check if admin entry already exists
with open("{0}{1}/admin.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp:
content = fp.read()
if "admin.site.register({0})".format(self.model) in content:
self._info('exists\t{0}{1}/admin.py\t{2}'.format(self.SCAFFOLD_APPS_DIR, self.app,
self.model.lower()), 1)
else:
with open("{0}{1}/admin.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp:
fp.write(ADMIN_CONTENT % {'app': self.app, 'model': self.model})
self._info('added\t{0}{1}/admin.py\t{2}'.format(self.SCAFFOLD_APPS_DIR, self.app,
self.model.lower()), 1)
def create_forms(self):
self._info(" Forms ")
self._info("===========")
# Check if forms.py exists
if path.exists('{0}{1}/forms.py'.format(self.SCAFFOLD_APPS_DIR, self.app)):
self._info('exists\t{0}{1}/forms.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
else:
with open("{0}{1}/forms.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp:
fp.write("from django import forms\n")
self._info('create\t{0}{1}/forms.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
# Check if form entry already exists
with open("{0}{1}/forms.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp:
content = fp.read()
if "class {0}Form".format(self.model) in content:
self._info('exists\t{0}{1}/forms.py\t{2}'.format(
self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1)
else:
with open("{0}{1}/forms.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp:
fp.write(FORM_CONTENT % {'app': self.app, 'model': self.model})
self._info('added\t{0}{1}/forms.py\t{2}'.format(
self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1)
def create_tests(self):
self._info(" Tests ")
self._info("===========")
# Check if tests.py exists
if path.exists('{0}{1}/tests.py'.format(self.SCAFFOLD_APPS_DIR, self.app)):
self._info('exists\t{0}{1}/tests.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
            # Check if the required imports already exist:
import_testcase = True
import_user = True
import_reverse = True
with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp:
for line in fp.readlines():
if 'import TestCase' in line:
import_testcase = False
if 'import User' in line:
import_user = False
if 'import reverse' in line:
import_reverse = False
with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp:
if import_testcase:
fp.write("from django.test import TestCase\n")
if import_user:
fp.write("from django.contrib.auth.models import User\n")
if import_reverse:
fp.write("from django.core.urlresolvers import reverse\n")
else:
with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp:
fp.write("from django.test import TestCase\n")
fp.write("from django.contrib.auth.models import User\n")
fp.write("from django.core.urlresolvers import reverse\n")
self._info('create\t{0}{1}/tests.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
# Check if test class already exists
with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp:
content = fp.read()
if "class {0}Test".format(self.model) in content:
self._info('exists\t{0}{1}/tests.py\t{2}'.format(
self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1)
else:
with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp:
fp.write(TESTS_CONTENT % {
'app': self.app,
'model': self.model,
'lower_model': self.model.lower(),
})
self._info('added\t{0}{1}/tests.py\t{2}'.format(self.SCAFFOLD_APPS_DIR, self.app,
self.model.lower()), 1)
def run(self):
if not self.app:
sys.exit("No application name found...")
if not self.app.isalnum():
sys.exit("Model name should be alphanumerical...")
self.create_app()
if self.model:
self.create_model()
self.create_views()
self.create_admin()
self.create_forms()
self.create_urls()
self.create_templates()
self.create_tests()
# Copyright 2011 Viewfinder Inc. All Rights Reserved.
"""Utility tests.
ParseHostPort(): parses host:port string and returns tuple
"""
__authors__ = ['spencer@emailscrubbed.com (Spencer Kimball)',
               'andy@emailscrubbed.com (Andy Kimball)']
import logging
import time
import unittest
from datetime import date, timedelta
from functools import partial
from viewfinder.backend.base import util, testing
class BarrierTestCase(testing.BaseTestCase):
"""Tests for basic barrier type."""
def testBarrier(self):
val = [False]
def _Callback():
val[0] = True
self.stop()
with util.Barrier(_Callback) as b:
cb1 = b.Callback()
cb2 = b.Callback()
cb1()
self.io_loop.add_callback(self.stop)
self.wait()
self.assertFalse(val[0])
cb2()
self.wait()
self.assertTrue(val[0])
def testEmptyBarrier(self):
val = [False]
def _Callback():
val[0] = True
self.stop()
with util.Barrier(_Callback):
pass
self.wait()
self.assertTrue(val[0])
def testEmptyBarrierException(self):
val = [False]
def _Exception(type_, value_, traceback):
print "Exception"
self.io_loop.add_callback(self.stop)
def _Completed():
print "Completed"
val[0] = True
with util.Barrier(_Completed, _Exception):
raise KeyError('Key')
self.wait()
self.assertFalse(val[0], 'Barrier complete method was called unexpectedly.')
def testCompletedBeforeException(self):
"""Make the barrier callback and then raise exception."""
val = [0]
def _Exception(type_, value_, traceback):
logging.info("Exception")
val[0] += 1
def _Completed():
logging.info("Completed")
val[0] += 1
def _RaiseException():
raise KeyError('key')
def _PropException(type_, value_, traceback):
self.io_loop.add_callback(self.stop)
with util.ExceptionBarrier(_PropException):
with util.Barrier(_Completed, _Exception):
self.io_loop.add_callback(_RaiseException)
self.wait()
self.assertEqual(val[0], 1, 'Both _Completed and _Exception were called.')
def testCompletedAfterException(self):
"""Raise exception and then make the barrier callback."""
val = [0]
def _Exception(type_, value_, traceback):
logging.info("Exception")
val[0] += 1
self.io_loop.add_callback(self.stop)
def _Completed():
logging.info("Completed")
val[0] += 1
self.io_loop.add_callback(self.stop)
def _RaiseException(completed_cb):
self.io_loop.add_callback(partial(completed_cb, 1))
raise KeyError('key')
with util.ArrayBarrier(_Completed, on_exception=_Exception) as b:
self.io_loop.add_callback(partial(_RaiseException, b.Callback()))
self.io_loop.add_callback(partial(_RaiseException, b.Callback()))
self.wait()
self.assertEqual(val[0], 1, 'Both _Completed and _Exception were called.')
class MonoBarrierTestCase(testing.BaseTestCase):
"""Tests for MonoBarrier barrier type."""
def testBarrier(self):
val = []
def _Callback(result):
val.append(result)
self.stop()
with util.MonoBarrier(_Callback) as b:
cb = b.Callback()
self.assertRaises(Exception, b.Callback)
cb(1)
self.wait()
self.assertEqual(1, val[0])
def testEmptyBarrier(self):
val = [False]
def _Callback(result):
self.assertEqual(result, None)
val[0] = True
self.stop()
with util.MonoBarrier(_Callback):
pass
self.wait()
self.assertTrue(val[0])
def testCallbackPositionalArguments(self):
val = [0]
def _Callback(arg1, arg2):
self.stop()
self.assertEqual(arg1, 'arg1')
self.assertEqual(arg2, 'arg2')
val[0] = 1
def _Exception(type_, instance_, traceback):
self.stop()
self.assertTrue(type_ is TypeError)
val[0] = 2
with util.MonoBarrier(_Callback) as b:
b.Callback()('arg1', 'arg2')
self.wait()
self.assertEqual(1, val[0])
with util.Barrier(_Callback, on_exception=_Exception) as b1:
with util.MonoBarrier(_Callback) as b2:
b2.Callback()(b1.Callback())
self.wait()
self.assertEqual(2, val[0])
class ResultsBarrierTestCase(testing.BaseTestCase):
"""Tests for Results barrier."""
def testResultsBarrier(self):
val = [False]
def _Callback(exp_results, results):
self.stop()
self.assertEqual(results, exp_results)
val[0] = True
with util.ArrayBarrier(partial(_Callback, [1, 2, 3])) as b:
b.Callback()(1)
b.Callback()(2)
b.Callback()(3)
self.wait()
self.assertTrue(val[0])
def testEmptyBarrier(self):
val = [False]
def _Callback(exp_results, results):
self.stop()
self.assertEqual(results, exp_results)
val[0] = True
with util.ArrayBarrier(partial(_Callback, [])):
pass
self.wait()
self.assertTrue(val[0])
def testCompact(self):
val = [False]
def _Callback(exp_results, results):
self.stop()
self.assertEqual(exp_results, results)
val[0] = True
with util.ArrayBarrier(partial(_Callback, [2]), compact=True) as b:
b.Callback()(None)
b.Callback()(2)
b.Callback()(None)
self.wait()
self.assertTrue(val[0])
class ArrayBarrierTestCase(testing.BaseTestCase):
"""Tests for ArrayBarrier barrier type."""
def testArrayBarrier(self):
val = [False]
def _Callback(exp_results, results):
self.stop()
self.assertEqual(exp_results, results)
val[0] = True
with util.ArrayBarrier(partial(_Callback, ['cb1', 'cb2', 'cb3', 'cb4'])) as b:
b.Callback()('cb1')
b.Callback()('cb2')
b.Callback()('cb3')
b.Callback()('cb4')
self.wait()
self.assertTrue(val[0])
class DictBarrierTestCase(testing.BaseTestCase):
"""Tests for DictBarrier type."""
def testDictBarrier(self):
val = [False]
def _Callback(exp_results, results):
self.stop()
self.assertEqual(exp_results, results)
val[0] = True
with util.DictBarrier(partial(_Callback, {'key1': 1, 'key2': 2, 'key3': 3})) as b:
b.Callback('key1')(1)
b.Callback('key2')(2)
b.Callback('key3')(3)
self.wait()
self.assertTrue(val[0])
class ExceptionBarrierTestCase(testing.BaseTestCase):
"""Tests for ExceptionBarrier type."""
def testImmediateException(self):
"""Test exception raised before barrier context is exited."""
def _OnException(type, value, tb):
self.stop()
with util.ExceptionBarrier(_OnException):
raise Exception('an error')
self.wait()
def testDelayedException(self):
"""Test exception raised after initial barrier context has exited."""
def _OnException(type, value, tb):
self.stop()
def _RaiseException():
raise Exception('an error')
with util.ExceptionBarrier(_OnException):
self.io_loop.add_callback(_RaiseException)
self.wait()
def testCallback(self):
"""ERROR: Try to use Callback() method on barrier."""
def _OnException(type, value, tb):
self.assertEqual(type, AssertionError)
self.stop()
with util.ExceptionBarrier(_OnException) as b:
b.Callback()
self.wait()
def testMultipleExceptions(self):
"""ERROR: Raise multiple exceptions within scope of exception barrier."""
def _OnException(type, value, tb):
self.stop()
def _RaiseException():
raise Exception('an error')
with util.ExceptionBarrier(_OnException) as b:
self.io_loop.add_callback(_RaiseException)
self.io_loop.add_callback(_RaiseException)
self.wait()
class NestedBarrierTestCase(testing.BaseTestCase):
  def testUnhandledException(self):
"""Verify that without an exception handler, a thrown exception
in a barrier propagates.
"""
success = [False]
def _Op(cb):
raise ZeroDivisionError('exception')
def _OnSuccess():
success[0] = True
def _RunBarrier():
with util.Barrier(_OnSuccess) as b:
_Op(b.Callback())
self.assertRaises(ZeroDivisionError, _RunBarrier)
self.assertTrue(not success[0])
def testHandledException(self):
"""Verify that if an exception handler is specified, a thrown
exception doesn't propagate.
"""
exception = [False]
success = [False]
def _OnException(type, value, traceback):
exception[0] = True
self.io_loop.add_callback(self.stop)
def _OnSuccess():
success[0] = True
def _Op(cb):
raise Exception('exception')
with util.Barrier(_OnSuccess, on_exception=_OnException) as b:
_Op(b.Callback())
self.wait()
self.assertTrue(exception[0])
self.assertTrue(not success[0])
def testNestedBarriers(self):
"""Verify that a handled exception in a nested barrier doesn't prevent
outer barrier from completing.
"""
exceptions = [False, False]
level1_reached = [False]
def _Level2Exception(type, value, traceback):
exceptions[1] = True
def _Level2(cb):
raise Exception('exception in level 2')
def _Level1Exception(type, value, traceback):
exceptions[0] = True
def _OnLevel1():
self.io_loop.add_callback(self.stop)
level1_reached[0] = True
def _Level1(cb):
with util.Barrier(None, on_exception=_Level2Exception) as b:
_Level2(b.Callback())
_OnLevel1()
with util.Barrier(_OnLevel1, on_exception=_Level1Exception) as b:
_Level1(b.Callback())
self.wait()
self.assertTrue(not exceptions[0])
self.assertTrue(exceptions[1])
self.assertTrue(level1_reached[0])
class ParseHostPortTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testSimple(self):
self.assertEquals(util.ParseHostPort("host:80"), ("host", 80))
def testSimple2(self):
self.assertEquals(util.ParseHostPort("host.example.com:80"), ("host.example.com", 80))
def testIP(self):
self.assertEquals(util.ParseHostPort("127.0.0.1:80"), ("127.0.0.1", 80))
def testEmpty(self):
self.assertRaises(TypeError, util.ParseHostPort, "")
def testHostOnly(self):
self.assertRaises(TypeError, util.ParseHostPort, "host")
def testPortOnly(self):
self.assertRaises(TypeError, util.ParseHostPort, ":1")
def testThreeValues(self):
self.assertRaises(TypeError, util.ParseHostPort, "host:1:2")
def testNoColon(self):
self.assertRaises(TypeError, util.ParseHostPort, "host;1")
def testNonIntegerPort(self):
self.assertRaises(TypeError, util.ParseHostPort, "host:port")
def testOutOfRangePort(self):
self.assertRaises(TypeError, util.ParseHostPort, "host:65536")
def testLongPort(self):
self.assertRaises(TypeError, util.ParseHostPort, "host:1000000000000")
class VarLengthEncodeDecodeTestCase(unittest.TestCase):
def testEncode(self):
self._VerifyEncodeDecode(1, '\x01')
self._VerifyEncodeDecode(2, '\x02')
self._VerifyEncodeDecode(127, '\x7f')
self._VerifyEncodeDecode(128, '\x80\x01')
self._VerifyEncodeDecode(255, '\xff\x01')
self._VerifyEncodeDecode(0xffff, '\xff\xff\x03')
self._VerifyEncodeDecode(0xffffffff, '\xff\xff\xff\xff\x0f')
self._VerifyEncodeDecode(0xffffffffffffffff, '\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01')
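  # The expected byte strings above are consistent with a little-endian
  # base-128 ("varint") encoding: 7 payload bits per byte, with the high bit
  # set on every byte except the last. For example 128 encodes as
  # 0b10000000 0b00000001 == '\x80\x01'. This describes the test vectors, not
  # necessarily the implementation in viewfinder.backend.base.util.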
def testConcatEncodeDecode(self):
numbers = [0xfff112, 0x12, 0x0, 0xffffffffff]
raw_bytes = ''
for n in numbers:
raw_bytes += util.EncodeVarLengthNumber(n)
for n in numbers:
val, length = util.DecodeVarLengthNumber(raw_bytes)
self.assertEqual(val, n)
raw_bytes = raw_bytes[length:]
def testInvalidDecode(self):
self.assertRaises(TypeError, util.DecodeVarLengthNumber, '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff')
def _VerifyEncodeDecode(self, number, string):
self.assertEqual(util.EncodeVarLengthNumber(number), string)
self.assertEqual(util.DecodeVarLengthNumber(string), (number, len(string)))
class DecayingStatTestCase(unittest.TestCase):
def testDecay(self):
now = 0.0
stat = util.DecayingStat(half_life=1.0, now=now)
stat.Add(1.0, now)
self.assertAlmostEquals(stat.Get(now), 1.0)
stat.Add(1.0, now)
self.assertAlmostEquals(stat.Get(now), 2.0)
now = 1.0
self.assertAlmostEquals(stat.Get(now), 1.0)
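  # The assertions above follow from exponential decay with half_life=1.0:
  # two samples of 1.0 added at t=0 sum to 2.0, and one half-life later
  # (t=1.0) the decayed value is 2.0 * 0.5 == 1.0.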
class LRUCacheTestCase(unittest.TestCase):
def testExpiration(self):
cache = util.LRUCache(4)
# Populate the cache
self.assertEqual(cache.Get(1, lambda: 1), 1)
self.assertEqual(cache.Get(2, lambda: 2), 2)
self.assertEqual(cache.Get(3, lambda: 3), 3)
self.assertEqual(cache.Get(4, lambda: 4), 4)
# Access 2 and 1 to move them to the top (and see that they are not yet evicted, so the
# factory function is ignored)
self.assertEqual(cache.Get(2, lambda: None), 2)
self.assertEqual(cache.Get(1, lambda: None), 1)
# Add a fifth object and see #3 get evicted:
self.assertEqual(cache.Get(5, lambda: 5), 5)
self.assertEqual(cache.Get(3, lambda: None), None)
class ThrottleRateTestCase(unittest.TestCase):
def testThrottle(self):
util._TEST_TIME = time.time()
# Null and empty cases.
self.assertEqual(util.ThrottleRate(None, 1, 1), ({'count': 1, 'start_time': util._TEST_TIME}, False))
self.assertEqual(util.ThrottleRate({}, 1, 1), ({'count': 1, 'start_time': util._TEST_TIME}, False))
# Increment existing.
self.assertEqual(util.ThrottleRate({'count': 1, 'start_time': util._TEST_TIME}, 2, 1),
({'count': 2, 'start_time': util._TEST_TIME}, False))
# Reset existing.
self.assertEqual(util.ThrottleRate({'count': 10, 'start_time': util._TEST_TIME - 1}, 1, 1),
({'count': 1, 'start_time': util._TEST_TIME}, False))
# Exceed.
self.assertEqual(util.ThrottleRate({}, 0, 1), ({'count': 0, 'start_time': util._TEST_TIME}, True))
self.assertEqual(util.ThrottleRate({'count': 1, 'start_time': util._TEST_TIME}, 1, 1),
({'count': 1, 'start_time': util._TEST_TIME}, True))
"""\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
import codecs
from . import handler
from . import xmlreader
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
# must do ampersand first
data = data.replace("&", "&")
data = data.replace(">", ">")
data = data.replace("<", "<")
if entities:
data = __dict_replace(data, entities)
return data
def unescape(data, entities={}):
"""Unescape &, <, and > in a string of data.
You can unescape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
# must do ampersand last
return data.replace("&", "&")
def quoteattr(data, entities={}):
"""Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
entities = entities.copy()
    entities.update({'\n': '&#10;', '\r': '&#13;', '\t': '&#9;'})
data = escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
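# Illustrative results for the helpers above (example strings only):
#   escape('AT&T <head>')   == 'AT&amp;T &lt;head&gt;'
#   unescape('AT&amp;T')    == 'AT&T'
#   quoteattr('1 < 2')      == '"1 &lt; 2"'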
def _gettextwriter(out, encoding):
if out is None:
import sys
return sys.stdout
if isinstance(out, io.TextIOBase):
# use a text writer as is
return out
if isinstance(out, (codecs.StreamWriter, codecs.StreamReaderWriter)):
# use a codecs stream writer as is
return out
# wrap a binary writer with TextIOWrapper
if isinstance(out, io.RawIOBase):
# Keep the original file open when the TextIOWrapper is
# destroyed
class _wrapper:
__class__ = out.__class__
def __getattr__(self, name):
return getattr(out, name)
buffer = _wrapper()
buffer.close = lambda: None
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
buffer = io.BufferedIOBase()
buffer.writable = lambda: True
buffer.write = out.write
try:
            # TextIOWrapper uses these methods to determine
# if BOM (for UTF-16, etc) should be added
buffer.seekable = out.seekable
buffer.tell = out.tell
except AttributeError:
pass
return io.TextIOWrapper(buffer, encoding=encoding,
errors='xmlcharrefreplace',
newline='\n',
write_through=True)
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
handler.ContentHandler.__init__(self)
out = _gettextwriter(out, encoding)
self._write = out.write
self._flush = out.flush
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
self._short_empty_elements = short_empty_elements
self._pending_start_element = False
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
# Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
# bound by definition to http://www.w3.org/XML/1998/namespace. It
# does not need to be declared and will not usually be found in
# self._current_context.
if 'http://www.w3.org/XML/1998/namespace' == name[0]:
return 'xml:' + name[1]
# The name is in a non-empty namespace
prefix = self._current_context[name[0]]
if prefix:
# If it is not the default namespace, prepend the prefix
return prefix + ":" + name[1]
# Return the unqualified name
return name[1]
    def _finish_pending_start_element(self, endElement=False):
if self._pending_start_element:
self._write('>')
self._pending_start_element = False
# ContentHandler methods
def startDocument(self):
self._write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def endDocument(self):
self._flush()
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._finish_pending_start_element()
self._write('<' + name)
for (name, value) in attrs.items():
self._write(' %s=%s' % (name, quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElement(self, name):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
self._finish_pending_start_element()
self._write('<' + self._qname(name))
for prefix, uri in self._undeclared_ns_maps:
if prefix:
self._write(' xmlns:%s="%s"' % (prefix, uri))
else:
self._write(' xmlns="%s"' % uri)
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElementNS(self, name, qname):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % self._qname(name))
def characters(self, content):
if content:
self._finish_pending_start_element()
if not isinstance(content, str):
content = str(content, self._encoding)
self._write(escape(content))
def ignorableWhitespace(self, content):
if content:
self._finish_pending_start_element()
if not isinstance(content, str):
content = str(content, self._encoding)
self._write(content)
def processingInstruction(self, target, data):
self._finish_pending_start_element()
self._write('<?%s %s?>' % (target, data))
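# Illustrative use of XMLGenerator (element and attribute names are made up):
#
#   import io
#   buf = io.StringIO()
#   gen = XMLGenerator(buf, encoding='utf-8', short_empty_elements=True)
#   gen.startDocument()
#   gen.startElement('root', {'lang': 'en'})
#   gen.characters('hi & bye')
#   gen.endElement('root')
#   gen.endDocument()
#   # buf.getvalue() ==
#   # '<?xml version="1.0" encoding="utf-8"?>\n<root lang="en">hi &amp; bye</root>'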
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
def __init__(self, parent = None):
xmlreader.XMLReader.__init__(self)
self._parent = parent
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
return self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# XMLFilter methods
def getParent(self):
return self._parent
def setParent(self, parent):
self._parent = parent
# --- Utility functions
def prepare_input_source(source, base=""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if isinstance(source, str):
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name") and isinstance(f.name, str):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
sysidfilename = os.path.join(basehead, sysid)
if os.path.isfile(sysidfilename):
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urllib.parse.urljoin(base, sysid))
f = urllib.request.urlopen(source.getSystemId())
source.setByteStream(f)
return source
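# Illustrative usage (the file name is hypothetical): both calls below return
# an InputSource whose byte stream is open and ready for a parser to read,
# provided 'doc.xml' can be resolved relative to the optional base URL:
#
#   prepare_input_source('doc.xml')
#   prepare_input_source(open('doc.xml', 'rb'))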
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
from neutron.api.v2 import attributes as attrs
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import api as qdbapi
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db import servicetype_db as st_db
from neutron.extensions import loadbalancer
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer import agent_scheduler
from neutron.services import provider_configuration as pconf
from neutron.services import service_base
LOG = logging.getLogger(__name__)
class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
agent_scheduler.LbaasAgentSchedulerDbMixin):
"""Implementation of the Neutron Loadbalancer Service Plugin.
This class manages the workflow of LBaaS request/response.
Most DB related works are implemented in class
loadbalancer_db.LoadBalancerPluginDb.
"""
supported_extension_aliases = ["lbaas",
"lbaas_agent_scheduler",
"service-type"]
# lbaas agent notifiers to handle agent update operations;
# can be updated by plugin drivers while loading;
# will be extracted by neutron manager when loading service plugins;
agent_notifiers = {}
def __init__(self):
"""Initialization for the loadbalancer service plugin."""
qdbapi.register_models()
self.service_type_manager = st_db.ServiceTypeManager.get_instance()
self._load_drivers()
def _load_drivers(self):
"""Loads plugin-drivers specified in configuration."""
self.drivers, self.default_provider = service_base.load_drivers(
constants.LOADBALANCER, self)
# we're at the point when extensions are not loaded yet
# so prevent policy from being loaded
ctx = context.get_admin_context(load_admin_roles=False)
# stop service in case provider was removed, but resources were not
self._check_orphan_pool_associations(ctx, self.drivers.keys())
def _check_orphan_pool_associations(self, context, provider_names):
"""Checks remaining associations between pools and providers.
If admin has not undeployed resources with provider that was deleted
from configuration, neutron service is stopped. Admin must delete
resources prior to removing providers from configuration.
"""
pools = self.get_pools(context)
lost_providers = set([pool['provider'] for pool in pools
if pool['provider'] not in provider_names])
# resources are left without provider - stop the service
if lost_providers:
msg = _("Delete associated loadbalancer pools before "
"removing providers %s") % list(lost_providers)
LOG.exception(msg)
raise SystemExit(msg)
def _get_driver_for_provider(self, provider):
if provider in self.drivers:
return self.drivers[provider]
# raise if not associated (should never be reached)
raise n_exc.Invalid(_("Error retrieving driver for provider %s") %
provider)
def _get_driver_for_pool(self, context, pool_id):
pool = self.get_pool(context, pool_id)
try:
return self.drivers[pool['provider']]
except KeyError:
raise n_exc.Invalid(_("Error retrieving provider for pool %s") %
pool_id)
def get_plugin_type(self):
return constants.LOADBALANCER
def get_plugin_description(self):
return "Neutron LoadBalancer Service Plugin"
def create_vip(self, context, vip):
v = super(LoadBalancerPlugin, self).create_vip(context, vip)
driver = self._get_driver_for_pool(context, v['pool_id'])
driver.create_vip(context, v)
return v
def update_vip(self, context, id, vip):
if 'status' not in vip['vip']:
vip['vip']['status'] = constants.PENDING_UPDATE
old_vip = self.get_vip(context, id)
v = super(LoadBalancerPlugin, self).update_vip(context, id, vip)
driver = self._get_driver_for_pool(context, v['pool_id'])
driver.update_vip(context, old_vip, v)
return v
def _delete_db_vip(self, context, id):
# proxy the call until plugin inherits from DBPlugin
super(LoadBalancerPlugin, self).delete_vip(context, id)
def delete_vip(self, context, id):
self.update_status(context, ldb.Vip,
id, constants.PENDING_DELETE)
v = self.get_vip(context, id)
driver = self._get_driver_for_pool(context, v['pool_id'])
driver.delete_vip(context, v)
def _get_provider_name(self, context, pool):
if ('provider' in pool and
pool['provider'] != attrs.ATTR_NOT_SPECIFIED):
provider_name = pconf.normalize_provider_name(pool['provider'])
self.validate_provider(provider_name)
return provider_name
else:
if not self.default_provider:
raise pconf.DefaultServiceProviderNotFound(
service_type=constants.LOADBALANCER)
return self.default_provider
def create_pool(self, context, pool):
provider_name = self._get_provider_name(context, pool['pool'])
p = super(LoadBalancerPlugin, self).create_pool(context, pool)
self.service_type_manager.add_resource_association(
context,
constants.LOADBALANCER,
provider_name, p['id'])
        # need to add the provider name to the pool dict because the provider
        # was not known to the db plugin at pool creation
p['provider'] = provider_name
driver = self.drivers[provider_name]
try:
driver.create_pool(context, p)
except loadbalancer.NoEligibleBackend:
            # this should catch cases when no backend of any kind
            # is available (agent, appliance, etc.)
self.update_status(context, ldb.Pool,
p['id'], constants.ERROR,
"No eligible backend")
raise loadbalancer.NoEligibleBackend(pool_id=p['id'])
return p
def update_pool(self, context, id, pool):
if 'status' not in pool['pool']:
pool['pool']['status'] = constants.PENDING_UPDATE
old_pool = self.get_pool(context, id)
p = super(LoadBalancerPlugin, self).update_pool(context, id, pool)
driver = self._get_driver_for_provider(p['provider'])
driver.update_pool(context, old_pool, p)
return p
def _delete_db_pool(self, context, id):
# proxy the call until plugin inherits from DBPlugin
# rely on uuid uniqueness:
try:
with context.session.begin(subtransactions=True):
self.service_type_manager.del_resource_associations(
context, [id])
super(LoadBalancerPlugin, self).delete_pool(context, id)
except Exception:
            # this should not happen;
            # if it does, something has gone wrong -
            # log the error and mark the pool as ERROR
LOG.error(_('Failed to delete pool %s, putting it in ERROR state'),
id)
with excutils.save_and_reraise_exception():
self.update_status(context, ldb.Pool,
id, constants.ERROR)
def delete_pool(self, context, id):
# check for delete conditions and update the status
# within a transaction to avoid a race
with context.session.begin(subtransactions=True):
self.update_status(context, ldb.Pool,
id, constants.PENDING_DELETE)
self._ensure_pool_delete_conditions(context, id)
p = self.get_pool(context, id)
driver = self._get_driver_for_provider(p['provider'])
driver.delete_pool(context, p)
def create_member(self, context, member):
m = super(LoadBalancerPlugin, self).create_member(context, member)
driver = self._get_driver_for_pool(context, m['pool_id'])
driver.create_member(context, m)
return m
def update_member(self, context, id, member):
if 'status' not in member['member']:
member['member']['status'] = constants.PENDING_UPDATE
old_member = self.get_member(context, id)
m = super(LoadBalancerPlugin, self).update_member(context, id, member)
driver = self._get_driver_for_pool(context, m['pool_id'])
driver.update_member(context, old_member, m)
return m
def _delete_db_member(self, context, id):
# proxy the call until plugin inherits from DBPlugin
super(LoadBalancerPlugin, self).delete_member(context, id)
def delete_member(self, context, id):
self.update_status(context, ldb.Member,
id, constants.PENDING_DELETE)
m = self.get_member(context, id)
driver = self._get_driver_for_pool(context, m['pool_id'])
driver.delete_member(context, m)
def create_health_monitor(self, context, health_monitor):
hm = super(LoadBalancerPlugin, self).create_health_monitor(
context,
health_monitor
)
return hm
def update_health_monitor(self, context, id, health_monitor):
old_hm = self.get_health_monitor(context, id)
hm = super(LoadBalancerPlugin, self).update_health_monitor(
context,
id,
health_monitor
)
with context.session.begin(subtransactions=True):
qry = context.session.query(
ldb.PoolMonitorAssociation
).filter_by(monitor_id=hm['id']).join(ldb.Pool)
for assoc in qry:
driver = self._get_driver_for_pool(context, assoc['pool_id'])
driver.update_pool_health_monitor(context, old_hm,
hm, assoc['pool_id'])
return hm
def _delete_db_pool_health_monitor(self, context, hm_id, pool_id):
super(LoadBalancerPlugin, self).delete_pool_health_monitor(context,
hm_id,
pool_id)
def _delete_db_health_monitor(self, context, id):
super(LoadBalancerPlugin, self).delete_health_monitor(context, id)
def create_pool_health_monitor(self, context, health_monitor, pool_id):
retval = super(LoadBalancerPlugin, self).create_pool_health_monitor(
context,
health_monitor,
pool_id
)
monitor_id = health_monitor['health_monitor']['id']
hm = self.get_health_monitor(context, monitor_id)
driver = self._get_driver_for_pool(context, pool_id)
driver.create_pool_health_monitor(context, hm, pool_id)
return retval
def delete_pool_health_monitor(self, context, id, pool_id):
self.update_pool_health_monitor(context, id, pool_id,
constants.PENDING_DELETE)
hm = self.get_health_monitor(context, id)
driver = self._get_driver_for_pool(context, pool_id)
driver.delete_pool_health_monitor(context, hm, pool_id)
def stats(self, context, pool_id):
driver = self._get_driver_for_pool(context, pool_id)
stats_data = driver.stats(context, pool_id)
        # if we get something from the driver, update the db and
        # return the value from the db;
        # otherwise return what we already have in the db
if stats_data:
super(LoadBalancerPlugin, self).update_pool_stats(
context,
pool_id,
stats_data
)
return super(LoadBalancerPlugin, self).stats(context,
pool_id)
def populate_vip_graph(self, context, vip):
"""Populate the vip with: pool, members, healthmonitors."""
pool = self.get_pool(context, vip['pool_id'])
vip['pool'] = pool
vip['members'] = [self.get_member(context, member_id)
for member_id in pool['members']]
vip['health_monitors'] = [self.get_health_monitor(context, hm_id)
for hm_id in pool['health_monitors']]
return vip
def validate_provider(self, provider):
if provider not in self.drivers:
raise pconf.ServiceProviderNotFound(
provider=provider, service_type=constants.LOADBALANCER)
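# Illustrative sketch only (not part of the original module): a minimal no-op
# driver showing the callback surface this plugin dispatches to through
# _get_driver_for_pool()/_get_driver_for_provider(). Real drivers are loaded
# from configuration by service_base.load_drivers(); the class name and the
# bodies below are placeholders.
class _NoopLoadBalancerDriver(object):
    def __init__(self, plugin):
        # Drivers typically keep a reference to the plugin so they can update
        # resource statuses via plugin.update_status() and call the plugin's
        # _delete_db_* helpers once the backend has been cleaned up.
        self.plugin = plugin
    def create_vip(self, context, vip):
        pass
    def update_vip(self, context, old_vip, vip):
        pass
    def delete_vip(self, context, vip):
        self.plugin._delete_db_vip(context, vip['id'])
    def create_pool(self, context, pool):
        pass
    def update_pool(self, context, old_pool, pool):
        pass
    def delete_pool(self, context, pool):
        self.plugin._delete_db_pool(context, pool['id'])
    def stats(self, context, pool_id):
        # Returning a falsy value makes LoadBalancerPlugin.stats() fall back
        # to the statistics already stored in the db.
        return {}
    # The member and pool-health-monitor hooks (create/update/delete_member,
    # create/update/delete_pool_health_monitor) follow the same pattern.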
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkProfilesOperations(object):
"""NetworkProfilesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_profile_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_profile_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network profile.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_profile_name: The name of the NetworkProfile.
:type network_profile_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_profile_name=network_profile_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_profile_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkProfile"
"""Gets the specified network profile in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param network_profile_name: The name of the network profile.
:type network_profile_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkProfile, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.NetworkProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
network_profile_name, # type: str
parameters, # type: "_models.NetworkProfile"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkProfile"
"""Creates or updates a network profile.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_profile_name: The name of the network profile.
:type network_profile_name: str
:param parameters: Parameters supplied to the create or update network profile operation.
:type parameters: ~azure.mgmt.network.v2019_06_01.models.NetworkProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkProfile, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.NetworkProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkProfile')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkProfile', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
network_profile_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkProfile"
"""Updates network profile tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_profile_name: The name of the network profile.
:type network_profile_name: str
:param parameters: Parameters supplied to update network profile tags.
:type parameters: ~azure.mgmt.network.v2019_06_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkProfile, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.NetworkProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkProfileListResult"]
"""Gets all the network profiles in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkProfileListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.NetworkProfileListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfileListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkProfileListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkProfiles'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkProfileListResult"]
"""Gets all network profiles in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkProfileListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.NetworkProfileListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfileListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkProfileListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles'} # type: ignore
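# Illustrative sketch (hand-written, not generated): callers normally do not
# construct NetworkProfilesOperations directly; the operations group is attached
# to a service client. Assuming the azure-identity and azure-mgmt-network
# packages are installed, typical usage looks roughly like this; the resource
# names passed in are placeholders.
def _example_network_profiles_usage(subscription_id, resource_group, profile_name):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    # Maps to NetworkProfilesOperations.get() above.
    profile = client.network_profiles.get(resource_group, profile_name)
    # list() and list_all() above return ItemPaged iterators.
    names = [p.name for p in client.network_profiles.list(resource_group)]
    # Deletion is a long-running operation; begin_delete() returns an LROPoller.
    client.network_profiles.begin_delete(resource_group, profile_name).result()
    return profile, names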
| |
from __future__ import unicode_literals
import json
import mimetypes
import os
import re
import sys
from copy import copy
from importlib import import_module
from io import BytesIO
from django.apps import apps
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import ISO_8859_1, UTF_8, WSGIRequest
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.urls import resolve
from django.utils import six
from django.utils.encoding import force_bytes, force_str, uri_to_iri
from django.utils.functional import SimpleLazyObject, curry
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.six.moves.urllib.parse import urlparse, urlsplit
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
class RedirectCycleError(Exception):
"""
The test client has been asked to follow a redirect loop.
"""
def __init__(self, message, last_response):
super(RedirectCycleError, self).__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload(object):
"""
    A wrapper around BytesIO that restricts what can be read, since data from
    the network can't be sought and cannot be read beyond its content
    length. This makes sure that views can't do anything under the test client
    that wouldn't work in Real Life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
            raise ValueError("Unable to write a payload after it has been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
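# Illustrative sketch: FakePayload enforces Content-Length semantics, so reading
# more than was written trips the assertion above, mirroring a real WSGI input
# stream. The byte string used here is arbitrary.
def _example_fake_payload():
    payload = FakePayload(b'abc')
    assert len(payload) == 3
    first = payload.read(2)   # b'ab'
    rest = payload.read()     # the remaining byte, b'c'
    return first + rest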
def closing_iterator_wrapper(iterable, close):
try:
for item in iterable:
yield item
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
class ClientHandler(BaseHandler):
"""
    An HTTP handler that can be used for testing purposes. Uses the WSGI
interface to compose requests, but returns the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super(ClientHandler, self).__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Attach the originating request to the response so that it could be
# later retrieved.
response.wsgi_request = request
# We're emulating a WSGI server; we must call the close method
# on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
if 'context' not in store:
store['context'] = ContextList()
store['context'].append(copy(context))
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, six.string_types) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
])
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
def encode_file(boundary, key, file):
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
filename = os.path.basename(file.name) if hasattr(file, 'name') else ''
if hasattr(file, 'content_type'):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = 'application/octet-stream'
if not filename:
filename = key
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, filename)),
to_bytes('Content-Type: %s' % content_type),
b'',
to_bytes(file.read())
]
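# Illustrative sketch: how encode_multipart() flattens a form dict, including a
# file-like value, into a multipart/form-data body delimited by BOUNDARY. The
# BytesIO stand-in for an uploaded file is hypothetical; any object with a
# read() method is treated as a file.
def _example_encode_multipart():
    upload = BytesIO(b'file contents')
    upload.name = 'notes.txt'
    body = encode_multipart(BOUNDARY, {
        'title': 'hello',
        'attachment': upload,
    })
    # body is bytes suitable for a POST with content_type=MULTIPART_CONTENT.
    return body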
class RequestFactory(object):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': str('/'),
'REMOTE_ADDR': str('127.0.0.1'),
'REQUEST_METHOD': str('GET'),
'SCRIPT_NAME': str(''),
'SERVER_NAME': str('testserver'),
'SERVER_PORT': str('80'),
'SERVER_PROTOCOL': str('HTTP/1.1'),
'wsgi.version': (1, 0),
'wsgi.url_scheme': str('http'),
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _get_path(self, parsed):
path = force_str(parsed[2])
# If there are parameters, add them
if parsed[3]:
path += str(";") + force_str(parsed[3])
path = uri_to_iri(path).encode(UTF_8)
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. We replicate this behavior here.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode(ISO_8859_1) if six.PY3 else path
def get(self, path, data=None, secure=False, **extra):
"Construct a GET request."
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('GET', path, secure=secure, **r)
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
secure=False, **extra):
"Construct a POST request."
data = {} if data is None else data
post_data = self._encode_data(data, content_type)
return self.generic('POST', path, post_data, content_type,
secure=secure, **extra)
def head(self, path, data=None, secure=False, **extra):
"Construct a HEAD request."
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('HEAD', path, secure=secure, **r)
def trace(self, path, secure=False, **extra):
"Construct a TRACE request."
return self.generic('TRACE', path, secure=secure, **extra)
def options(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type,
secure=secure, **extra)
def put(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PUT request."
return self.generic('PUT', path, data, content_type,
secure=secure, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PATCH request."
return self.generic('PATCH', path, data, content_type,
secure=secure, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a DELETE request."
return self.generic('DELETE', path, data, content_type,
secure=secure, **extra)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False,
**extra):
"""Constructs an arbitrary HTTP request."""
parsed = urlparse(force_str(path))
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': str(method),
'SERVER_PORT': str('443') if secure else str('80'),
'wsgi.url_scheme': str('https') if secure else str('http'),
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': str(content_type),
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
query_string = force_bytes(parsed[4])
# WSGI requires latin-1 encoded strings. See get_path_info().
if six.PY3:
query_string = query_string.decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if apps.is_installed('django.contrib.sessions'):
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
else:
s = engine.SessionStore()
s.save()
self.cookies[settings.SESSION_COOKIE_NAME] = s.session_key
return s
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
        and passes it to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
six.reraise(*exc_info)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = curry(self._parse_json, response)
# Attach the ResolverMatch instance to the response
response.resolver_match = SimpleLazyObject(lambda: resolve(request['PATH_INFO']))
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
def get(self, path, data=None, follow=False, secure=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
follow=False, secure=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PATCH.
"""
response = super(Client, self).patch(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def trace(self, path, data='', follow=False, secure=False, **extra):
"""
Send a TRACE request to the server.
"""
response = super(Client, self).trace(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
        Sets the Client to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
from django.contrib.auth import authenticate
user = authenticate(**credentials)
if (user and user.is_active and
apps.is_installed('django.contrib.sessions')):
self._login(user)
return True
else:
return False
def force_login(self, user, backend=None):
self._login(user, backend)
def _login(self, user, backend=None):
from django.contrib.auth import login
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user, backend)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
def logout(self):
"""
Removes the authenticated user's cookies and session object.
Causes the authenticated user to be logged out.
"""
from django.contrib.auth import get_user, logout
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
if self.session:
request.session = self.session
request.user = get_user(request)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if 'application/json' not in response.get('Content-Type'):
raise ValueError(
'Content-Type header is "{0}", not "application/json"'
.format(response.get('Content-Type'))
)
return json.loads(response.content.decode(), **extra)
def _handle_redirects(self, response, **extra):
"Follows any redirects by requesting responses from the server using GET."
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
response_url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((response_url, response.status_code))
url = urlsplit(response_url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
response.redirect_chain = redirect_chain
if redirect_chain[-1] in redirect_chain[:-1]:
# Check that we're not redirecting to somewhere we've already
# been to, to prevent loops.
raise RedirectCycleError("Redirect loop detected.", last_response=response)
if len(redirect_chain) > 20:
# Such a lengthy chain likely also means a loop, but one with
# a growing path, changing view, or changing query argument;
# 20 is the value of "network.http.redirection-limit" from Firefox.
raise RedirectCycleError("Too many redirects.", last_response=response)
return response
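# Illustrative sketch: exercising the test Client defined above. The URLs and
# credentials are hypothetical, and in practice this runs inside a configured
# Django test environment (e.g. a TestCase) with matching views and users.
def _example_client_usage():
    client = Client()
    response = client.get('/hello/', {'q': 'django'}, follow=True)
    assert response.status_code == 200
    # POST data is multipart-encoded by default (see encode_multipart() above).
    client.post('/submit/', {'foo': 'bar'})
    # login() returns False if the credentials are rejected, the user is
    # inactive, or the sessions framework is unavailable.
    if client.login(username='alice', password='secret'):
        client.get('/private/')
        client.logout()
    return response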
| |
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This test checks that blocks containing segwit recovery transactions will be accepted,
that segwit recovery transactions are rejected from mempool acceptance (even with
-acceptnonstdtxn=1), and that segwit recovery transactions don't result in bans.
"""
import time
from typing import Optional, Sequence
from test_framework.blocktools import (
create_block,
create_coinbase,
make_conform_to_ctor,
)
from test_framework.messages import (
COIN,
CBlock,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
ToHex,
)
from test_framework.p2p import P2PDataStore
from test_framework.script import (
OP_EQUAL,
OP_HASH160,
OP_TRUE,
CScript,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error
TEST_TIME = int(time.time())
# Error due to a non-clean stack
CLEANSTACK_ERROR = 'non-mandatory-script-verify-flag (Stack size must be exactly one after execution)'
RPC_CLEANSTACK_ERROR = CLEANSTACK_ERROR
EVAL_FALSE_ERROR = 'non-mandatory-script-verify-flag (Script evaluated without error but finished with a false/empty top stack elem'
RPC_EVAL_FALSE_ERROR = EVAL_FALSE_ERROR + "ent)"
class PreviousSpendableOutput(object):
def __init__(self, tx=CTransaction(), n=-1):
self.tx = tx
self.n = n
class SegwitRecoveryTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.tip_height = 0
# We have 2 nodes:
# 1) node_nonstd (nodes[0]) accepts non-standard txns. It does not
# accept Segwit recovery transactions, since it is included in
# standard flags, and transactions that violate these flags are
# never accepted into the mempool.
# 2) node_std (nodes[1]) doesn't accept non-standard txns and
# doesn't have us whitelisted. It's used to test for bans, as we
# connect directly to it via mininode and send a segwit spending
# txn. This transaction is non-standard. We check that sending
# this transaction doesn't result in a ban.
# Nodes are connected to each other, so node_std receives blocks and
# transactions that node_nonstd has accepted. Since we are checking
        # that segwit spending txns do not result in bans, node_nonstd
        # doesn't get banned when forwarding this kind of transaction to
# node_std.
self.extra_args = [['-whitelist=noban@127.0.0.1',
"-acceptnonstdtxn"],
["-acceptnonstdtxn=0"]]
def make_block(self, base_block: Optional[CBlock]) -> CBlock:
"""
Build a new block and return it.
Increment the tip_height counter.
If base_block is None, use the genesis block as base block.
"""
if base_block is None:
base_block_hash = self.genesis_hash
block_time = TEST_TIME
else:
base_block_hash = base_block.sha256
block_time = base_block.nTime + 1
# First create the coinbase
self.tip_height += 1
coinbase = create_coinbase(self.tip_height)
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time)
        # Do PoW, which is cheap on regtest
block.solve()
return block
def run_test(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
spendable_outputs = []
# shorthand
node_nonstd = self.nodes[0]
node_std = self.nodes[1]
peer_nonstd = node_nonstd.add_p2p_connection(P2PDataStore())
peer_std = node_std.add_p2p_connection(P2PDataStore())
# adds transactions to the block and updates state
def update_block(block: CBlock,
new_transactions: Sequence[CTransaction]):
block.vtx.extend(new_transactions)
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Returns 2 transactions:
        # 1) txfund: creates outputs in segwit addresses
# 2) txspend: spends outputs from segwit addresses
def create_segwit_fund_and_spend_tx(spend, case0=False):
if not case0:
# Spending from a P2SH-P2WPKH coin,
# txhash:a45698363249312f8d3d93676aa714be59b0bd758e62fa054fb1ea6218480691
redeem_script0 = bytearray.fromhex(
'0014fcf9969ce1c98a135ed293719721fb69f0b686cb')
# Spending from a P2SH-P2WSH coin,
# txhash:6b536caf727ccd02c395a1d00b752098ec96e8ec46c96bee8582be6b5060fa2f
redeem_script1 = bytearray.fromhex(
'0020fc8b08ed636cb23afcb425ff260b3abd03380a2333b54cfa5d51ac52d803baf4')
else:
redeem_script0 = bytearray.fromhex('51020000')
redeem_script1 = bytearray.fromhex('53020080')
redeem_scripts = [redeem_script0, redeem_script1]
# Fund transaction to segwit addresses
txfund = CTransaction()
txfund.vin = [CTxIn(COutPoint(spend.tx.sha256, spend.n))]
amount = (50 * COIN - 1000) // len(redeem_scripts)
for redeem_script in redeem_scripts:
txfund.vout.append(
CTxOut(amount, CScript([OP_HASH160, hash160(redeem_script), OP_EQUAL])))
txfund.rehash()
# Segwit spending transaction
# We'll test if a node that checks for standardness accepts this
            # txn. It should fail exclusively because of the restriction in
            # the scriptSig (non-clean stack), so all other characteristics
            # must pass standardness checks. For this reason, we create
            # standard P2SH outputs (the wrapping is sketched at module level
            # after this class).
txspend = CTransaction()
for i in range(len(redeem_scripts)):
txspend.vin.append(
CTxIn(COutPoint(txfund.sha256, i), CScript([redeem_scripts[i]])))
txspend.vout = [CTxOut(50 * COIN - 2000,
CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]))]
txspend.rehash()
return txfund, txspend
# Create a new block
block = self.make_block(base_block=None)
spendable_outputs.append(block)
peer_nonstd.send_blocks_and_test([block], node_nonstd)
# Now we need that block to mature so we can spend the coinbase.
matureblocks = []
for _ in range(199):
block = self.make_block(block)
matureblocks.append(block)
spendable_outputs.append(block)
peer_nonstd.send_blocks_and_test(matureblocks, node_nonstd)
# collect spendable outputs now to avoid cluttering the code later on
out = []
for _ in range(100):
out.append(
PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0))
# Create segwit funding and spending transactions
txfund, txspend = create_segwit_fund_and_spend_tx(out[0])
txfund_case0, txspend_case0 = create_segwit_fund_and_spend_tx(
out[1], True)
# Mine txfund, as it can't go into node_std mempool because it's
# nonstandard.
block = self.make_block(block)
update_block(block, [txfund, txfund_case0])
peer_nonstd.send_blocks_and_test([block], node_nonstd)
# Check both nodes are synchronized before continuing.
self.sync_blocks()
# Check that upgraded nodes checking for standardness are not banning
# nodes sending segwit spending txns.
peer_nonstd.send_txs_and_test([txspend], node_nonstd, success=False,
reject_reason=CLEANSTACK_ERROR)
peer_nonstd.send_txs_and_test([txspend_case0], node_nonstd, success=False,
reject_reason=EVAL_FALSE_ERROR)
peer_std.send_txs_and_test([txspend], node_std, success=False,
reject_reason=CLEANSTACK_ERROR)
peer_std.send_txs_and_test([txspend_case0], node_std, success=False,
reject_reason=EVAL_FALSE_ERROR)
# Segwit recovery txns are never accepted into the mempool,
        # as the rules they violate are included in the standard flags.
assert_raises_rpc_error(-26, RPC_CLEANSTACK_ERROR,
node_nonstd.sendrawtransaction, ToHex(txspend))
assert_raises_rpc_error(-26, RPC_EVAL_FALSE_ERROR,
node_nonstd.sendrawtransaction, ToHex(txspend_case0))
assert_raises_rpc_error(-26, RPC_CLEANSTACK_ERROR,
node_std.sendrawtransaction, ToHex(txspend))
assert_raises_rpc_error(-26, RPC_EVAL_FALSE_ERROR,
node_std.sendrawtransaction, ToHex(txspend_case0))
# Blocks containing segwit spending txns are accepted in both nodes.
block = self.make_block(block)
update_block(block, [txspend, txspend_case0])
peer_nonstd.send_blocks_and_test([block], node_nonstd)
self.sync_blocks()
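# Illustrative sketch: the P2SH wrapping used by create_segwit_fund_and_spend_tx()
# above. The funding output pays to the HASH160 of a redeem script and the spend
# reveals that redeem script in its scriptSig. The default redeem script here is
# a placeholder, not one of the segwit scripts exercised by the test.
def _example_p2sh_wrap(redeem_script=bytes(CScript([OP_TRUE]))):
    script_pubkey = CScript([OP_HASH160, hash160(redeem_script), OP_EQUAL])
    script_sig = CScript([redeem_script])
    return script_pubkey, script_sig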
if __name__ == '__main__':
SegwitRecoveryTest().main()
| |
"""Base class for platform implementations
"""
import ctypes
from OpenGL.platform import ctypesloader
import sys
import OpenGL as top_level_module
from OpenGL import logs, MODULE_ANNOTATIONS
class _CheckContext( object ):
def __init__( self, func, ccisvalid ):
self.func = func
self.ccisvalid = ccisvalid
def __setattr__( self, key, value ):
if key not in ('func','ccisvalid'):
return setattr( self.func, key, value )
else:
self.__dict__[key] = value
def __getattr__( self, key ):
if key != 'func':
return getattr(self.func, key )
raise AttributeError( key )
def __call__( self, *args, **named ):
if not self.ccisvalid():
from OpenGL import error
raise error.NoContext( self.func, args, named )
return self.func( *args, **named )
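# Illustrative sketch: _CheckContext wraps a GL entry point so that calling it
# without a valid context raises error.NoContext instead of crashing. The
# stand-in function and validity callable below are hypothetical.
def _example_check_context_wrapper():
    def fake_entry_point(x):
        return x * 2
    wrapped = _CheckContext(fake_entry_point, ccisvalid=lambda: True)
    return wrapped(21)  # delegates to fake_entry_point once the check passes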
def _find_module( exclude = (__name__,)):
frame = sys._getframe()
while frame and '__name__' in frame.f_globals:
if exclude:
if not frame.f_globals['__name__'] in exclude:
return frame.f_globals['__name__']
else:
return frame.f_globals['__name__']
frame = frame.f_back
return None
class BasePlatform( object ):
"""Base class for per-platform implementations
Attributes of note:
EXPORTED_NAMES -- set of names exported via the platform
module's namespace...
GL, GLU, GLUT, GLE, OpenGL -- ctypes libraries
DEFAULT_FUNCTION_TYPE -- used as the default function
type for functions unless overridden on a per-DLL
basis with a "FunctionType" member
GLUT_GUARD_CALLBACKS -- if True, the GLUT wrappers
will provide guarding wrappers to prevent GLUT
errors with uninitialised GLUT.
EXTENSIONS_USE_BASE_FUNCTIONS -- if True, uses regular
dll attribute-based lookup to retrieve extension
function pointers.
"""
EXPORTED_NAMES = [
'GetCurrentContext','CurrentContextIsValid','safeGetError',
'createBaseFunction', 'createExtensionFunction', 'copyBaseFunction',
'GL','GLU','GLUT','GLE','OpenGL',
'getGLUTFontPointer',
'GLUT_GUARD_CALLBACKS',
]
DEFAULT_FUNCTION_TYPE = None
GLUT_GUARD_CALLBACKS = False
EXTENSIONS_USE_BASE_FUNCTIONS = False
def install( self, namespace ):
"""Install this platform instance into the platform module"""
for name in self.EXPORTED_NAMES:
namespace[ name ] = getattr(self,name)
namespace['PLATFORM'] = self
return self
def functionTypeFor( self, dll ):
"""Given a DLL, determine appropriate function type..."""
if hasattr( dll, 'FunctionType' ):
return dll.FunctionType
else:
return self.DEFAULT_FUNCTION_TYPE
def errorChecking( self, func, dll ):
"""Add error checking to the function if appropriate"""
from OpenGL import error
if top_level_module.ERROR_CHECKING:
if dll not in (self.GLUT,):
#GLUT spec says error-checking is basically undefined...
# there *may* be GL errors on GLUT calls that e.g. render
# geometry, but that's all basically "maybe" stuff...
func.errcheck = error.glCheckError
return func
def wrapContextCheck( self, func, dll ):
"""Wrap function with context-checking if appropriate"""
if top_level_module.CONTEXT_CHECKING and dll is not self.GLUT:
return _CheckContext( func, self.CurrentContextIsValid )
return func
def wrapLogging( self, func ):
"""Wrap function with logging operations if appropriate"""
return logs.logOnFail( func, logs.getLog( 'OpenGL.errors' ))
def finalArgType( self, typ ):
"""Retrieve a final type for arg-type"""
if typ == ctypes.POINTER( None ) and not getattr( typ, 'final',False):
from OpenGL.arrays import ArrayDatatype
return ArrayDatatype
else:
return typ
def constructFunction(
self,
functionName, dll,
resultType=ctypes.c_int, argTypes=(),
doc = None, argNames = (),
extension = None,
deprecated = False,
module = None,
):
"""Core operation to create a new base ctypes function
raises AttributeError if can't find the procedure...
"""
if extension and not self.checkExtension( extension ):
raise AttributeError( """Extension not available""" )
argTypes = [ self.finalArgType( t ) for t in argTypes ]
if extension and not self.EXTENSIONS_USE_BASE_FUNCTIONS:
# what about the VERSION values???
if self.checkExtension( extension ):
pointer = self.getExtensionProcedure( functionName )
if pointer:
func = self.functionTypeFor( dll )(
resultType,
*argTypes
)(
pointer
)
else:
raise AttributeError( """Extension %r available, but no pointer for function %r"""%(extension,functionName))
else:
raise AttributeError( """No extension %r"""%(extension,))
else:
func = ctypesloader.buildFunction(
self.functionTypeFor( dll )(
resultType,
*argTypes
),
functionName,
dll,
)
func.__doc__ = doc
func.argNames = list(argNames or ())
func.__name__ = functionName
func.DLL = dll
func.extension = extension
func.deprecated = deprecated
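# Note: the wrappers below are applied innermost-to-outermost -- GL error
# checking is attached to the raw ctypes function (via errcheck), the
# context-validity guard wraps that, and failure logging wraps the whole
# call chain.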
func = self.wrapLogging(
self.wrapContextCheck(
self.errorChecking( func, dll ),
dll,
)
)
if MODULE_ANNOTATIONS:
if not module:
module = _find_module( )
if module:
func.__module__ = module
return func
def createBaseFunction(
self,
functionName, dll,
resultType=ctypes.c_int, argTypes=(),
doc = None, argNames = (),
extension = None,
deprecated = False,
module = None,
):
"""Create a base function for given name
Normally you can just use the dll.name hook to get the object,
but we want to be able to create different bindings for the
same function, so we do the work manually here to produce a
base function from a DLL.
"""
from OpenGL import wrapper
result = None
try:
if (
top_level_module.FORWARD_COMPATIBLE_ONLY and
dll is self.GL and
deprecated
):
result = self.nullFunction(
functionName, dll=dll,
resultType=resultType,
argTypes=argTypes,
doc = doc, argNames = argNames,
extension = extension,
deprecated = deprecated,
)
else:
result = self.constructFunction(
functionName, dll,
resultType=resultType, argTypes=argTypes,
doc = doc, argNames = argNames,
extension = extension,
)
except AttributeError as err:
result = self.nullFunction(
functionName, dll=dll,
resultType=resultType,
argTypes=argTypes,
doc = doc, argNames = argNames,
extension = extension,
)
if MODULE_ANNOTATIONS:
if not module:
module = _find_module( )
if module:
result.__module__ = module
return result
def checkExtension( self, name ):
"""Check whether the given extension is supported by current context"""
if not name:
return True
context = self.GetCurrentContext()
if context:
from OpenGL import contextdata
from OpenGL.raw.GL import GL_EXTENSIONS
set = contextdata.getValue( GL_EXTENSIONS, context=context )
if set is None:
set = {}
contextdata.setValue(
GL_EXTENSIONS, set, context=context, weak=False
)
current = set.get( name )
if current is None:
from OpenGL import extensions
result = extensions.hasGLExtension( name )
set[name] = result
return result
return current
else:
return False
createExtensionFunction = createBaseFunction
def copyBaseFunction( self, original ):
"""Create a new base function based on an already-created function
This is normally used to provide type-specific convenience versions of
a definition created by the automated generator.
"""
from OpenGL import wrapper, error
if isinstance( original, _NullFunctionPointer ):
return self.nullFunction(
original.__name__,
original.DLL,
resultType = original.restype,
argTypes= original.argtypes,
doc = original.__doc__,
argNames = original.argNames,
extension = original.extension,
deprecated = original.deprecated,
)
elif hasattr( original, 'originalFunction' ):
original = original.originalFunction
return self.createBaseFunction(
original.__name__, original.DLL,
resultType=original.restype, argTypes=original.argtypes,
doc = original.__doc__, argNames = original.argNames,
extension = original.extension,
deprecated = original.deprecated,
)
def nullFunction(
self,
functionName, dll,
resultType=ctypes.c_int,
argTypes=(),
doc = None, argNames = (),
extension = None,
deprecated = False,
module = None,
):
"""Construct a "null" function pointer"""
if deprecated:
base = _DeprecatedFunctionPointer
else:
base = _NullFunctionPointer
cls = type( functionName, (base,), {
'__doc__': doc,
'deprecated': deprecated,
} )
if MODULE_ANNOTATIONS:
if not module:
module = _find_module( )
if module:
cls.__module__ = module
return cls(
functionName, dll, resultType, argTypes, argNames, extension=extension, doc=doc,
)
def GetCurrentContext( self ):
"""Retrieve opaque pointer for the current context"""
raise NotImplementedError(
"""Platform does not define a GetCurrentContext function"""
)
def CurrentContextIsValid( self ):
"""Return boolean of whether current context is valid"""
raise NotImplementedError(
"""Platform does not define a CurrentContextIsValid function"""
)
def getGLUTFontPointer(self, constant ):
"""Retrieve a GLUT font pointer for this platform"""
raise NotImplementedError(
"""Platform does not define a GLUT font retrieval function"""
)
def safeGetError( self ):
"""Safety-checked version of glError() call (checks for valid context first)"""
raise NotImplementedError(
"""Platform does not define a safeGetError function"""
)
class _NullFunctionPointer( object ):
"""Function-pointer-like object for undefined functions"""
def __init__( self, name, dll, resultType, argTypes, argNames, extension=None, doc=None, deprecated=False ):
from OpenGL import error
self.__name__ = name
self.DLL = dll
self.argNames = argNames
self.argtypes = argTypes
self.errcheck = None
self.restype = resultType
self.extension = extension
self.doc = doc
self.deprecated = deprecated
resolved = False
def __nonzero__( self ):
"""Make this object appear to be NULL"""
if self.extension and not self.resolved:
self.load()
return self.resolved
def load( self ):
"""Attempt to load the function again, presumably with a context this time"""
from OpenGL import platform
if not platform.PLATFORM.checkExtension( self.extension ):
return None
try:
func = platform.PLATFORM.constructFunction(
self.__name__, self.DLL,
resultType=self.restype,
argTypes=self.argtypes,
doc = self.doc,
argNames = self.argNames,
extension = self.extension,
)
except AttributeError as err:
return None
else:
# now short-circuit so that we don't need to check again...
self.__class__.__call__ = staticmethod( func.__call__ )
self.resolved = True
return func
return None
def __call__( self, *args, **named ):
if self.load():
return self( *args, **named )
else:
from OpenGL import error
raise error.NullFunctionError(
"""Attempt to call an undefined function %s, check for bool(%s) before calling"""%(
self.__name__, self.__name__,
)
)
class _DeprecatedFunctionPointer( _NullFunctionPointer ):
deprecated = True
def __call__( self, *args, **named ):
from OpenGL import error
raise error.NullFunctionError(
"""Attempt to call a deprecated function %s while OpenGL in FORWARD_COMPATIBLE_ONLY mode. Set OpenGL.FORWARD_COMPATIBLE_ONLY to False to use legacy entry points"""%(
self.__name__,
)
)
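if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: it assumes that a
    # concrete platform has been installed as OpenGL.platform.PLATFORM and that
    # a GL context is current; without a context the call may fail or return
    # None.  It illustrates the createBaseFunction flow defined above.
    from OpenGL import platform
    glGetString = platform.PLATFORM.createBaseFunction(
        'glGetString', dll=platform.PLATFORM.GL,
        resultType=ctypes.c_char_p, argTypes=[ctypes.c_uint],
        doc='glGetString( name ) -> string', argNames=('name',),
    )
    GL_VERSION = 0x1F02
    print( glGetString( GL_VERSION ) )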
| |
import roslib
roslib.load_manifest('hri_framework')
import rospy
from unittest import TestCase, TestLoader
from hri_msgs.msg import TimerAction, TimerGoal, TimerResult, TimerFeedback
from hri_api.actions import MultiGoalActionClient, ActionHandle
from hri_api.tests import TimerActionServer
from actionlib.action_client import GoalStatus
import time
from mock import Mock
__author__ = 'Jamie Diprose'
global count
count = 0
class TestMultiGoalActionClient(TestCase):
ACTION_SERVER_NAME = "test_action_server"
def setUp(self):
global count
rospy.init_node("test", anonymous=True)
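# Each test gets its own uniquely named server/client pair (the global
# counter suffixes the name), presumably so goals from a previous test's
# server cannot leak into the next test.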
self.action_server = TimerActionServer(TestMultiGoalActionClient.ACTION_SERVER_NAME + "_" + str(count))
self.action_server.start_server()
self.client = MultiGoalActionClient(TestMultiGoalActionClient.ACTION_SERVER_NAME + "_" + str(count), TimerAction)
self.client.wait_for_server()
count += 1
def tearDown(self):
self.client.stop()
def test_wait_for_server(self):
started1 = self.client.wait_for_server(timeout=rospy.Duration.from_sec(1.0))
client2 = MultiGoalActionClient("i_dont_exist", TimerAction)
started2 = client2.wait_for_server(timeout=rospy.Duration.from_sec(1.0))
self.assertEqual(started1, True)
self.assertEqual(started2, False)
def test_send_goal(self):
timer = TimerGoal()
timer.duration = 0.5
gh1 = self.client.send_goal(timer)
success = self.client.wait_for_result(gh1, timeout=rospy.Duration.from_sec(0.6))
self.assertEqual(success, True)
def test_get_result(self):
timer = TimerGoal()
timer.duration = 0.5
gh1 = self.client.send_goal(timer)
self.client.wait_for_result(gh1, timeout=rospy.Duration.from_sec(0.55))
result = self.client.get_result(gh1)
self.assertEqual(isinstance(result, TimerResult), True)
self.assertEqual(result.success, True)
def test_send_two_goals_serial(self):
# First goal
timer1 = TimerGoal()
timer1.duration = 0.5
gh1 = self.client.send_goal(timer1)
success1 = self.client.wait_for_result(gh1, timeout=rospy.Duration.from_sec(0.55))
# Second goal
timer2 = TimerGoal()
timer2.duration = 0.5
gh2 = self.client.send_goal(timer2)
success2 = self.client.wait_for_result(gh2, timeout=rospy.Duration.from_sec(0.6))
self.assertEqual(success1, True)
self.assertEqual(success2, True)
def test_send_two_goals_parallel(self):
timer1 = TimerGoal()
timer2 = TimerGoal()
timer1.duration = 1.0
timer2.duration = 1.0
start = time.time()
# Send both goals
gh1 = self.client.send_goal(timer1)
gh2 = self.client.send_goal(timer2)
result1 = self.client.wait_for_result(gh1, timeout=rospy.Duration.from_sec(1.1))
result2 = self.client.wait_for_result(gh2, timeout=rospy.Duration.from_sec(1.1))
end = time.time()
duration = end - start
self.assertEqual(result1, True)
self.assertEqual(result2, True)
# self.assertAlmostEqual(duration, 1.0, places=1)
def test_get_goal_id(self):
timer = TimerGoal()
timer.duration = 0.1
gh1 = self.client.send_goal(timer)
goal_id = self.client.goal_id(gh1)
self.assertIsNotNone(goal_id)
def test_is_tracking_goal(self):
timer = TimerGoal()
timer.duration = 0.1
gh1 = self.client.send_goal(timer)
tracking = self.client.is_tracking_goal(gh1)
self.assertEqual(tracking, True)
def test_get_action_handle(self):
timer = TimerGoal()
timer.duration = 1.0
gh1 = self.client.send_goal(timer)
time.sleep(0.5)
action_handle = self.client.get_action_handle(gh1)
self.assertEqual(isinstance(action_handle, ActionHandle), True)
def test_get_state(self):
timer = TimerGoal()
timer.duration = 1.0
gh1 = self.client.send_goal(timer)
pending = self.client.get_state(gh1)
time.sleep(0.5)
active = self.client.get_state(gh1)
self.client.wait_for_result(gh1, timeout=rospy.Duration.from_sec(1.1))
succeeded = self.client.get_state(gh1)
# gh2 = self.client.send_goal(timer)
# time.sleep(0.5)
# self.client.cancel_goal(gh2)
#preempted = self.client.get_state(gh2)
self.assertEqual(pending, GoalStatus.PENDING)
self.assertEqual(active, GoalStatus.ACTIVE)
self.assertEqual(succeeded, GoalStatus.SUCCEEDED)
# self.assertEqual(preempted, GoalStatus.PREEMPTED)
def test_cancel_goal(self):
timer1 = TimerGoal()
timer1.duration = 5.0
gh1 = self.client.send_goal(timer1)
pending = self.client.get_state(gh1); time.sleep(0.5)
active = self.client.get_state(gh1); self.client.cancel_goal(gh1); time.sleep(0.5)
preempted = self.client.get_state(gh1)
self.assertEqual(GoalStatus.PENDING, pending)
self.assertEqual(GoalStatus.ACTIVE, active)
self.assertEqual(GoalStatus.PREEMPTED, preempted)
def test_cancel_all_goals(self):
timer1 = TimerGoal()
timer2 = TimerGoal()
timer1.duration = 1.0
timer2.duration = 1.0
gh1 = self.client.send_goal(timer1)
gh2 = self.client.send_goal(timer2)
time.sleep(0.5)
self.client.cancel_all_goals()
time.sleep(0.5)
self.assertEqual(GoalStatus.PREEMPTED, self.client.get_state(gh1))
self.assertEqual(GoalStatus.PREEMPTED, self.client.get_state(gh2))
def test_cancel_goals_at_and_before_time(self):
before_time1 = TimerGoal()
before_time2 = TimerGoal()
before_time1.duration = 5.0
before_time2.duration = 5.0
gh1 = self.client.send_goal(before_time1)
gh2 = self.client.send_goal(before_time2)
cancel_time = rospy.Time().now()
after_time_1 = TimerGoal()
after_time_1.duration = 1.0
gh3 = self.client.send_goal(after_time_1)
time.sleep(0.5)
self.client.cancel_goals_at_and_before_time(cancel_time)
success = self.client.wait_for_result(gh3, timeout=rospy.Duration.from_sec(1.1))
self.assertEqual(GoalStatus.PREEMPTED, self.client.get_state(gh1))
self.assertEqual(GoalStatus.PREEMPTED, self.client.get_state(gh2))
self.assertEqual(GoalStatus.SUCCEEDED, self.client.get_state(gh3))
self.assertEqual(success, True)
def test_done_callback(self):
timer = TimerGoal()
timer.duration = 0.5
mock = Mock()
gh1 = self.client.send_goal(timer, done_cb=mock)
time.sleep(1.0)
mock.assert_called_once_with(gh1)
def test_active_callback(self):
timer = TimerGoal()
timer.duration = 0.5
mock = Mock()
gh1 = self.client.send_goal(timer, active_cb=mock)
self.client.wait_for_result(gh1, timeout=rospy.Duration.from_sec(1.1))
mock.assert_called_once_with(gh1)
def test_feedback_callback(self):
timer = TimerGoal()
timer.duration = 1.0
mock = Mock()
gh1 = self.client.send_goal(timer, feedback_cb=mock)
self.client.wait_for_result(gh1, timeout=rospy.Duration.from_sec(1.1))
mock.assert_called_once_with(gh1, TimerFeedback(current_time=10))
#calls = [TimerFeedback(current_time=0.2), TimerFeedback(current_time=0.4), TimerFeedback(current_time=0.6), TimerFeedback(current_time=0.8)]
#mock.assert_has_calls(mock, calls)
#
# def test_send_goal_and_wait(self):
# self.fail()
#
# def test_wait_for_result(self):
# self.fail()
#
# def test_get_goal_status_text(self):
# self.fail()
| |
"""Tests for AVM Fritz!Box config flow."""
from unittest.mock import patch
from fritzconnection.core.exceptions import FritzConnectionException, FritzSecurityError
import pytest
from homeassistant.components.device_tracker.const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
)
from homeassistant.components.fritz.const import (
DOMAIN,
ERROR_AUTH_INVALID,
ERROR_CANNOT_CONNECT,
ERROR_UNKNOWN,
)
from homeassistant.components.ssdp import (
ATTR_SSDP_LOCATION,
ATTR_UPNP_FRIENDLY_NAME,
ATTR_UPNP_UDN,
)
from homeassistant.config_entries import (
SOURCE_IMPORT,
SOURCE_REAUTH,
SOURCE_SSDP,
SOURCE_USER,
)
from homeassistant.const import CONF_DEVICES, CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from . import MOCK_CONFIG, FritzConnectionMock
from tests.common import MockConfigEntry
ATTR_HOST = "host"
ATTR_NEW_SERIAL_NUMBER = "NewSerialNumber"
MOCK_HOST = "fake_host"
MOCK_SERIAL_NUMBER = "fake_serial_number"
MOCK_USER_DATA = MOCK_CONFIG[DOMAIN][CONF_DEVICES][0]
MOCK_DEVICE_INFO = {
ATTR_HOST: MOCK_HOST,
ATTR_NEW_SERIAL_NUMBER: MOCK_SERIAL_NUMBER,
}
MOCK_IMPORT_CONFIG = {CONF_HOST: MOCK_HOST, CONF_USERNAME: "username"}
MOCK_SSDP_DATA = {
ATTR_SSDP_LOCATION: "https://fake_host:12345/test",
ATTR_UPNP_FRIENDLY_NAME: "fake_name",
ATTR_UPNP_UDN: "uuid:only-a-test",
}
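# Canned Fritz!Box login-session XML returned by the mocked requests.get/post
# calls in the tests below; the SID/Challenge values are placeholders.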
MOCK_REQUEST = b'<?xml version="1.0" encoding="utf-8"?><SessionInfo><SID>xxxxxxxxxxxxxxxx</SID><Challenge>xxxxxxxx</Challenge><BlockTime>0</BlockTime><Rights><Name>Dial</Name><Access>2</Access><Name>App</Name><Access>2</Access><Name>HomeAuto</Name><Access>2</Access><Name>BoxAdmin</Name><Access>2</Access><Name>Phone</Name><Access>2</Access><Name>NAS</Name><Access>2</Access></Rights><Users><User last="1">FakeFritzUser</User></Users></SessionInfo>\n'
@pytest.fixture()
def fc_class_mock():
"""Fixture that sets up a mocked FritzConnection class."""
with patch("fritzconnection.FritzConnection", autospec=True) as result:
result.return_value = FritzConnectionMock()
yield result
async def test_user(hass: HomeAssistant, fc_class_mock):
"""Test starting a flow by user."""
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=fc_class_mock,
), patch("homeassistant.components.fritz.common.FritzStatus"), patch(
"homeassistant.components.fritz.async_setup_entry"
) as mock_setup_entry, patch(
"requests.get"
) as mock_request_get, patch(
"requests.post"
) as mock_request_post:
mock_request_get.return_value.status_code = 200
mock_request_get.return_value.content = MOCK_REQUEST
mock_request_post.return_value.status_code = 200
mock_request_post.return_value.text = MOCK_REQUEST
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MOCK_USER_DATA
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert (
result["options"][CONF_CONSIDER_HOME]
== DEFAULT_CONSIDER_HOME.total_seconds()
)
assert not result["result"].unique_id
await hass.async_block_till_done()
assert mock_setup_entry.called
async def test_user_already_configured(hass: HomeAssistant, fc_class_mock):
"""Test starting a flow by user with an already configured device."""
mock_config = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
mock_config.add_to_hass(hass)
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=fc_class_mock,
), patch("homeassistant.components.fritz.common.FritzStatus"), patch(
"requests.get"
) as mock_request_get, patch(
"requests.post"
) as mock_request_post:
mock_request_get.return_value.status_code = 200
mock_request_get.return_value.content = MOCK_REQUEST
mock_request_post.return_value.status_code = 200
mock_request_post.return_value.text = MOCK_REQUEST
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MOCK_USER_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"]["base"] == "already_configured"
async def test_exception_security(hass: HomeAssistant):
"""Test starting a flow by user with invalid credentials."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=FritzSecurityError,
), patch("homeassistant.components.fritz.common.FritzStatus"):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MOCK_USER_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"]["base"] == ERROR_AUTH_INVALID
async def test_exception_connection(hass: HomeAssistant):
"""Test starting a flow by user with a connection error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=FritzConnectionException,
), patch("homeassistant.components.fritz.common.FritzStatus"):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MOCK_USER_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"]["base"] == ERROR_CANNOT_CONNECT
async def test_exception_unknown(hass: HomeAssistant):
"""Test starting a flow by user with an unknown exception."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=OSError,
), patch("homeassistant.components.fritz.common.FritzStatus"):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MOCK_USER_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"]["base"] == ERROR_UNKNOWN
async def test_reauth_successful(hass: HomeAssistant, fc_class_mock):
"""Test starting a reauthentication flow."""
mock_config = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
mock_config.add_to_hass(hass)
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=fc_class_mock,
), patch("homeassistant.components.fritz.common.FritzStatus"), patch(
"homeassistant.components.fritz.async_setup_entry"
) as mock_setup_entry, patch(
"requests.get"
) as mock_request_get, patch(
"requests.post"
) as mock_request_post:
mock_request_get.return_value.status_code = 200
mock_request_get.return_value.content = MOCK_REQUEST
mock_request_post.return_value.status_code = 200
mock_request_post.return_value.text = MOCK_REQUEST
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH, "entry_id": mock_config.entry_id},
data=mock_config.data,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_USERNAME: "other_fake_user",
CONF_PASSWORD: "other_fake_password",
},
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
assert mock_setup_entry.called
async def test_reauth_not_successful(hass: HomeAssistant, fc_class_mock):
"""Test starting a reauthentication flow but no connection found."""
mock_config = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
mock_config.add_to_hass(hass)
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=FritzConnectionException,
), patch("homeassistant.components.fritz.common.FritzStatus"):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH, "entry_id": mock_config.entry_id},
data=mock_config.data,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_USERNAME: "other_fake_user",
CONF_PASSWORD: "other_fake_password",
},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
assert result["errors"]["base"] == "cannot_connect"
async def test_ssdp_already_configured(hass: HomeAssistant, fc_class_mock):
"""Test starting a flow from discovery with an already configured device."""
mock_config = MockConfigEntry(
domain=DOMAIN,
data=MOCK_USER_DATA,
unique_id="only-a-test",
)
mock_config.add_to_hass(hass)
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=fc_class_mock,
), patch("homeassistant.components.fritz.common.FritzStatus"):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_ssdp_already_configured_host(hass: HomeAssistant, fc_class_mock):
"""Test starting a flow from discovery with an already configured host."""
mock_config = MockConfigEntry(
domain=DOMAIN,
data=MOCK_USER_DATA,
unique_id="different-test",
)
mock_config.add_to_hass(hass)
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=fc_class_mock,
), patch("homeassistant.components.fritz.common.FritzStatus"):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_ssdp_already_configured_host_uuid(hass: HomeAssistant, fc_class_mock):
"""Test starting a flow from discovery with an already configured uuid."""
mock_config = MockConfigEntry(
domain=DOMAIN,
data=MOCK_USER_DATA,
unique_id=None,
)
mock_config.add_to_hass(hass)
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=fc_class_mock,
), patch("homeassistant.components.fritz.common.FritzStatus"):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_ssdp_already_in_progress_host(hass: HomeAssistant, fc_class_mock):
"""Test starting a flow from discovery twice."""
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=fc_class_mock,
), patch("homeassistant.components.fritz.common.FritzStatus"):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
MOCK_NO_UNIQUE_ID = MOCK_SSDP_DATA.copy()
del MOCK_NO_UNIQUE_ID[ATTR_UPNP_UDN]
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_NO_UNIQUE_ID
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_in_progress"
async def test_ssdp(hass: HomeAssistant, fc_class_mock):
"""Test starting a flow from discovery."""
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=fc_class_mock,
), patch("homeassistant.components.fritz.common.FritzStatus"), patch(
"homeassistant.components.fritz.async_setup_entry"
) as mock_setup_entry, patch(
"requests.get"
) as mock_request_get, patch(
"requests.post"
) as mock_request_post:
mock_request_get.return_value.status_code = 200
mock_request_get.return_value.content = MOCK_REQUEST
mock_request_post.return_value.status_code = 200
mock_request_post.return_value.text = MOCK_REQUEST
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_USERNAME: "fake_user",
CONF_PASSWORD: "fake_pass",
},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert mock_setup_entry.called
async def test_ssdp_exception(hass: HomeAssistant):
"""Test starting a flow from discovery but no device found."""
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=FritzConnectionException,
), patch("homeassistant.components.fritz.common.FritzStatus"):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_USERNAME: "fake_user",
CONF_PASSWORD: "fake_pass",
},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
async def test_import(hass: HomeAssistant, fc_class_mock):
"""Test importing."""
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=fc_class_mock,
), patch("homeassistant.components.fritz.common.FritzStatus"), patch(
"homeassistant.components.fritz.async_setup_entry"
) as mock_setup_entry, patch(
"requests.get"
) as mock_request_get, patch(
"requests.post"
) as mock_request_post:
mock_request_get.return_value.status_code = 200
mock_request_get.return_value.content = MOCK_REQUEST
mock_request_post.return_value.status_code = 200
mock_request_post.return_value.text = MOCK_REQUEST
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=MOCK_IMPORT_CONFIG
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] is None
assert result["data"][CONF_USERNAME] == "username"
await hass.async_block_till_done()
assert mock_setup_entry.called
async def test_options_flow(hass: HomeAssistant, fc_class_mock):
"""Test options flow."""
mock_config = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
mock_config.add_to_hass(hass)
with patch(
"homeassistant.components.fritz.common.FritzConnection",
side_effect=fc_class_mock,
), patch("homeassistant.components.fritz.common.FritzStatus"), patch(
"homeassistant.components.fritz.common.FritzBoxTools"
):
result = await hass.config_entries.options.async_init(mock_config.entry_id)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_init(mock_config.entry_id)
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_CONSIDER_HOME: 37,
},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert mock_config.options[CONF_CONSIDER_HOME] == 37
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import tempfile
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.eager import context
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator import util
from tensorflow.python.estimator.export.export import build_all_signature_defs
from tensorflow.python.estimator.export.export import get_temp_export_dir
from tensorflow.python.estimator.export.export import get_timestamped_export_dir
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
from tensorflow.python.util import nest
_VALID_MODEL_FN_ARGS = set(
['features', 'labels', 'mode', 'params', 'self', 'config'])
class Estimator(object):
"""Estimator class to train and evaluate TensorFlow models.
The `Estimator` object wraps a model which is specified by a `model_fn`,
which, given inputs and a number of other parameters, returns the ops
necessary to perform training, evaluation, or predictions.
All outputs (checkpoints, event files, etc.) are written to `model_dir`, or a
subdirectory thereof. If `model_dir` is not set, a temporary directory is
used.
The `config` argument can be passed a `RunConfig` object containing information
about the execution environment. It is passed on to the `model_fn`, if the
`model_fn` has a parameter named "config" (and input functions in the same
manner). If the `config` parameter is not passed, it is instantiated by the
`Estimator`. Not passing config means that defaults useful for local execution
are used. `Estimator` makes config available to the model (for instance, to
allow specialization based on the number of workers available), and also uses
some of its fields to control internals, especially regarding checkpointing.
The `params` argument contains hyperparameters. It is passed to the
`model_fn`, if the `model_fn` has a parameter named "params", and to the input
functions in the same manner. `Estimator` only passes `params` along; it does
not inspect them. The structure of `params` is therefore entirely up to the
developer.
None of `Estimator`'s methods can be overridden in subclasses (its
constructor enforces this). Subclasses should use `model_fn` to configure
the base class, and may add methods implementing specialized functionality.
@compatibility(eager)
Estimators are not compatible with eager execution.
@end_compatibility
"""
def __init__(self, model_fn, model_dir=None, config=None, params=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: This is the first item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same.
* `labels`: This is the second item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same (for multi-head models). If
mode is `ModeKeys.PREDICT`, `labels=None` will be passed. If
the `model_fn`'s signature does not accept `mode`, the
`model_fn` must still be able to handle `labels=None`.
* `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in the `params` parameter. This allows
the Estimator to be configured from hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`, or `model_dir`.
* Returns:
`EstimatorSpec`
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
Raises:
RuntimeError: If eager execution is enabled.
ValueError: parameters of `model_fn` don't match `params`.
ValueError: if this is called via a subclass and if that class overrides
a member of `Estimator`.
"""
if context.in_eager_mode():
raise RuntimeError(
'Estimators are not supported when eager execution is enabled.')
Estimator._assert_members_are_not_overridden(self)
if config is None:
self._config = run_config.RunConfig()
logging.info('Using default config.')
else:
if not isinstance(config, run_config.RunConfig):
raise ValueError(
'config must be an instance of RunConfig, but provided %s.' %
config)
self._config = config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(alanyee): remove this suppression after it is no longer needed
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
# pylint: enable=g-doc-exception
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
self._device_fn = _get_replica_device_setter(self._config)
if model_fn is None:
raise ValueError('model_fn must be provided to Estimator.')
_verify_model_fn_args(model_fn, params)
self._model_fn = model_fn
self._params = copy.deepcopy(params or {})
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return copy.deepcopy(self._config)
@property
def params(self):
return copy.deepcopy(self._params)
@property
def model_fn(self):
"""Returns the model_fn which is bound to self.params.
Returns:
The model_fn with following signature:
`def model_fn(features, labels, mode, config)`
"""
def public_model_fn(features, labels, mode, config):
return self._call_model_fn(features, labels, mode, config)
return public_model_fn
# TODO(ispir): support a list of names
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
Raises:
ValueError: If the Estimator has not produced a checkpoint yet.
"""
_check_checkpoint_available(self.model_dir)
return training.load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
Raises:
ValueError: If the Estimator has not produced a checkpoint yet.
"""
_check_checkpoint_available(self.model_dir)
return [name for name, _ in training.list_variables(self.model_dir)]
def latest_checkpoint(self):
"""Finds the filename of latest saved checkpoint file in `model_dir`.
Returns:
The full path to the latest checkpoint or `None` if no checkpoint was
found.
"""
return saver.latest_checkpoint(self.model_dir)
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
"""Trains a model given training data input_fn.
Args:
input_fn: Input function returning a tuple of:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the training loop.
steps: Number of steps for which to train the model. If `None`, train
forever or train until `input_fn` generates the `OutOfRange` error or
`StopIteration` exception. `steps` works incrementally: if you call
train(steps=10) twice, training occurs for 20 steps in total. If
`OutOfRange` or `StopIteration` occurs in the middle, training stops
before 20 steps. If you do not want incremental behavior, set
`max_steps` instead. If set, `max_steps` must be `None`.
max_steps: Number of total steps for which to train the model. If `None`,
train forever or train until input_fn generates the `OutOfRange` error
or `StopIteration` exception. If set, `steps` must be `None`. If
`OutOfRange` or `StopIteration` occurs in the middle, training stops
before `max_steps` steps.
Two calls to `train(steps=100)` mean 200 training
iterations. On the other hand, two calls to `train(max_steps=100)` mean
that the second call will not do any iterations, since the first call
did all 100 steps.
saving_listeners: list of `CheckpointSaverListener` objects. Used for
callbacks that run immediately before or after checkpoint savings.
Returns:
`self`, for chaining.
Raises:
ValueError: If both `steps` and `max_steps` are not `None`.
ValueError: If either `steps` or `max_steps` is <= 0.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
if steps is not None and steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
if max_steps is not None and max_steps <= 0:
raise ValueError(
'Must specify max_steps > 0, given: {}'.format(max_steps))
if max_steps is not None:
start_step = _load_global_step_from_checkpoint_dir(self._model_dir)
if max_steps <= start_step:
logging.info('Skipping training since max_steps has already been reached.')
return self
hooks = _check_hooks_type(hooks)
hooks.extend(self._convert_train_steps_to_hooks(steps, max_steps))
saving_listeners = _check_listeners_type(saving_listeners)
loss = self._train_model(input_fn, hooks, saving_listeners)
logging.info('Loss for final step: %s.', loss)
return self
def _convert_train_steps_to_hooks(self, steps, max_steps):
if steps is not None or max_steps is not None:
return [training.StopAtStepHook(steps, max_steps)]
else:
return []
def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None,
name=None):
"""Evaluates the model given evaluation data input_fn.
For each step, calls `input_fn`, which returns one batch of data.
Evaluates until:
- `steps` batches are processed, or
- `input_fn` raises an end-of-input exception (`OutOfRangeError` or
`StopIteration`).
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or
`SparseTensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
steps: Number of steps for which to evaluate the model. If `None`, evaluates
until `input_fn` raises an end-of-input exception.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the evaluation call.
checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the
latest checkpoint in `model_dir` is used.
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data. Metrics for
different evaluations are saved in separate folders, and appear
separately in tensorboard.
Returns:
A dict containing the evaluation metrics specified in `model_fn` keyed by
name, as well as an entry `global_step` which contains the value of the
global step for which this evaluation was performed.
Raises:
ValueError: If `steps <= 0`.
ValueError: If no model has been trained, namely `model_dir`, or the
given `checkpoint_path` is empty.
"""
hooks = _check_hooks_type(hooks)
hooks.extend(self._convert_eval_steps_to_hooks(steps))
return self._evaluate_model(
input_fn=input_fn,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
def _convert_eval_steps_to_hooks(self, steps):
if steps is None:
return []
if steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
return [evaluation._StopAfterNEvalsHook(num_evals=steps)] # pylint: disable=protected-access
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None):
"""Yields predictions for given features.
Args:
input_fn: Input function returning features which is a dictionary of
string feature name to `Tensor` or `SparseTensor`. If it returns a
tuple, first item is extracted as features. Prediction continues until
`input_fn` raises an end-of-input exception (`OutOfRangeError` or
`StopIteration`).
predict_keys: list of `str`, name of the keys to predict. It is used if
the `EstimatorSpec.predictions` is a `dict`. If `predict_keys` is used
then the rest of the predictions will be filtered from the dictionary. If
`None`, returns all.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the prediction call.
checkpoint_path: Path of a specific checkpoint to predict. If `None`, the
latest checkpoint in `model_dir` is used.
Yields:
Evaluated values of `predictions` tensors.
Raises:
ValueError: Could not find a trained model in model_dir.
ValueError: If the batch lengths of predictions are not the same.
ValueError: If there is a conflict between `predict_keys` and
`predictions`. For example if `predict_keys` is not `None` but
`EstimatorSpec.predictions` is not a `dict`.
"""
hooks = _check_hooks_type(hooks)
# Check that model has been trained.
if not checkpoint_path:
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise ValueError('Could not find trained model in model_dir: {}.'.format(
self._model_dir))
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
self._create_and_assert_global_step(g)
features = self._get_features_from_input_fn(
input_fn, model_fn_lib.ModeKeys.PREDICT)
estimator_spec = self._call_model_fn(
features, None, model_fn_lib.ModeKeys.PREDICT, self.config)
predictions = self._extract_keys(estimator_spec.predictions, predict_keys)
with training.MonitoredSession(
session_creator=training.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=estimator_spec.scaffold,
config=self._session_config),
hooks=hooks) as mon_sess:
while not mon_sess.should_stop():
preds_evaluated = mon_sess.run(predictions)
if not isinstance(predictions, dict):
for pred in preds_evaluated:
yield pred
else:
for i in range(self._extract_batch_length(preds_evaluated)):
yield {
key: value[i]
for key, value in six.iteritems(preds_evaluated)
}
def _assert_members_are_not_overridden(self):
"""Asserts members of `Estimator` are not overridden."""
allowed_overrides = set(['_call_input_fn', '_create_global_step',
'_convert_train_steps_to_hooks',
'_convert_eval_steps_to_hooks'])
estimator_members = set([m for m in Estimator.__dict__.keys()
if not m.startswith('__')])
subclass_members = set(self.__class__.__dict__.keys())
common_members = estimator_members & subclass_members - allowed_overrides
overridden_members = [
m for m in common_members
if Estimator.__dict__[m] != self.__class__.__dict__[m]]
if overridden_members:
raise ValueError(
'Subclasses of Estimator cannot override members of Estimator. '
'{} does override {}'.format(self.__class__, overridden_members))
def export_savedmodel(
self, export_dir_base, serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
This method builds a new graph by first calling the
serving_input_receiver_fn to obtain feature `Tensor`s, and then calling
this `Estimator`'s model_fn to generate the model graph based on those
features. It restores the given checkpoint (or, lacking that, the most
recent checkpoint) into this graph in a fresh session. Finally it creates
a timestamped export directory below the given export_dir_base, and writes
a `SavedModel` into it containing a single `MetaGraphDef` saved from this
session.
The exported `MetaGraphDef` will provide one `SignatureDef` for each
element of the export_outputs dict returned from the model_fn, named using
the same keys. One of these keys is always
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, indicating which
signature will be served when a serving request does not specify one.
For each signature, the outputs are provided by the corresponding
`ExportOutput`s, and the inputs are always the input receivers provided by
the serving_input_receiver_fn.
Extra assets may be written into the SavedModel via the extra_assets
argument. This should be a dict, where each key gives a destination path
(including the filename) relative to the assets.extra directory. The
corresponding value gives the full path of the source file to be copied.
For example, the simple case of copying a single file without renaming it
is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and
returns a `ServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
ValueError: if no serving_input_receiver_fn is provided, no export_outputs
are provided, or no checkpoint can be found.
"""
if serving_input_receiver_fn is None:
raise ValueError('serving_input_receiver_fn must be defined.')
with ops.Graph().as_default() as g:
self._create_and_assert_global_step(g)
random_seed.set_random_seed(self._config.tf_random_seed)
serving_input_receiver = serving_input_receiver_fn()
# Call the model_fn and collect the export_outputs.
estimator_spec = self._call_model_fn(
features=serving_input_receiver.features,
labels=None,
mode=model_fn_lib.ModeKeys.PREDICT,
config=self.config)
# Build the SignatureDefs from receivers and all outputs
signature_def_map = build_all_signature_defs(
serving_input_receiver.receiver_tensors,
estimator_spec.export_outputs,
serving_input_receiver.receiver_tensors_alternatives)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise ValueError("Couldn't find trained model at %s." % self._model_dir)
export_dir = get_timestamped_export_dir(export_dir_base)
temp_export_dir = get_temp_export_dir(export_dir)
# TODO(soergel): Consider whether MonitoredSession makes sense here
with tf_session.Session() as session:
saver_for_restore = estimator_spec.scaffold.saver or saver.Saver(
sharded=True)
saver_for_restore.restore(session, checkpoint_path)
# TODO(b/36111876): replace legacy_init_op with main_op mechanism
# pylint: disable=protected-access
local_init_op = (
estimator_spec.scaffold.local_init_op or
monitored_session.Scaffold._default_local_init_op())
# pylint: enable=protected-access
# Perform the export
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=local_init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(temp_export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
def _get_features_from_input_fn(self, input_fn, mode):
"""Extracts the `features` from return values of `input_fn`."""
result = self._call_input_fn(input_fn, mode)
if isinstance(result, (list, tuple)):
# Unconditionally drop the label (the second element of result).
result = result[0]
if not _has_dataset_or_queue_runner(result):
logging.warning('Input graph does not use tf.data.Dataset or contain a '
'QueueRunner. That means predict yields forever. '
'This is probably a mistake.')
return result
def _get_features_and_labels_from_input_fn(self, input_fn, mode):
result = self._call_input_fn(input_fn, mode)
if isinstance(result, (list, tuple)):
if len(result) != 2:
raise ValueError(
'input_fn should return (features, labels) as a len 2 tuple.')
return result
return result, None
def _extract_batch_length(self, preds_evaluated):
"""Extracts batch length of predictions."""
batch_length = None
for key, value in six.iteritems(preds_evaluated):
batch_length = batch_length or value.shape[0]
if value.shape[0] != batch_length:
raise ValueError('Batch length of predictions should be the same. %s has '
'a different batch length than the others.' % key)
return batch_length
def _extract_keys(self, predictions, predict_keys):
"""Extracts `predict_keys` from `predictions`."""
if not predict_keys:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'predict_keys argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in predict_keys
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, predict_keys))
return predictions
def _create_global_step(self, graph):
"""Creates the global step tensor in graph.
The global step tensor must be an integer type with name 'global_step' and
be added to the collection ${tf.GraphKeys.GLOBAL_STEP}.
Args:
graph: The graph in which to create the global step tensor.
Returns:
The global step `Tensor`.
"""
return training.create_global_step(graph)
def _create_and_assert_global_step(self, graph):
"""Creates and asserts properties of the global step.
Args:
graph: The graph in which to create the global step tensor.
Returns:
The global step `Tensor`.
"""
step = self._create_global_step(graph)
assert step == training.get_global_step()
assert step.dtype.is_integer
return step
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
Either features or (features, labels) where features and labels are:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
Raises:
ValueError: if input_fn takes invalid arguments.
"""
del mode # unused
input_fn_args = util.fn_args(input_fn)
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params
if 'config' in input_fn_args:
kwargs['config'] = self.config
with ops.device('/cpu:0'):
return input_fn(**kwargs)
def _call_model_fn(self, features, labels, mode, config):
"""Calls model function.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
config: RunConfig
Returns:
An `EstimatorSpec` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
model_fn_args = util.fn_args(self._model_fn)
kwargs = {}
if 'labels' in model_fn_args:
kwargs['labels'] = labels
else:
if labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = config
model_fn_results = self._model_fn(features=features, **kwargs)
if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec):
raise ValueError('model_fn should return an EstimatorSpec.')
return model_fn_results
def _train_model(self, input_fn, hooks, saving_listeners):
worker_hooks = []
with ops.Graph().as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step_tensor = self._create_and_assert_global_step(g)
training_util._get_or_create_global_step_read() # pylint: disable=protected-access
features, labels = self._get_features_and_labels_from_input_fn(
input_fn, model_fn_lib.ModeKeys.TRAIN)
estimator_spec = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
# Check if the user created a loss summary, and add one if they didn't.
# We assume here that the summary is called 'loss'. If it is not, we will
# make another one with the name 'loss' to ensure it shows up in the right
# graph in TensorBoard.
if not any([x.op.name == 'loss'
for x in ops.get_collection(ops.GraphKeys.SUMMARIES)]):
summary.scalar('loss', estimator_spec.loss)
ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
worker_hooks.extend(hooks)
worker_hooks.extend([
training.NanTensorHook(estimator_spec.loss),
training.LoggingTensorHook(
{
'loss': estimator_spec.loss,
'step': global_step_tensor
},
every_n_iter=100)
])
worker_hooks.extend(estimator_spec.training_hooks)
if not (estimator_spec.scaffold.saver or
ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
training.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
self._config.keep_checkpoint_every_n_hours),
defer_build=True,
save_relative_paths=True))
chief_hooks = []
all_hooks = worker_hooks + list(estimator_spec.training_chief_hooks)
saver_hooks = [
h for h in all_hooks if isinstance(h, training.CheckpointSaverHook)]
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
if not saver_hooks:
chief_hooks = [
training.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=estimator_spec.scaffold)
]
saver_hooks = [chief_hooks[0]]
if saving_listeners:
if not saver_hooks:
raise ValueError(
'There should be a CheckpointSaverHook to use saving_listeners. '
'Please set one of the RunConfig.save_checkpoints_steps or '
'RunConfig.save_checkpoints_secs.')
else:
# It is expected to have one CheckpointSaverHook. If multiple, we pick
# up the first one to add listener.
saver_hooks[0]._listeners.extend(saving_listeners) # pylint: disable=protected-access
with training.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=estimator_spec.scaffold,
hooks=worker_hooks,
chief_only_hooks=(
tuple(chief_hooks) + tuple(estimator_spec.training_chief_hooks)),
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config,
log_step_count_steps=self._config.log_step_count_steps) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
return loss
def _evaluate_model(self,
input_fn,
hooks=None,
checkpoint_path=None,
name=''):
"""Evaluates the model using the training.evaluation library."""
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise ValueError('Could not find trained model in model_dir: {}.'.
format(self._model_dir))
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step_tensor = self._create_and_assert_global_step(g)
features, labels = self._get_features_and_labels_from_input_fn(
input_fn, model_fn_lib.ModeKeys.EVAL)
estimator_spec = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL, self.config)
if model_fn_lib.LOSS_METRIC_KEY in estimator_spec.eval_metric_ops:
raise ValueError(
'Metric with name "%s" is not allowed, because Estimator ' % (
model_fn_lib.LOSS_METRIC_KEY) +
'already defines a default metric with the same name.')
estimator_spec.eval_metric_ops[
model_fn_lib.LOSS_METRIC_KEY] = metrics_lib.mean(estimator_spec.loss)
update_op, eval_dict = _extract_metric_update_ops(
estimator_spec.eval_metric_ops)
if ops.GraphKeys.GLOBAL_STEP in eval_dict:
raise ValueError(
'Metric with name `global_step` is not allowed, because Estimator '
'already defines a default metric with the same name.')
eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor
all_hooks = list(hooks or [])
all_hooks.extend(list(estimator_spec.evaluation_hooks or []))
eval_results = evaluation._evaluate_once( # pylint: disable=protected-access
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=estimator_spec.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=all_hooks,
config=self._session_config)
_write_dict_to_summary(
output_dir=eval_dir,
dictionary=eval_results,
current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])
return eval_results
def _check_checkpoint_available(model_dir):
latest_path = saver.latest_checkpoint(model_dir)
if not latest_path:
raise ValueError(
'Could not find trained model in model_dir: {}.'.format(model_dir))
def _check_hooks_type(hooks):
"""Returns hooks if all are SessionRunHook, raises TypeError otherwise."""
hooks = list(hooks or [])
for h in hooks:
if not isinstance(h, training.SessionRunHook):
raise TypeError('Hooks must be a SessionRunHook, given: {}'.format(h))
return hooks
def _check_listeners_type(saving_listeners):
"""Check listeners type."""
listeners = list(saving_listeners or [])
for l in listeners:
if not isinstance(l, training.CheckpointSaverListener):
raise TypeError(
'saving_listeners must be a list of CheckpointSaverListener, '
'given: {}'.format(l))
return listeners
def _get_replica_device_setter(config):
"""Creates a replica device setter if required as a default device_fn.
`Estimator` uses ReplicaDeviceSetter as a default device placer. It sets the
distributed related arguments such as number of ps_replicas based on given
config.
Args:
config: A `RunConfig` instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableV2', 'MutableHashTableOfTensors',
'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
'MutableDenseHashTableV2'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return training.replica_device_setter(
ps_tasks=config.num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
ps_ops=ps_ops,
cluster=config.cluster_spec)
else:
return None
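# Illustrative behaviour: with config.num_ps_replicas == 2 and
# config.task_type == 'worker', variable ops matching ps_ops are placed on
# the parameter-server tasks while other ops stay on
# '/job:worker/task:<task_id>'; with no ps replicas the function returns None
# and default device placement applies.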
def _verify_model_fn_args(model_fn, params):
"""Verifies model fn arguments."""
args = set(util.fn_args(model_fn))
if 'features' not in args:
raise ValueError('model_fn (%s) must include features argument.' % model_fn)
if params is not None and 'params' not in args:
raise ValueError('model_fn (%s) does not include params argument, '
'but params (%s) is passed to Estimator.' % (model_fn,
params))
if params is None and 'params' in args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
non_valid_args = list(args - _VALID_MODEL_FN_ARGS)
if non_valid_args:
    raise ValueError('model_fn (%s) has the following unexpected args: %s' %
                     (model_fn, non_valid_args))
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
try:
checkpoint_reader = training.NewCheckpointReader(
training.latest_checkpoint(checkpoint_dir))
return checkpoint_reader.get_tensor(ops.GraphKeys.GLOBAL_STEP)
except: # pylint: disable=bare-except
return 0
def _extract_metric_update_ops(eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
# Sort metrics lexicographically so graph is identical every time.
for name, metric_ops in sorted(six.iteritems(eval_dict)):
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
if update_ops:
update_op = control_flow_ops.group(*update_ops)
else:
update_op = None
return update_op, value_ops
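# Illustrative only: an eval_dict such as {'accuracy': (acc_value, acc_update)}
# produces value_ops == {'accuracy': acc_value} and a single grouped update_op
# wrapping acc_update; an empty eval_dict yields update_op is None.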
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v)
for k, v in sorted(six.iteritems(dictionary)))
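# For example, _dict_to_str({'loss': 0.25, 'global_step': 100}) returns
# 'global_step = 100, loss = 0.25' (keys are sorted lexicographically).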
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = writer_cache.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
value.simple_value = int(dictionary[key])
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, '
'np.int32 or int.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
def _has_dataset_or_queue_runner(maybe_tensor):
"""Returns True if TF dataset or QueueRunner has been used."""
# Check TF dataset first. Here, we use a simple algorithm to check the top
# level Tensors only, which should be sufficient for most users.
tensors = [x for x in nest.flatten(maybe_tensor) if isinstance(x, ops.Tensor)]
if any([t.op.type == 'IteratorGetNext' for t in tensors]):
return True
# Now, check queue.
return ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS)
| |
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
class UnaryFunctionsTestBase(unittest.TestCase):
def make_data(self):
raise NotImplementedError
def setUp(self):
self.x, self.gy = self.make_data()
self.check_forward_options = {'atol': 1e-7, 'rtol': 1e-7}
if self.dtype == numpy.float16:
self.check_backward_options = {'atol': 3e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 3e-3, 'rtol': 1e-2}
else:
self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_double_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
def check_forward(self, op, op_np, x_data):
x = chainer.Variable(x_data)
y = op(x)
self.assertEqual(x.data.dtype, y.data.dtype)
testing.assert_allclose(
op_np(self.x), y.data, **self.check_forward_options)
def check_forward_cpu(self, op, op_np):
self.check_forward(op, op_np, self.x)
def check_forward_gpu(self, op, op_np):
self.check_forward(op, op_np, cuda.to_gpu(self.x))
def check_backward(self, op, x_data, y_grad):
gradient_check.check_backward(
op, x_data, y_grad, dtype=numpy.float64,
**self.check_backward_options)
def check_backward_cpu(self, op):
self.check_backward(op, self.x, self.gy)
def check_backward_gpu(self, op):
self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, op, x_data, y_grad, y_grad_grad):
gradient_check.check_double_backward(
op, x_data, y_grad, y_grad_grad, dtype=numpy.float64,
**self.check_double_backward_options)
def check_double_backward_cpu(self, op):
self.check_double_backward(op, self.x, self.gy, self.ggy)
def check_double_backward_gpu(self, op):
self.check_double_backward(op, cuda.to_gpu(
self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggy))
def check_label(self, op, expected):
self.assertEqual(op().label, expected)
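# Each class below is expanded by testing.parameterize(*testing.product(...))
# into one test case per (shape, dtype) combination, e.g. shape=(3, 2) with
# dtype=numpy.float16, so every forward/backward check runs for all dtypes.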
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestExp(UnaryFunctionsTestBase):
def make_data(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x, gy
def test_forward_cpu(self):
self.check_forward_cpu(F.exp, numpy.exp)
@attr.gpu
def test_forward_gpu(self):
self.check_forward_gpu(F.exp, numpy.exp)
def test_backward_cpu(self):
self.check_backward_cpu(F.exp)
@attr.gpu
def test_backward_gpu(self):
self.check_backward_gpu(F.exp)
def test_label(self):
self.check_label(F.Exp, 'exp')
def test_double_backward_cpu(self):
self.check_double_backward_cpu(F.exp)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward_gpu(F.exp)
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestLog(UnaryFunctionsTestBase):
def make_data(self):
x = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x, gy
def test_forward_cpu(self):
self.check_forward_cpu(F.log, numpy.log)
@attr.gpu
def test_forward_gpu(self):
self.check_forward_gpu(F.log, numpy.log)
def test_backward_cpu(self):
self.check_backward_cpu(F.log)
@attr.gpu
def test_backward_gpu(self):
self.check_backward_gpu(F.log)
def test_label(self):
self.check_label(F.Log, 'log')
def test_double_backward_cpu(self):
self.check_double_backward_cpu(F.log)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward_gpu(F.log)
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestLog2(UnaryFunctionsTestBase):
def make_data(self):
x = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x, gy
def test_forward_cpu(self):
self.check_forward_cpu(F.log2, numpy.log2)
@attr.gpu
def test_forward_gpu(self):
self.check_forward_gpu(F.log2, numpy.log2)
def test_backward_cpu(self):
self.check_backward_cpu(F.log2)
@attr.gpu
def test_backward_gpu(self):
self.check_backward_gpu(F.log2)
def test_label(self):
self.check_label(F.Log2, 'log2')
def test_double_backward_cpu(self):
self.check_double_backward_cpu(F.log2)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward_gpu(F.log2)
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestLog10(UnaryFunctionsTestBase):
def make_data(self):
x = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x, gy
def test_forward_cpu(self):
self.check_forward_cpu(F.log10, numpy.log10)
@attr.gpu
def test_forward_gpu(self):
self.check_forward_gpu(F.log10, numpy.log10)
def test_backward_cpu(self):
self.check_backward_cpu(F.log10)
@attr.gpu
def test_backward_gpu(self):
self.check_backward_gpu(F.log10)
def test_label(self):
self.check_label(F.Log10, 'log10')
def test_double_backward_cpu(self):
        self.check_double_backward_cpu(F.log10)
@attr.gpu
def test_double_backward_gpu(self):
        self.check_double_backward_gpu(F.log10)
testing.run_module(__name__, __file__)
| |
""" Juju helpers
"""
import asyncio
import json
import logging
import os
from pathlib import Path
from subprocess import DEVNULL, PIPE, CalledProcessError
from tempfile import NamedTemporaryFile
import yaml
from juju.client.jujudata import FileJujuData
from juju.controller import Controller
from juju.model import Model
from melddict import MeldDict
from conjureup import consts, errors, events, utils
from conjureup.app_config import app
from conjureup.utils import is_linux, juju_path, run, spew
def _check_bin_candidates(candidates, bin_property):
""" Checks a list of binary paths to verify they exist and are
executable
"""
# search candidate paths, in order, for the binary (ie juju, juju-wait)
# we don't use $PATH because we have definite preferences which one we use
# and we don't want to leave it up to the user
if not hasattr(app.juju, bin_property):
raise errors.AppConfigAttributeError(
"Unknown juju property: {}".format(bin_property))
for candidate in candidates:
if os.access(candidate, os.X_OK):
setattr(app.juju, bin_property, candidate)
app.log.debug("{} candidate found".format(bin_property))
break
else:
raise errors.JujuBinaryNotFound(
"Unable to locate a candidate executable for {}.".format(
candidates))
def set_bin_path():
""" Sets the juju binary path
"""
candidates = [
'/snap/bin/juju',
'/snap/bin/conjure-up.juju',
'/usr/bin/juju',
'/usr/local/bin/juju',
]
_check_bin_candidates(candidates, 'bin_path')
# Update $PATH so that we make sure this candidate is used
# first.
app.env['PATH'] = "{}:{}".format(Path(app.juju.bin_path).parent,
app.env['PATH'])
def set_wait_path():
""" Sets juju-wait path
"""
candidates = [
'/snap/bin/juju-wait',
'/snap/bin/conjure-up.juju-wait',
'/usr/bin/juju-wait',
'/usr/local/bin/juju-wait',
]
_check_bin_candidates(candidates, 'wait_path')
def read_config(name):
""" Reads a juju config file
Arguments:
name: filename without extension (ext defaults to yaml)
Returns:
    Dictionary parsed from the YAML file
"""
abs_path = os.path.join(juju_path(), "{}.yaml".format(name))
if not os.path.isfile(abs_path):
raise Exception("Cannot load {}".format(abs_path))
return yaml.safe_load(open(abs_path))
def get_bootstrap_config(controller_name):
try:
bootstrap_config = read_config("bootstrap-config")
except Exception:
        # We may be trying to access the bootstrap-config too quickly,
        # between the time juju bootstrap runs and the time this
        # function is called.
app.log.exception("Could not load bootstrap-config, "
"setting an empty controllers dict.")
bootstrap_config = dict(controllers={})
if 'controllers' not in bootstrap_config:
raise Exception("Could not read Juju's bootstrap-config.yaml")
cd = bootstrap_config['controllers'].get(controller_name, None)
if cd is None:
raise errors.ControllerNotFoundException(
"'{}' not found in Juju's "
"bootstrap-config.yaml".format(controller_name))
return cd
def get_current_controller():
""" Grabs the current default controller
"""
try:
return get_controllers()['current-controller']
except KeyError:
return None
def get_controller(id):
""" Return specific controller
Arguments:
id: controller id
"""
if 'controllers' in get_controllers() \
and id in get_controllers()['controllers']:
return get_controllers()['controllers'][id]
return None
def get_controller_in_cloud(cloud):
""" Returns a controller that is bootstrapped on the named cloud
Arguments:
cloud: cloud to check for
Returns:
available controller or None if nothing available
"""
controllers = get_controllers()['controllers'].items()
for controller_name, controller in controllers:
if cloud == controller['cloud']:
return controller_name
return None
async def model_available():
""" Check whether selected model is already available.
"""
if app.provider.controller is None:
raise Exception("No controller selected")
if app.provider.model is None:
raise Exception("No model selected.")
controller = Controller(app.loop)
await controller.connect(app.provider.controller)
try:
models = await controller.list_models()
return app.provider.model in models
finally:
await controller.disconnect()
async def connect_model():
""" Connect to the selected model.
"""
if app.provider.controller is None:
raise Exception("No controller selected")
if app.provider.model is None:
raise Exception("No model selected.")
app.juju.client = Model(app.loop)
model_name = '{}:{}'.format(app.provider.controller,
app.provider.model)
await app.juju.client.connect(model_name)
events.ModelConnected.set()
async def create_model():
""" Creates the selected model.
"""
if app.provider.controller is None:
raise Exception("No controller selected")
if app.provider.model is None:
raise Exception("No model selected.")
controller = Controller(app.loop)
await controller.connect(app.provider.controller)
try:
app.juju.client = await controller.add_model(
model_name=app.provider.model,
cloud_name=app.provider.cloud,
region=app.provider.region,
credential_name=app.provider.credential,
config=app.conjurefile.get('model-config', None))
events.ModelConnected.set()
finally:
await controller.disconnect()
async def bootstrap(controller, cloud, model='conjure-up', credential=None):
""" Performs juju bootstrap
If not LXD pass along the newly defined credentials
Arguments:
controller: name of your controller
cloud: name of local or public cloud to deploy to
model: name of default model to create
credential: credentials key
"""
if app.provider.region is not None:
app.log.debug("Bootstrapping to set region: {}")
cloud = "{}/{}".format(app.provider.cloud, app.provider.region)
cmd = [app.juju.bin_path, "bootstrap",
cloud, controller, "--default-model", model]
def add_config(k, v):
cmd.extend(["--config", "{}={}".format(k, v)])
app.provider.model_defaults = MeldDict(app.provider.model_defaults or {})
app.provider.model_defaults.add(app.conjurefile.get('model-config', {}))
if app.provider.model_defaults:
for k, v in app.provider.model_defaults.items():
if v is not None:
add_config(k, v)
add_config("image-stream", "daily")
if app.conjurefile['http-proxy']:
add_config("http-proxy", app.conjurefile['http-proxy'])
if app.conjurefile['https-proxy']:
add_config("https-proxy", app.conjurefile['https-proxy'])
if app.conjurefile['apt-http-proxy']:
add_config("apt-http-proxy", app.conjurefile['apt-http-proxy'])
if app.conjurefile['apt-https-proxy']:
add_config("apt-https-proxy", app.conjurefile['apt-https-proxy'])
if app.conjurefile['no-proxy']:
add_config("no-proxy", app.conjurefile['no-proxy'])
if app.conjurefile['bootstrap-timeout']:
add_config("bootstrap-timeout", app.conjurefile['bootstrap-timeout'])
if app.conjurefile['bootstrap-to']:
cmd.extend(["--to", app.conjurefile['bootstrap-to']])
if app.conjurefile['bootstrap-series']:
cmd.extend(["--bootstrap-series", app.conjurefile['bootstrap-series']])
if credential is not None:
cmd.extend(["--credential", credential])
if app.conjurefile['debug']:
cmd.append("--debug")
app.log.debug("bootstrap cmd: {}".format(cmd))
log_file = '{}-bootstrap'.format(app.provider.controller)
path_base = str(Path(app.config['spell-dir']) / log_file)
out_path = path_base + '.out'
err_path = path_base + '.err'
rc, _, _ = await utils.arun(cmd, stdout=out_path, stderr=err_path)
if rc < 0:
raise errors.BootstrapInterrupt('Bootstrap killed by user')
elif rc > 0:
return False
events.ModelAvailable.set()
return True
def has_jaas_auth():
jaas_cookies = Path('~/.local/share/juju/cookies/jaas.json').expanduser()
if jaas_cookies.exists():
jaas_cookies = json.loads(jaas_cookies.read_text())
for cookie in jaas_cookies or []:
if cookie['Domain'] == consts.JAAS_DOMAIN:
return bool(cookie['Value'])
return False
async def register_controller(name, endpoint, email, password, twofa,
timeout=30, fail_cb=None, timeout_cb=None):
app.log.info('Registering controller {}'.format(name))
cmd = ['juju', 'login', '-B', endpoint, '-c', name]
proc = await asyncio.create_subprocess_exec(
*cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE,
)
try:
stdin = b''.join(b'%s\n' % bytes(f, 'utf8')
for f in [email, password, twofa])
stdout, stderr = await asyncio.wait_for(proc.communicate(stdin),
timeout)
stdout = stdout.decode('utf8')
stderr = stderr.decode('utf8')
prefix = 'Enter a name for this controller: '
if stderr.startswith(prefix):
# Juju has started putting this one prompt out on stderr
# instead of stdout for some reason, so we work around it.
stderr = stderr[len(prefix):]
except asyncio.TimeoutError:
proc.kill()
app.log.warning('Registration timed out')
if timeout_cb:
timeout_cb()
elif fail_cb:
fail_cb('Timed out')
return False
if proc.returncode != 0:
app.log.warning('Registration failed: {}'.format(stderr))
if fail_cb:
fail_cb(stderr)
return False
else:
            raise CalledProcessError(proc.returncode, cmd, stderr=stderr)
app.log.info('Registration complete')
return True
def autoload_credentials():
""" Automatically checks known places for cloud credentials
"""
try:
run('{} autoload-credentials'.format(
app.juju.bin_path), shell=True, check=True)
except CalledProcessError:
return False
return True
def get_credential(cloud, cred_name=None):
""" Get credential
Arguments:
cloud: cloud applicable to user credentials
cred_name: name of credential to get, or default
"""
creds = get_credentials()
if cloud not in creds.keys():
return None
cred = creds[cloud]
default_credential = cred.pop('default-credential', None)
cred.pop('default-region', None)
if cred_name is not None and cred_name in cred.keys():
return cred[cred_name]
elif default_credential is not None and default_credential in cred.keys():
return cred[default_credential]
elif len(cred) == 1:
return list(cred.values())[0]
else:
return None
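# Illustrative resolution order (cloud and credential names are hypothetical):
# given {'aws': {'default-credential': 'work', 'work': {...}, 'personal': {...}}},
# get_credential('aws') returns the 'work' entry, get_credential('aws',
# 'personal') returns the 'personal' entry, and an unknown cloud returns None.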
def get_credentials():
""" Get all locally cached credentials from Juju.
Returns:
Dict of credentials by cloud.
"""
try:
return FileJujuData().credentials()
except FileNotFoundError:
return {}
def get_regions(cloud):
""" List available regions for cloud
Arguments:
cloud: Cloud to list regions for
Returns:
Dictionary of all known regions for cloud
"""
sh = run('{} list-regions {} --format yaml'.format(app.juju.bin_path,
cloud),
shell=True, stdout=PIPE, stderr=PIPE)
stdout = sh.stdout.decode('utf8')
stderr = sh.stderr.decode('utf8')
if sh.returncode > 0:
raise Exception("Unable to list regions: {}".format(stderr))
if 'no regions' in stdout:
return {}
result = yaml.safe_load(stdout)
if not isinstance(result, dict):
msg = 'Unexpected response from regions: {}'.format(result)
app.log.error(msg)
utils.sentry_report(msg, level=logging.ERROR)
result = {}
return result
def get_clouds():
""" List available clouds
Returns:
Dictionary of all known clouds including newly created MAAS/Local
"""
sh = run('{} list-clouds --local --format yaml'.format(app.juju.bin_path),
shell=True, stdout=PIPE, stderr=PIPE)
if sh.returncode > 0:
raise Exception(
"Unable to list clouds: {}".format(sh.stderr.decode('utf8'))
)
return yaml.safe_load(sh.stdout.decode('utf8')) or {}
def get_compatible_clouds(cloud_types=None):
""" List cloud types compatible with the current spell and controller.
Arguments:
    cloud_types: optional initial list of cloud types to filter
Returns:
List of cloud types
"""
if cloud_types is None:
clouds = get_clouds()
cloud_types = set(c['type'] for c in clouds.values())
# custom providers don't show up in list-clouds but are valid types
cloud_types |= set(consts.CUSTOM_PROVIDERS)
else:
cloud_types = set(cloud_types)
_normalize_cloud_types(cloud_types)
if not is_linux():
# LXD not available on macOS
cloud_types -= {'localhost'}
if app.provider and app.provider.controller:
# if we already have a controller, we should query
# it via the API for what clouds it supports; for now,
# though, just assume it's JAAS and hard-code the options
cloud_types &= consts.JAAS_CLOUDS
whitelist = set(app.metadata.cloud_whitelist)
blacklist = set(app.metadata.cloud_blacklist)
addons_dir = Path(app.config['spell-dir']) / 'addons'
for addon in app.selected_addons:
addon_file = addons_dir / addon / 'metadata.yaml'
addon_meta = yaml.safe_load(addon_file.read_text())
whitelist.update(addon_meta.get('cloud-whitelist', []))
blacklist.update(addon_meta.get('cloud-blacklist', []))
_normalize_cloud_types(whitelist)
_normalize_cloud_types(blacklist)
if len(whitelist) > 0:
return sorted(cloud_types & whitelist)
elif len(blacklist) > 0:
        return sorted(cloud_types - blacklist)
return sorted(cloud_types)
def _normalize_cloud_types(cloud_types):
if 'lxd' in cloud_types:
# normalize 'lxd' cloud type to localhost; 'lxd' can happen
# depending on how the controller was bootstrapped
cloud_types -= {'lxd'}
cloud_types |= {'localhost'}
if 'local' in cloud_types:
cloud_types -= {'local'}
cloud_types |= {'localhost'}
if 'aws' in cloud_types:
cloud_types -= {'aws'}
cloud_types |= {'ec2'}
if 'google' in cloud_types:
cloud_types -= {'google'}
cloud_types |= {'gce'}
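# Illustrative: a set such as {'lxd', 'aws', 'google'} is normalized in place
# to {'localhost', 'ec2', 'gce'}.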
def get_cloud_types_by_name():
""" Return a mapping of cloud names to their type.
This accounts for some normalizations that get_clouds() doesn't.
"""
clouds = {n: c['type'] for n, c in get_clouds().items()}
# normalize 'lxd' cloud type to localhost; 'lxd' can happen
# depending on how the controller was bootstrapped
for name, cloud_type in clouds.items():
if cloud_type == 'lxd':
clouds[name] = 'localhost'
for provider in consts.CUSTOM_PROVIDERS:
if provider not in clouds:
clouds[provider] = provider
return clouds
def add_cloud(name, config):
""" Adds a cloud
Arguments:
name: name of cloud to add
config: cloud configuration
"""
_config = {
'clouds': {
name: config
}
}
app.log.debug(_config)
with NamedTemporaryFile(mode='w', encoding='utf-8',
delete=False) as tempf:
output = yaml.safe_dump(_config, default_flow_style=False)
spew(tempf.name, output)
sh = run('{} add-cloud {} {}'.format(app.juju.bin_path,
name, tempf.name),
shell=True, stdout=PIPE, stderr=PIPE)
if sh.returncode > 0:
raise Exception(
"Unable to add cloud: {}".format(sh.stderr.decode('utf8')))
def get_cloud(name):
""" Return specific cloud information
Arguments:
name: name of cloud to query, ie. aws, lxd, local:provider
Returns:
Dictionary of cloud attributes
"""
if name in get_clouds().keys():
return get_clouds()[name]
raise LookupError("Unable to locate cloud: {}".format(name))
def constraints_to_dict(constraints):
"""
    Parses a constraint string into a dict. If `tags` or `spaces` constraints
    are present, their values are converted into lists. All other constraints
    are passed directly to juju for processing during deployment.
"""
new_constraints = {}
if not isinstance(constraints, str):
app.log.debug(
"Invalid constraints: {}, skipping".format(
constraints))
return new_constraints
list_constraints = [c for c in constraints.split(' ')
if c != ""]
for c in list_constraints:
try:
constraint, value = c.split('=')
if constraint in ['tags', 'spaces']:
value = value.split(',')
new_constraints[constraint] = value
except ValueError as e:
app.log.debug("Skipping constraint: {} ({})".format(c, e))
return new_constraints
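# Illustrative: constraints_to_dict('mem=8G tags=foo,bar') returns
# {'mem': '8G', 'tags': ['foo', 'bar']}; a malformed item such as 'cores'
# (no '=') is skipped with a debug log entry.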
def constraints_from_dict(cdict):
return " ".join(["{}={}".format(k, v) for k, v in cdict.items()])
def deploy(bundle):
""" Juju deploy bundle
Arguments:
bundle: Name of bundle to deploy, can be a path to local bundle file or
charmstore path.
"""
try:
return run('{} deploy {}'.format(app.juju.bin_path,
bundle), shell=True,
stdout=DEVNULL, stderr=PIPE)
except CalledProcessError as e:
raise e
def get_controller_info(name=None):
""" Returns information on current controller
Arguments:
    name: if set, show info for that controller; otherwise show the current one.
"""
cmd = '{} show-controller --format yaml'.format(
app.juju.bin_path)
if name is not None:
cmd += ' {}'.format(name)
sh = run(cmd, shell=True, stdout=PIPE, stderr=PIPE)
sh_out = sh.stdout.decode('utf8')
sh_err = sh.stderr.decode('utf8')
try:
data = yaml.safe_load(sh_out)
except yaml.parser.ParserError:
data = None
if sh.returncode != 0 or not data:
raise Exception("Unable to get info for "
"controller {}: {}".format(name, sh_err))
return next(iter(data.values()))
def get_controllers():
""" List available controllers
Returns:
List of known controllers
"""
sh = run('{} list-controllers --format yaml'.format(
app.juju.bin_path),
shell=True, stdout=PIPE, stderr=PIPE)
if sh.returncode > 0:
raise LookupError(
"Unable to list controllers: {}".format(sh.stderr.decode('utf8')))
env = yaml.safe_load(sh.stdout.decode('utf8'))
return env
def get_account(controller):
""" List account information for controller
Arguments:
controller: controller id
Returns:
Dictionary containing list of accounts for controller and the
current account in use.
"""
return get_accounts().get(controller, {})
def get_accounts():
""" List available accounts
Returns:
List of known accounts
"""
env = os.path.join(juju_path(), 'accounts.yaml')
if not os.path.isfile(env):
raise Exception(
"Unable to find: {}".format(env))
with open(env, 'r') as c:
        env = yaml.safe_load(c)
return env['controllers']
raise Exception("Unable to find accounts")
def get_model(controller, name):
""" List information for model
Arguments:
name: model name
controller: name of controller to work in
Returns:
Dictionary of model information
"""
models = get_models(controller)['models']
for m in models:
if m['short-name'] == name:
return m
raise LookupError(
"Unable to find model: {}".format(name))
async def destroy_model(controller, model):
""" Destroys a model within a controller
Arguments:
controller: name of controller
model: name of model to destroy
"""
proc = await asyncio.create_subprocess_exec(
'juju', 'destroy-model', '-y', ':'.join([controller, model]),
stdout=DEVNULL, stderr=PIPE)
_, stderr = await proc.communicate()
if proc.returncode > 0:
raise Exception(
"Unable to destroy model: {}".format(stderr.decode('utf8')))
events.ModelAvailable.clear()
def get_models(controller):
""" List available models
Arguments:
controller: existing controller to get models for
Returns:
List of known models
"""
sh = run('{} list-models --format yaml -c {}'.format(app.juju.bin_path,
controller),
shell=True, stdout=PIPE, stderr=PIPE)
if sh.returncode > 0:
raise LookupError(
"Unable to list models: {}".format(sh.stderr.decode('utf8')))
out = yaml.safe_load(sh.stdout.decode('utf8'))
return out
def get_current_model():
try:
return get_models()['current-model']
    except Exception:
return None
def version():
""" Returns version of Juju
"""
sh = run('{} version'.format(
app.juju.bin_path),
shell=True, stdout=PIPE, stderr=PIPE)
if sh.returncode > 0:
raise Exception(
"Unable to get Juju Version: {}".format(sh.stderr.decode('utf8')))
out = sh.stdout.decode('utf8')
if isinstance(out, list):
return out.pop()
else:
return out
async def wait_for_deployment(retries=3):
""" Waits for all deployed applications to settle
"""
if 'CONJURE_UP_MODE' in app.env and app.env['CONJURE_UP_MODE'] == "test":
retries = 0
cmd = [app.juju.wait_path, "-r{}".format(retries),
"-vwm", "{}:{}".format(app.provider.controller,
app.provider.model)]
out_path = str(Path(app.config['spell-dir']) / 'deploy-wait.out')
err_path = str(Path(app.config['spell-dir']) / 'deploy-wait.err')
ret, _, err_log = await utils.arun(cmd, stdout=out_path, stderr=err_path)
if ret != 0:
err_log_tail = err_log.splitlines()[-10:]
app.log.error('\n'.join(err_log_tail))
raise errors.DeploymentFailure(
"Some applications failed to start successfully.")
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class WorkflowProvisioningState(Enum):
not_specified = "NotSpecified"
accepted = "Accepted"
running = "Running"
ready = "Ready"
creating = "Creating"
created = "Created"
deleting = "Deleting"
deleted = "Deleted"
canceled = "Canceled"
failed = "Failed"
succeeded = "Succeeded"
moving = "Moving"
updating = "Updating"
registering = "Registering"
registered = "Registered"
unregistering = "Unregistering"
unregistered = "Unregistered"
completed = "Completed"
class WorkflowState(Enum):
not_specified = "NotSpecified"
completed = "Completed"
enabled = "Enabled"
disabled = "Disabled"
deleted = "Deleted"
suspended = "Suspended"
class SkuName(Enum):
not_specified = "NotSpecified"
free = "Free"
shared = "Shared"
basic = "Basic"
standard = "Standard"
premium = "Premium"
class ParameterType(Enum):
not_specified = "NotSpecified"
string = "String"
secure_string = "SecureString"
int_enum = "Int"
float_enum = "Float"
bool_enum = "Bool"
array = "Array"
object_enum = "Object"
secure_object = "SecureObject"
class WorkflowTriggerProvisioningState(Enum):
not_specified = "NotSpecified"
accepted = "Accepted"
running = "Running"
ready = "Ready"
creating = "Creating"
created = "Created"
deleting = "Deleting"
deleted = "Deleted"
canceled = "Canceled"
failed = "Failed"
succeeded = "Succeeded"
moving = "Moving"
updating = "Updating"
registering = "Registering"
registered = "Registered"
unregistering = "Unregistering"
unregistered = "Unregistered"
completed = "Completed"
class WorkflowStatus(Enum):
not_specified = "NotSpecified"
paused = "Paused"
running = "Running"
waiting = "Waiting"
succeeded = "Succeeded"
skipped = "Skipped"
suspended = "Suspended"
cancelled = "Cancelled"
failed = "Failed"
faulted = "Faulted"
timed_out = "TimedOut"
aborted = "Aborted"
ignored = "Ignored"
class RecurrenceFrequency(Enum):
not_specified = "NotSpecified"
second = "Second"
minute = "Minute"
hour = "Hour"
day = "Day"
week = "Week"
month = "Month"
year = "Year"
class DaysOfWeek(Enum):
sunday = "Sunday"
monday = "Monday"
tuesday = "Tuesday"
wednesday = "Wednesday"
thursday = "Thursday"
friday = "Friday"
saturday = "Saturday"
class DayOfWeek(Enum):
sunday = "Sunday"
monday = "Monday"
tuesday = "Tuesday"
wednesday = "Wednesday"
thursday = "Thursday"
friday = "Friday"
saturday = "Saturday"
class KeyType(Enum):
not_specified = "NotSpecified"
primary = "Primary"
secondary = "Secondary"
class IntegrationAccountSkuName(Enum):
not_specified = "NotSpecified"
free = "Free"
standard = "Standard"
class SchemaType(Enum):
not_specified = "NotSpecified"
xml = "Xml"
class MapType(Enum):
not_specified = "NotSpecified"
xslt = "Xslt"
class PartnerType(Enum):
not_specified = "NotSpecified"
b2_b = "B2B"
class AgreementType(Enum):
not_specified = "NotSpecified"
as2 = "AS2"
x12 = "X12"
edifact = "Edifact"
class HashingAlgorithm(Enum):
not_specified = "NotSpecified"
none = "None"
md5 = "MD5"
sha1 = "SHA1"
sha2256 = "SHA2256"
sha2384 = "SHA2384"
sha2512 = "SHA2512"
class EncryptionAlgorithm(Enum):
not_specified = "NotSpecified"
none = "None"
des3 = "DES3"
rc2 = "RC2"
aes128 = "AES128"
aes192 = "AES192"
aes256 = "AES256"
class SigningAlgorithm(Enum):
not_specified = "NotSpecified"
default = "Default"
sha1 = "SHA1"
sha2256 = "SHA2256"
sha2384 = "SHA2384"
sha2512 = "SHA2512"
class TrailingSeparatorPolicy(Enum):
not_specified = "NotSpecified"
not_allowed = "NotAllowed"
optional = "Optional"
mandatory = "Mandatory"
class X12CharacterSet(Enum):
not_specified = "NotSpecified"
basic = "Basic"
extended = "Extended"
utf8 = "UTF8"
class SegmentTerminatorSuffix(Enum):
not_specified = "NotSpecified"
none = "None"
cr = "CR"
lf = "LF"
crlf = "CRLF"
class X12DateFormat(Enum):
not_specified = "NotSpecified"
ccyymmdd = "CCYYMMDD"
yymmdd = "YYMMDD"
class X12TimeFormat(Enum):
not_specified = "NotSpecified"
hhmm = "HHMM"
hhmmss = "HHMMSS"
hhmms_sdd = "HHMMSSdd"
hhmms_sd = "HHMMSSd"
class UsageIndicator(Enum):
not_specified = "NotSpecified"
test = "Test"
information = "Information"
production = "Production"
class MessageFilterType(Enum):
not_specified = "NotSpecified"
include = "Include"
exclude = "Exclude"
class EdifactCharacterSet(Enum):
not_specified = "NotSpecified"
unob = "UNOB"
unoa = "UNOA"
unoc = "UNOC"
unod = "UNOD"
unoe = "UNOE"
unof = "UNOF"
unog = "UNOG"
unoh = "UNOH"
unoi = "UNOI"
unoj = "UNOJ"
unok = "UNOK"
unox = "UNOX"
unoy = "UNOY"
keca = "KECA"
class EdifactDecimalIndicator(Enum):
not_specified = "NotSpecified"
comma = "Comma"
decimal_enum = "Decimal"
| |
# Copyright 2012 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service."""
import contextlib
import mock
import mox
from oslo.config import cfg
from oslo import messaging
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from nova.api.ec2 import ec2utils
from nova.compute import arch
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova.conductor.tasks import live_migrate
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import quotas as quotas_obj
from nova import quota
from nova import rpc
from nova.scheduler import driver as scheduler_driver
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import cast_as_call
from nova.tests.unit.compute import test_compute
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_notifier
from nova.tests.unit import fake_server_actions
from nova.tests.unit import fake_utils
from nova import utils
CONF = cfg.CONF
CONF.import_opt('report_interval', 'nova.service')
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
def elevated(self):
"""Return a consistent elevated context so we can detect it."""
if not hasattr(self, '_elevated'):
self._elevated = super(FakeContext, self).elevated()
return self._elevated
class _BaseTestCase(object):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.db = None
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
def fake_deserialize_context(serializer, ctxt_dict):
self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
return self.context
self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
fake_deserialize_context)
fake_utils.stub_out_utils_spawn_n(self.stubs)
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
type_id = flavors.get_flavor_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = arch.X86_64
inst['os_type'] = 'Linux'
inst['availability_zone'] = 'fake-az'
inst.update(params)
return db.instance_create(self.context, inst)
def _do_update(self, instance_uuid, **updates):
return self.conductor.instance_update(self.context, instance_uuid,
updates, None)
def test_instance_update(self):
instance = self._create_fake_instance()
new_inst = self._do_update(instance['uuid'],
vm_state=vm_states.STOPPED)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
self.assertEqual(new_inst['vm_state'], instance['vm_state'])
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
if self.db is None:
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(KeyError,
self._do_update, 'any-uuid', foobar=1)
def test_migration_get_in_progress_by_host_and_node(self):
self.mox.StubOutWithMock(db,
'migration_get_in_progress_by_host_and_node')
db.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node')
self.assertEqual(result, 'fake-result')
def test_aggregate_metadata_get_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
db.aggregate_metadata_get_by_host(self.context, 'host',
'key').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_get_by_host(self.context,
'host', 'key')
self.assertEqual(result, 'result')
def test_bw_usage_update(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_update(*update_args, update_cells=True)
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_update(*update_args,
update_cells=True)
self.assertEqual(result, 'foo')
def test_provider_fw_rule_get_all(self):
fake_rules = ['a', 'b', 'c']
self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
self.mox.ReplayAll()
result = self.conductor.provider_fw_rule_get_all(self.context)
self.assertEqual(result, fake_rules)
def test_block_device_mapping_get_all_by_instance(self):
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
db.block_device_mapping_get_all_by_instance(
self.context, fake_inst['uuid']).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.block_device_mapping_get_all_by_instance(
self.context, fake_inst, legacy=False)
self.assertEqual(result, 'fake-result')
def test_vol_usage_update(self):
self.mox.StubOutWithMock(db, 'vol_usage_update')
self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
fake_inst = {'uuid': 'fake-uuid',
'project_id': 'fake-project',
'user_id': 'fake-user',
'availability_zone': 'fake-az',
}
db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
fake_inst['uuid'],
fake_inst['project_id'],
fake_inst['user_id'],
fake_inst['availability_zone'],
False).AndReturn('fake-usage')
compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')
self.mox.ReplayAll()
self.conductor.vol_usage_update(self.context, 'fake-vol',
22, 33, 44, 55, fake_inst, None, False)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('conductor.%s' % self.conductor_manager.host,
msg.publisher_id)
self.assertEqual('volume.usage', msg.event_type)
self.assertEqual('INFO', msg.priority)
self.assertEqual('fake-info', msg.payload)
def test_compute_node_create(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_compute_node_update(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(self.context, node['id'], {'fake': 'values'}).\
AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_update(self.context, node,
{'fake': 'values'})
self.assertEqual(result, 'fake-result')
def test_compute_node_delete(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_delete')
db.compute_node_delete(self.context, node['id']).AndReturn(None)
self.mox.ReplayAll()
result = self.conductor.compute_node_delete(self.context, node)
self.assertIsNone(result)
def test_task_log_get(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
'state').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', 'state')
self.assertEqual(result, 'result')
def test_task_log_get_with_no_state(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end',
'host', None).AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', None)
self.assertEqual(result, 'result')
def test_task_log_begin_task(self):
self.mox.StubOutWithMock(db, 'task_log_begin_task')
db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
'end', 'host', 'items',
'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_begin_task(
self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
self.assertEqual(result, 'result')
def test_task_log_end_task(self):
self.mox.StubOutWithMock(db, 'task_log_end_task')
db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
'host', 'errors', 'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_end_task(
self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
self.assertEqual(result, 'result')
def test_notify_usage_exists(self):
info = {
'audit_period_beginning': 'start',
'audit_period_ending': 'end',
'bandwidth': 'bw_usage',
'image_meta': {},
'extra': 'info',
}
instance = {
'system_metadata': [],
}
self.mox.StubOutWithMock(notifications, 'audit_period_bounds')
self.mox.StubOutWithMock(notifications, 'bandwidth_usage')
self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage')
notifications.audit_period_bounds(False).AndReturn(('start', 'end'))
notifications.bandwidth_usage(instance, 'start', True).AndReturn(
'bw_usage')
notifier = self.conductor_manager.notifier
compute_utils.notify_about_instance_usage(notifier,
self.context, instance,
'exists',
system_metadata={},
extra_usage_info=info)
self.mox.ReplayAll()
self.conductor.notify_usage_exists(self.context, instance, False, True,
system_metadata={},
extra_usage_info=dict(extra='info'))
def test_security_groups_trigger_members_refresh(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_members_refresh')
self.conductor_manager.security_group_api.trigger_members_refresh(
self.context, [1, 2, 3])
self.mox.ReplayAll()
self.conductor.security_groups_trigger_members_refresh(self.context,
[1, 2, 3])
def test_get_ec2_ids(self):
expected = {
'instance-id': 'ec2-inst-id',
'ami-id': 'ec2-ami-id',
'kernel-id': 'ami-kernel-ec2-kernelid',
'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
}
inst = {
'uuid': 'fake-uuid',
'kernel_id': 'ec2-kernelid',
'ramdisk_id': 'ec2-ramdiskid',
'image_ref': 'fake-image',
}
self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
self.mox.StubOutWithMock(ec2utils, 'image_type')
ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
expected['instance-id'])
ec2utils.glance_id_to_ec2_id(self.context,
inst['image_ref']).AndReturn(
expected['ami-id'])
for image_type in ['kernel', 'ramdisk']:
image_id = inst['%s_id' % image_type]
ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
ec2utils.glance_id_to_ec2_id(self.context, image_id,
'ami-' + image_type).AndReturn(
'ami-%s-ec2-%sid' % (image_type, image_type))
self.mox.ReplayAll()
result = self.conductor.get_ec2_ids(self.context, inst)
self.assertEqual(result, expected)
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests."""
def setUp(self):
super(ConductorTestCase, self).setUp()
self.conductor = conductor_manager.ConductorManager()
self.conductor_manager = self.conductor
def test_instance_get_by_uuid(self):
orig_instance = self._create_fake_instance()
copy_instance = self.conductor.instance_get_by_uuid(
self.context, orig_instance['uuid'], None)
self.assertEqual(orig_instance['name'],
copy_instance['name'])
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 1, 'device_name': 'foo',
'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume'}
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(fake_bdm)
fake_bdm2 = {'id': 1, 'device_name': 'foo2',
'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume'}
fake_bdm2 = fake_block_device.FakeDbBlockDeviceDict(fake_bdm2)
cells_rpcapi = self.conductor.cells_rpcapi
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(cells_rpcapi,
'bdm_update_or_create_at_top')
db.block_device_mapping_create(self.context,
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(
self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
create=True)
db.block_device_mapping_update(self.context, fake_bdm['id'],
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(
self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
create=False)
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
def test_instance_get_all_by_filters(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None, use_slave=False)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
None, False)
def test_instance_get_all_by_filters_use_slave(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None, use_slave=True)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None,
use_slave=True)
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(),
'host', None).AndReturn('result')
db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
'node').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context, 'host',
None, None)
self.assertEqual(result, 'result')
result = self.conductor.instance_get_all_by_host(self.context, 'host',
'node', None)
self.assertEqual(result, 'result')
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(messaging.ExpectedException,
self.conductor.service_get_all_by,
self.context, **condargs)
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
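    # Illustrative: _test_stubbed('service_get_all', (), dict(host=None,
    # topic=None, binary=None)) stubs db.service_get_all and asserts that the
    # conductor's service_get_all_by proxies the stubbed result unchanged;
    # passing db_exception instead exercises the ExpectedException path.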
def test_service_get_all(self):
self._test_stubbed('service_get_all', (),
dict(host=None, topic=None, binary=None))
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host', binary=None))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic', host=None, binary=None))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host', topic=None, binary=None))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'args')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['args'])
def _test_object_action(self, is_classmethod, raise_exception):
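        # Define a throwaway NovaObject with a regular method and a
        # classmethod, then drive it through object_action() /
        # object_class_action() and check that 'test' comes back; the
        # raise_exception paths are asserted by the callers below.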
class TestObject(obj_base.NovaObject):
def foo(self, context, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
@classmethod
def bar(cls, context, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
obj = TestObject()
if is_classmethod:
result = self.conductor.object_class_action(
self.context, TestObject.obj_name(), 'bar', '1.0',
tuple(), {'raise_exception': raise_exception})
else:
updates, result = self.conductor.object_action(
self.context, obj, 'foo', tuple(),
{'raise_exception': raise_exception})
self.assertEqual('test', result)
def test_object_action(self):
self._test_object_action(False, False)
def test_object_action_on_raise(self):
self.assertRaises(messaging.ExpectedException,
self._test_object_action, False, True)
def test_object_class_action(self):
self._test_object_action(True, False)
def test_object_class_action_on_raise(self):
self.assertRaises(messaging.ExpectedException,
self._test_object_action, True, True)
def test_object_action_copies_object(self):
class TestObject(obj_base.NovaObject):
fields = {'dict': fields.DictOfStringsField()}
def touch_dict(self, context):
self.dict['foo'] = 'bar'
self.obj_reset_changes()
obj = TestObject()
obj.dict = {}
obj.obj_reset_changes()
updates, result = self.conductor.object_action(
self.context, obj, 'touch_dict', tuple(), {})
# NOTE(danms): If conductor did not properly copy the object, then
# the new and reference copies of the nested dict object will be
# the same, and thus 'dict' will not be reported as changed
self.assertIn('dict', updates)
self.assertEqual({'foo': 'bar'}, updates['dict'])
def _test_expected_exceptions(self, db_method, conductor_method, errors,
*args, **kwargs):
# Tests that expected exceptions are handled properly.
for error in errors:
with mock.patch.object(db, db_method, side_effect=error):
self.assertRaises(messaging.ExpectedException,
conductor_method,
self.context, *args, **kwargs)
def test_action_event_start_expected_exceptions(self):
error = exc.InstanceActionNotFound(request_id='1', instance_uuid='2')
self._test_expected_exceptions(
'action_event_start', self.conductor.action_event_start, [error],
{'foo': 'bar'})
def test_action_event_finish_expected_exceptions(self):
errors = (exc.InstanceActionNotFound(request_id='1',
instance_uuid='2'),
exc.InstanceActionEventNotFound(event='1', action_id='2'))
self._test_expected_exceptions(
'action_event_finish', self.conductor.action_event_finish,
errors, {'foo': 'bar'})
def test_instance_update_expected_exceptions(self):
errors = (exc.InvalidUUID(uuid='foo'),
exc.InstanceNotFound(instance_id=1),
exc.UnexpectedTaskStateError(expected='foo',
actual='bar'))
self._test_expected_exceptions(
'instance_update', self.conductor.instance_update,
errors, None, {'foo': 'bar'}, None)
def test_instance_get_by_uuid_expected_exceptions(self):
error = exc.InstanceNotFound(instance_id=1)
self._test_expected_exceptions(
'instance_get_by_uuid', self.conductor.instance_get_by_uuid,
[error], None, [])
def test_aggregate_host_add_expected_exceptions(self):
error = exc.AggregateHostExists(aggregate_id=1, host='foo')
self._test_expected_exceptions(
'aggregate_host_add', self.conductor.aggregate_host_add,
[error], {'id': 1}, None)
def test_aggregate_host_delete_expected_exceptions(self):
error = exc.AggregateHostNotFound(aggregate_id=1, host='foo')
self._test_expected_exceptions(
'aggregate_host_delete', self.conductor.aggregate_host_delete,
[error], {'id': 1}, None)
def test_service_update_expected_exceptions(self):
error = exc.ServiceNotFound(service_id=1)
self._test_expected_exceptions(
'service_update',
self.conductor.service_update,
[error], {'id': 1}, None)
def test_service_destroy_expected_exceptions(self):
error = exc.ServiceNotFound(service_id=1)
self._test_expected_exceptions(
'service_destroy',
self.conductor.service_destroy,
[error], 1)
def _setup_aggregate_with_host(self):
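        # Create a real aggregate through the db API, add host 'bar' via the
        # conductor, and return the re-fetched aggregate record.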
aggregate_ref = db.aggregate_create(self.context.elevated(),
{'name': 'foo'}, metadata={'availability_zone': 'foo'})
self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
return aggregate_ref
def test_aggregate_host_add(self):
aggregate_ref = self._setup_aggregate_with_host()
self.assertIn('bar', aggregate_ref['hosts'])
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_host_delete(self):
aggregate_ref = self._setup_aggregate_with_host()
self.conductor.aggregate_host_delete(self.context, aggregate_ref,
'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
self.assertNotIn('bar', aggregate_ref['hosts'])
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_network_migrate_instance_start(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_start')
self.conductor_manager.network_api.migrate_instance_start(self.context,
'instance',
'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_start(self.context,
'instance',
'migration')
def test_network_migrate_instance_finish(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_finish')
self.conductor_manager.network_api.migrate_instance_finish(
self.context, 'instance', 'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_finish(self.context,
'instance',
'migration')
def test_instance_destroy(self):
self.mox.StubOutWithMock(db, 'instance_destroy')
db.instance_destroy(self.context, 'fake-uuid').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_destroy(self.context,
{'uuid': 'fake-uuid'})
self.assertEqual(result, 'fake-result')
def test_compute_unrescue(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'unrescue')
self.conductor_manager.compute_api.unrescue(self.context, 'instance')
self.mox.ReplayAll()
self.conductor.compute_unrescue(self.context, 'instance')
def test_instance_get_active_by_window_joined(self):
self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
db.instance_get_active_by_window_joined(self.context, 'fake-begin',
'fake-end', 'fake-proj',
'fake-host')
self.mox.ReplayAll()
self.conductor.instance_get_active_by_window_joined(
self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
def test_instance_fault_create(self):
self.mox.StubOutWithMock(db, 'instance_fault_create')
db.instance_fault_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_fault_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_action_event_start(self):
self.mox.StubOutWithMock(db, 'action_event_start')
db.action_event_start(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_start(self.context, {})
def test_action_event_finish(self):
self.mox.StubOutWithMock(db, 'action_event_finish')
db.action_event_finish(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_finish(self.context, {})
def test_agent_build_get_by_triple(self):
self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
'fake-arch').AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.agent_build_get_by_triple(self.context,
'fake-hv',
'fake-os',
'fake-arch')
self.assertEqual(result, 'it worked')
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor RPC API Tests."""
def setUp(self):
super(ConductorRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor_manager = self.conductor_service.manager
self.conductor = conductor_rpcapi.ConductorAPI()
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
'_from_db_object')
db.block_device_mapping_create(self.context, fake_bdm)
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
db.block_device_mapping_update_or_create(self.context, fake_bdm)
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm)
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
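        # Same idea as the manager-level helper above, but the RPC API client
        # surfaces the original db exception directly, so only a single call
        # (and a single expected raise) is set up here.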
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (),
dict(topic=None, host=None, binary=None))
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host', binary=None))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic', host=None, binary=None))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host', topic=None, binary=None))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['arg'])
@mock.patch.object(db, 'service_update')
@mock.patch('oslo.messaging.RPCClient.prepare')
def test_service_update_time_big(self, mock_prepare, mock_update):
CONF.set_override('report_interval', 10)
services = {'id': 1}
self.conductor.service_update(self.context, services, {})
mock_prepare.assert_called_once_with(timeout=9)
@mock.patch.object(db, 'service_update')
@mock.patch('oslo.messaging.RPCClient.prepare')
def test_service_update_time_small(self, mock_prepare, mock_update):
CONF.set_override('report_interval', 3)
services = {'id': 1}
self.conductor.service_update(self.context, services, {})
mock_prepare.assert_called_once_with(timeout=3)
@mock.patch.object(db, 'service_update')
@mock.patch('oslo.messaging.RPCClient.prepare')
def test_service_update_no_time(self, mock_prepare, mock_update):
CONF.set_override('report_interval', None)
services = {'id': 1}
self.conductor.service_update(self.context, services, {})
mock_prepare.assert_called_once_with()
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor API Tests."""
def setUp(self):
super(ConductorAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.API()
self.conductor_manager = self.conductor_service.manager
self.db = None
def _do_update(self, instance_uuid, **updates):
# NOTE(danms): the public API takes actual keyword arguments,
# so override the base class here to make the call correctly
return self.conductor.instance_update(self.context, instance_uuid,
**updates)
def test_bw_usage_get(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_get(*get_args)
self.assertEqual(result, 'foo')
def test_block_device_mapping_update_or_create(self):
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
'_from_db_object')
db.block_device_mapping_create(self.context, 'fake-bdm')
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
db.block_device_mapping_update(self.context,
'fake-id', {'id': 'fake-id'})
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
self.conductor.block_device_mapping_update_or_create(self.context,
'fake-bdm')
def _test_stubbed(self, name, *args, **kwargs):
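        # For the public conductor API the db call is stubbed with the same
        # positional args the API receives. An explicit FakeContext may be
        # passed as the first positional argument (e.g. for calls made with an
        # elevated context); kwargs support 'db_exception' and 'returns=False'
        # for methods that are expected to return None.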
if args and isinstance(args[0], FakeContext):
ctxt = args[0]
args = args[1:]
else:
ctxt = self.context
db_exception = kwargs.get('db_exception')
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(ctxt, *args).AndRaise(db_exception)
else:
getattr(db, name)(ctxt, *args).AndReturn('fake-result')
if name == 'service_destroy':
# TODO(russellb) This is a hack ... SetUp() starts the conductor()
# service. There is a cleanup step that runs after this test which
# also deletes the associated service record. This involves a call
# to db.service_destroy(), which we have stubbed out.
db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
getattr(self.conductor, name),
self.context, *args)
else:
result = getattr(self.conductor, name)(self.context, *args)
self.assertEqual(
result, 'fake-result' if kwargs.get('returns', True) else None)
def test_service_get_all(self):
self._test_stubbed('service_get_all')
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic', 'topic')
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host', 'host')
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host', 'host')
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args', 'host', 'binary')
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host', 'host',
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args', 'host', 'binary',
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_service_create(self):
self._test_stubbed('service_create', {})
def test_service_destroy(self):
self._test_stubbed('service_destroy', '', returns=False)
def test_service_update(self):
ctxt = self.context
self.mox.StubOutWithMock(db, 'service_update')
db.service_update(ctxt, '', {}).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.service_update(self.context, {'id': ''}, {})
self.assertEqual(result, 'fake-result')
def test_instance_get_all_by_host_and_node(self):
self._test_stubbed('instance_get_all_by_host_and_node',
self.context.elevated(), 'host', 'node')
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(), 'host',
None).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context,
'host', None)
self.assertEqual(result, 'fake-result')
def test_wait_until_ready(self):
timeouts = []
calls = dict(count=0)
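        # fake_ping times out for the first 14 attempts; wait_until_ready()
        # is expected to retry with short 10 second timeouts before falling
        # back to an indefinite wait (timeout=None), which the assertions
        # below verify.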
def fake_ping(context, message, timeout):
timeouts.append(timeout)
calls['count'] += 1
if calls['count'] < 15:
raise messaging.MessagingTimeout("fake")
self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
self.conductor.wait_until_ready(self.context)
self.assertEqual(timeouts.count(10), 10)
self.assertIn(None, timeouts)
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', 'arg')
class ConductorLocalAPITestCase(ConductorAPITestCase):
"""Conductor LocalAPI Tests."""
def setUp(self):
super(ConductorLocalAPITestCase, self).setUp()
self.conductor = conductor_api.LocalAPI()
self.conductor_manager = self.conductor._manager._target
self.db = db
def test_client_exceptions(self):
instance = self._create_fake_instance()
# NOTE(danms): The LocalAPI should not raise exceptions wrapped
# in ClientException. KeyError should be raised if an invalid
# update key is passed, so use that to validate.
self.assertRaises(KeyError,
self._do_update, instance['uuid'], foo='bar')
def test_wait_until_ready(self):
# Override test in ConductorAPITestCase
pass
class ConductorImportTest(test.TestCase):
def test_import_conductor_local(self):
self.flags(use_local=True, group='conductor')
self.assertIsInstance(conductor.API(), conductor_api.LocalAPI)
self.assertIsInstance(conductor.ComputeTaskAPI(),
conductor_api.LocalComputeTaskAPI)
def test_import_conductor_rpc(self):
self.flags(use_local=False, group='conductor')
self.assertIsInstance(conductor.API(), conductor_api.API)
self.assertIsInstance(conductor.ComputeTaskAPI(),
conductor_api.ComputeTaskAPI)
def test_import_conductor_override_to_local(self):
self.flags(use_local=False, group='conductor')
self.assertIsInstance(conductor.API(use_local=True),
conductor_api.LocalAPI)
self.assertIsInstance(conductor.ComputeTaskAPI(use_local=True),
conductor_api.LocalComputeTaskAPI)
class ConductorPolicyTest(test.TestCase):
def test_all_allowed_keys(self):
def fake_db_instance_update(self, *args, **kwargs):
return None, None
self.stubs.Set(db, 'instance_update_and_get_original',
fake_db_instance_update)
ctxt = context.RequestContext('fake-user', 'fake-project')
conductor = conductor_api.LocalAPI()
updates = {}
for key in conductor_manager.allowed_updates:
if key in conductor_manager.datetime_fields:
updates[key] = timeutils.utcnow()
else:
updates[key] = 'foo'
conductor.instance_update(ctxt, 'fake-instance', **updates)
def test_allowed_keys_are_real(self):
instance = models.Instance()
keys = list(conductor_manager.allowed_updates)
# NOTE(danms): expected_task_state is a parameter that gets
# passed to the db layer, but is not actually an instance attribute
del keys[keys.index('expected_task_state')]
for key in keys:
self.assertTrue(hasattr(instance, key))
class _BaseTaskTestCase(object):
def setUp(self):
super(_BaseTaskTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
fake_server_actions.stub_out_action_events(self.stubs)
def fake_deserialize_context(serializer, ctxt_dict):
self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
return self.context
self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
fake_deserialize_context)
def _prepare_rebuild_args(self, update_args=None):
rebuild_args = {'new_pass': 'admin_password',
'injected_files': 'files_to_inject',
'image_ref': 'image_ref',
'orig_image_ref': 'orig_image_ref',
'orig_sys_metadata': 'orig_sys_meta',
'bdms': {},
'recreate': False,
'on_shared_storage': False,
'preserve_ephemeral': False,
'host': 'compute-host'}
if update_args:
rebuild_args.update(update_args)
return rebuild_args
def test_live_migrate(self):
inst = fake_instance.fake_db_instance()
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
live_migrate.execute(self.context,
mox.IsA(objects.Instance),
'destination',
'block_migration',
'disk_over_commit')
self.mox.ReplayAll()
if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
conductor_api.LocalComputeTaskAPI)):
# The API method is actually 'live_migrate_instance'. It gets
# converted into 'migrate_server' when doing RPC.
self.conductor.live_migrate_instance(self.context, inst_obj,
'destination', 'block_migration', 'disk_over_commit')
else:
self.conductor.migrate_server(self.context, inst_obj,
{'host': 'destination'}, True, False, None,
'block_migration', 'disk_over_commit')
def test_cold_migrate(self):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(
self.conductor_manager.compute_rpcapi, 'prep_resize')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
'select_destinations')
inst = fake_instance.fake_db_instance(image_ref='image_ref')
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst, [])
flavor = flavors.get_default_flavor()
flavor['extra_specs'] = 'extra_specs'
request_spec = {'instance_type': flavor,
'instance_properties': {}}
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_api,
'image_ref', mox.IsA(objects.Instance)).AndReturn('image')
scheduler_utils.build_request_spec(
self.context, 'image',
[mox.IsA(objects.Instance)],
instance_type=flavor).AndReturn(request_spec)
hosts = [dict(host='host1', nodename=None, limits={})]
self.conductor_manager.scheduler_client.select_destinations(
self.context, request_spec,
{'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(hosts)
filter_properties = {'limits': {},
'retry': {'num_attempts': 1,
'hosts': [['host1', None]]}}
self.conductor_manager.compute_rpcapi.prep_resize(
self.context, 'image', mox.IsA(objects.Instance),
mox.IsA(dict), 'host1', [], request_spec=request_spec,
filter_properties=filter_properties, node=None)
self.mox.ReplayAll()
scheduler_hint = {'filter_properties': {}}
if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
conductor_api.LocalComputeTaskAPI)):
# The API method is actually 'resize_instance'. It gets
# converted into 'migrate_server' when doing RPC.
self.conductor.resize_instance(
self.context, inst_obj, {}, scheduler_hint, flavor, [])
else:
self.conductor.migrate_server(
self.context, inst_obj, scheduler_hint,
False, False, flavor, None, None, [])
def test_build_instances(self):
system_metadata = flavors.save_flavor_info({},
flavors.get_default_flavor())
instances = [fake_instance.fake_instance_obj(
self.context,
system_metadata=system_metadata,
expected_attrs=['system_metadata']) for i in xrange(2)]
instance_type = flavors.extract_flavor(instances[0])
instance_type['extra_specs'] = 'fake-specs'
instance_properties = jsonutils.to_primitive(instances[0])
self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'build_and_run_instance')
db.flavor_extra_specs_get(
self.context,
instance_type['flavorid']).AndReturn('fake-specs')
scheduler_utils.setup_instance_group(self.context, None, None)
self.conductor_manager.scheduler_client.select_destinations(
self.context, {'image': {'fake_data': 'should_pass_silently'},
'instance_properties': jsonutils.to_primitive(
instances[0]),
'instance_type': instance_type,
'instance_uuids': [inst.uuid for inst in instances],
'num_instances': 2},
{'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
db.instance_get_by_uuid(self.context, instances[0].uuid,
columns_to_join=['system_metadata'],
use_slave=False).AndReturn(
jsonutils.to_primitive(instances[0]))
db.block_device_mapping_get_all_by_instance(self.context,
instances[0].uuid, use_slave=False).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context,
instance=mox.IgnoreArg(),
host='host1',
image={'fake_data': 'should_pass_silently'},
request_spec={
'image': {'fake_data': 'should_pass_silently'},
'instance_properties': instance_properties,
'instance_type': instance_type,
'instance_uuids': [inst.uuid for inst in instances],
'num_instances': 2},
filter_properties={'retry': {'num_attempts': 1,
'hosts': [['host1', 'node1']]},
'limits': []},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mox.IgnoreArg(),
node='node1', limits=[])
db.instance_get_by_uuid(self.context, instances[1].uuid,
columns_to_join=['system_metadata'],
use_slave=False).AndReturn(
jsonutils.to_primitive(instances[1]))
db.block_device_mapping_get_all_by_instance(self.context,
instances[1].uuid, use_slave=False).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context,
instance=mox.IgnoreArg(),
host='host2',
image={'fake_data': 'should_pass_silently'},
request_spec={
'image': {'fake_data': 'should_pass_silently'},
'instance_properties': instance_properties,
'instance_type': instance_type,
'instance_uuids': [inst.uuid for inst in instances],
'num_instances': 2},
filter_properties={'limits': [],
'retry': {'num_attempts': 1,
'hosts': [['host2', 'node2']]}},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mox.IgnoreArg(),
node='node2', limits=[])
self.mox.ReplayAll()
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image={'fake_data': 'should_pass_silently'},
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
def test_build_instances_scheduler_failure(self):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
image = {'fake-data': 'should_pass_silently'}
spec = {'fake': 'specs',
'instance_properties': instances[0]}
exception = exc.NoValidHost(reason='fake-reason')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
'select_destinations')
scheduler_utils.build_request_spec(self.context, image,
mox.IgnoreArg()).AndReturn(spec)
scheduler_utils.setup_instance_group(self.context, None, None)
self.conductor_manager.scheduler_client.select_destinations(
self.context, spec,
{'retry': {'num_attempts': 1,
'hosts': []}}).AndRaise(exception)
for instance in instances:
scheduler_driver.handle_schedule_error(self.context, exception,
instance.uuid, spec)
self.mox.ReplayAll()
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
def test_unshelve_instance_on_host(self):
db_instance = self._create_fake_instance()
instance = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED
instance.task_state = task_states.UNSHELVING
instance.save()
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'start_instance')
self.mox.StubOutWithMock(self.conductor_manager, '_delete_image')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager.compute_rpcapi.start_instance(self.context,
instance)
self.conductor_manager._delete_image(self.context,
'fake_image_id')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_offloaded_instance_glance_image_not_found(self):
shelved_image_id = "image_not_found"
db_instance = self._create_fake_instance()
instance = objects.Instance.get_by_uuid(
self.context,
db_instance['uuid'],
expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = task_states.UNSHELVING
instance.save()
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
e = exc.ImageNotFound(image_id=shelved_image_id)
self.conductor_manager.image_api.get(
self.context, shelved_image_id, show_deleted=False).AndRaise(e)
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_host'] = 'fake-mini'
system_metadata['shelved_image_id'] = shelved_image_id
self.assertRaises(
exc.UnshelveException,
self.conductor_manager.unshelve_instance,
self.context, instance)
self.assertEqual(instance.vm_state, vm_states.ERROR)
def test_unshelve_offloaded_instance_image_id_is_none(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
instance = objects.Instance.get_by_uuid(
self.context,
db_instance['uuid'],
expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = task_states.UNSHELVING
system_metadata = instance.system_metadata
system_metadata['shelved_image_id'] = None
instance.save()
self.assertRaises(
exc.UnshelveException,
self.conductor_manager.unshelve_instance,
self.context, instance)
self.assertEqual(instance.vm_state, vm_states.ERROR)
def test_unshelve_instance_schedule_and_rebuild(self):
db_instance = self._create_fake_instance()
instance = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
filter_properties = {}
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager.image_api.get(self.context,
'fake_image_id', show_deleted=False).AndReturn('fake_image')
self.conductor_manager._schedule_instances(self.context,
'fake_image', filter_properties, instance).AndReturn(
[{'host': 'fake_host',
'nodename': 'fake_node',
'limits': {}}])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', image='fake_image',
filter_properties={'limits': {}}, node='fake_node')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_instance_schedule_and_rebuild_novalid_host(self):
db_instance = self._create_fake_instance()
instance = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
system_metadata = instance.system_metadata
def fake_schedule_instances(context, image, filter_properties,
*instances):
raise exc.NoValidHost(reason='')
with contextlib.nested(
mock.patch.object(self.conductor_manager.image_api, 'get',
return_value='fake_image'),
mock.patch.object(self.conductor_manager, '_schedule_instances',
fake_schedule_instances)
) as (_get_image, _schedule_instances):
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
_get_image.assert_has_calls([mock.call(self.context,
system_metadata['shelved_image_id'],
show_deleted=False)])
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
db_instance = self._create_fake_instance()
instance = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
filter_properties = {}
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager.image_api.get(self.context,
'fake_image_id', show_deleted=False).AndReturn(None)
self.conductor_manager._schedule_instances(self.context,
None, filter_properties, instance).AndReturn(
[{'host': 'fake_host',
'nodename': 'fake_node',
'limits': {}}])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', image=None,
filter_properties={'limits': {}}, node='fake_node')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_rebuild_instance(self):
db_instance = self._create_fake_instance()
inst_obj = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'])
rebuild_args = self._prepare_rebuild_args({'host': inst_obj.host})
with contextlib.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations')
) as (rebuild_mock, select_dest_mock):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
self.assertFalse(select_dest_mock.called)
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
**rebuild_args)
def test_rebuild_instance_with_scheduler(self):
db_instance = self._create_fake_instance()
inst_obj = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'])
inst_obj.host = 'noselect'
rebuild_args = self._prepare_rebuild_args({'host': None})
expected_host = 'thebesthost'
request_spec = {}
filter_properties = {'ignore_hosts': [(inst_obj.host)]}
with contextlib.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations',
return_value=[{'host': expected_host}]),
mock.patch('nova.scheduler.utils.build_request_spec',
return_value=request_spec)
) as (rebuild_mock, select_dest_mock, bs_mock):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
select_dest_mock.assert_called_once_with(self.context,
request_spec,
filter_properties)
rebuild_args['host'] = expected_host
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
**rebuild_args)
def test_rebuild_instance_with_scheduler_no_host(self):
db_instance = self._create_fake_instance()
inst_obj = objects.Instance.get_by_uuid(self.context,
db_instance['uuid'])
inst_obj.host = 'noselect'
rebuild_args = self._prepare_rebuild_args({'host': None})
request_spec = {}
filter_properties = {'ignore_hosts': [(inst_obj.host)]}
with contextlib.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations',
side_effect=exc.NoValidHost(reason='')),
mock.patch('nova.scheduler.utils.build_request_spec',
return_value=request_spec)
) as (rebuild_mock, select_dest_mock, bs_mock):
self.assertRaises(exc.NoValidHost,
self.conductor_manager.rebuild_instance,
context=self.context, instance=inst_obj,
**rebuild_args)
select_dest_mock.assert_called_once_with(self.context,
request_spec,
filter_properties)
self.assertFalse(rebuild_mock.called)
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""ComputeTaskManager Tests."""
def setUp(self):
super(ConductorTaskTestCase, self).setUp()
self.conductor = conductor_manager.ComputeTaskManager()
self.conductor_manager = self.conductor
def test_migrate_server_fails_with_rebuild(self):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, True, None, None, None)
def test_migrate_server_fails_with_flavor(self):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, False, "dummy", None, None)
def _build_request_spec(self, instance):
return {
'instance_properties': {
'uuid': instance['uuid'], },
}
def _test_migrate_server_deals_with_expected_exceptions(self, ex):
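        # live_migrate.execute is stubbed to raise the given exception; the
        # task manager is expected to reset the instance to ACTIVE, notify via
        # set_vm_state_and_notify, and re-raise the same exception type.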
instance = fake_instance.fake_db_instance(uuid='uuid',
vm_state=vm_states.ACTIVE)
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), instance, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
live_migrate.execute(self.context, mox.IsA(objects.Instance),
'destination', 'block_migration',
'disk_over_commit').AndRaise(ex)
scheduler_utils.set_vm_state_and_notify(self.context,
'compute_task', 'migrate_server',
{'vm_state': vm_states.ACTIVE,
'task_state': None,
'expected_task_state': task_states.MIGRATING},
ex, self._build_request_spec(inst_obj),
self.conductor_manager.db)
self.mox.ReplayAll()
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(type(ex),
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
def test_migrate_server_deals_with_invalidcpuinfo_exception(self):
instance = fake_instance.fake_db_instance(uuid='uuid',
vm_state=vm_states.ACTIVE)
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), instance, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
ex = exc.InvalidCPUInfo(reason="invalid cpu info.")
live_migrate.execute(self.context, mox.IsA(objects.Instance),
'destination', 'block_migration',
'disk_over_commit').AndRaise(ex)
scheduler_utils.set_vm_state_and_notify(self.context,
'compute_task', 'migrate_server',
{'vm_state': vm_states.ACTIVE,
'task_state': None,
'expected_task_state': task_states.MIGRATING},
ex, self._build_request_spec(inst_obj),
self.conductor_manager.db)
self.mox.ReplayAll()
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(exc.InvalidCPUInfo,
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
@mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
@mock.patch.object(live_migrate, 'execute')
def test_migrate_server_deals_with_instancenotrunning_exception(self,
mock_live_migrate, mock_set_state):
inst = fake_instance.fake_db_instance()
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst, [])
error = exc.InstanceNotRunning(instance_id="fake")
mock_live_migrate.side_effect = error
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(exc.InstanceNotRunning,
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None,
'block_migration', 'disk_over_commit')
request_spec = self._build_request_spec(inst_obj)
mock_set_state.assert_called_once_with(self.context, 'compute_task',
'migrate_server',
dict(vm_state=inst_obj.vm_state,
task_state=None,
expected_task_state=task_states.MIGRATING),
error, request_spec, self.conductor_manager.db)
def test_migrate_server_deals_with_DestinationHypervisorTooOld(self):
ex = exc.DestinationHypervisorTooOld()
self._test_migrate_server_deals_with_expected_exceptions(ex)
def test_migrate_server_deals_with_HypervisorUnavailable(self):
ex = exc.HypervisorUnavailable(host='dummy')
self._test_migrate_server_deals_with_expected_exceptions(ex)
def test_migrate_server_deals_with_unexpected_exceptions(self):
instance = fake_instance.fake_db_instance()
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), instance, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
ex = IOError()
live_migrate.execute(self.context, mox.IsA(objects.Instance),
'destination', 'block_migration',
'disk_over_commit').AndRaise(ex)
self.mox.ReplayAll()
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(exc.MigrationError,
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
def test_set_vm_state_and_notify(self):
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
scheduler_utils.set_vm_state_and_notify(
self.context, 'compute_task', 'method', 'updates',
'ex', 'request_spec', self.conductor.db)
self.mox.ReplayAll()
self.conductor._set_vm_state_and_notify(
self.context, 'method', 'updates', 'ex', 'request_spec')
def test_cold_migrate_no_valid_host_back_in_active_state(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
instance_type_id=flavor['id'])
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(self.conductor.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_api,
'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type=flavor).AndReturn(request_spec)
exc_info = exc.NoValidHost(reason="")
self.conductor.scheduler_client.select_destinations(
self.context, request_spec,
filter_props).AndRaise(exc_info)
updates = {'vm_state': vm_states.ACTIVE,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
'migrate_server',
updates, exc_info,
request_spec)
# NOTE(mriedem): Validate that the quota rollback is using
# the correct project_id and user_id.
project_id, user_id = quotas_obj.ids_from_instance(self.context,
inst_obj)
quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
user_id=user_id)
self.mox.ReplayAll()
self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate,
self.context, inst_obj,
flavor, filter_props, [resvs])
def test_cold_migrate_no_valid_host_back_in_stopped_state(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED,
instance_type_id=flavor['id'])
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(self.conductor.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_api,
'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type=flavor).AndReturn(request_spec)
exc_info = exc.NoValidHost(reason="")
self.conductor.scheduler_client.select_destinations(
self.context, request_spec,
filter_props).AndRaise(exc_info)
updates = {'vm_state': vm_states.STOPPED,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
'migrate_server',
updates, exc_info,
request_spec)
# NOTE(mriedem): Validate that the quota rollback is using
# the correct project_id and user_id.
project_id, user_id = quotas_obj.ids_from_instance(self.context,
inst_obj)
quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
user_id=user_id)
self.mox.ReplayAll()
self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate, self.context,
inst_obj, flavor, filter_props, [resvs])
def test_cold_migrate_no_valid_host_error_msg(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED,
instance_type_id=flavor['id'])
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
with contextlib.nested(
mock.patch.object(compute_utils, 'get_image_metadata',
return_value=image),
mock.patch.object(scheduler_utils, 'build_request_spec',
return_value=request_spec),
mock.patch.object(self.conductor.scheduler_client,
'select_destinations',
side_effect=exc.NoValidHost(reason=""))
) as (image_mock, brs_mock, select_dest_mock):
nvh = self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate, self.context,
inst_obj, flavor, filter_props, [resvs])
self.assertIn('cold migrate', nvh.message)
def test_cold_migrate_exception_host_in_error_state_and_raise(self):
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED)
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
hosts = [dict(host='host1', nodename=None, limits={})]
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(self.conductor.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(scheduler_utils,
'populate_filter_properties')
self.mox.StubOutWithMock(self.conductor.compute_rpcapi,
'prep_resize')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_api,
'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type='flavor').AndReturn(request_spec)
expected_filter_props = {'retry': {'num_attempts': 1,
'hosts': []},
'context': None}
self.conductor.scheduler_client.select_destinations(
self.context, request_spec,
expected_filter_props).AndReturn(hosts)
scheduler_utils.populate_filter_properties(filter_props,
hosts[0])
exc_info = test.TestingException('something happened')
expected_filter_props = {'retry': {'num_attempts': 1,
'hosts': []}}
self.conductor.compute_rpcapi.prep_resize(
self.context, image, inst_obj,
'flavor', hosts[0]['host'], [resvs],
request_spec=request_spec,
filter_properties=expected_filter_props,
node=hosts[0]['nodename']).AndRaise(exc_info)
updates = {'vm_state': vm_states.STOPPED,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
'migrate_server',
updates, exc_info,
request_spec)
# NOTE(mriedem): Validate that the quota rollback is using
# the correct project_id and user_id.
project_id, user_id = quotas_obj.ids_from_instance(self.context,
inst_obj)
quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
user_id=user_id)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.conductor._cold_migrate,
self.context, inst_obj, 'flavor',
filter_props, [resvs])
def test_resize_no_valid_host_error_msg(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
flavor_new = flavors.get_flavor_by_name('m1.small')
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED,
instance_type_id=flavor['id'])
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
with contextlib.nested(
mock.patch.object(compute_utils, 'get_image_metadata',
return_value=image),
mock.patch.object(scheduler_utils, 'build_request_spec',
return_value=request_spec),
mock.patch.object(self.conductor.scheduler_client,
'select_destinations',
side_effect=exc.NoValidHost(reason=""))
) as (image_mock, brs_mock, select_dest_mock):
nvh = self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate, self.context,
inst_obj, flavor_new, filter_props,
[resvs])
self.assertIn('resize', nvh.message)
def test_build_instances_instance_not_found(self):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
self.mox.StubOutWithMock(instances[0], 'refresh')
self.mox.StubOutWithMock(instances[1], 'refresh')
image = {'fake-data': 'should_pass_silently'}
spec = {'fake': 'specs',
'instance_properties': instances[0]}
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'build_and_run_instance')
scheduler_utils.build_request_spec(self.context, image,
mox.IgnoreArg()).AndReturn(spec)
scheduler_utils.setup_instance_group(self.context, None, None)
self.conductor_manager.scheduler_client.select_destinations(
self.context, spec,
{'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
instances[0].refresh().AndRaise(
exc.InstanceNotFound(instance_id=instances[0].uuid))
instances[1].refresh()
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context, instance=instances[1], host='host2',
image={'fake-data': 'should_pass_silently'}, request_spec=spec,
filter_properties={'limits': [],
'retry': {'num_attempts': 1,
'hosts': [['host2',
'node2']]}},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mox.IsA(objects.BlockDeviceMappingList),
node='node2', limits=[])
self.mox.ReplayAll()
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(scheduler_utils, 'build_request_spec')
def test_build_instances_info_cache_not_found(self, build_request_spec,
setup_instance_group):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
image = {'fake-data': 'should_pass_silently'}
destinations = [{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}]
spec = {'fake': 'specs',
'instance_properties': instances[0]}
build_request_spec.return_value = spec
with contextlib.nested(
mock.patch.object(instances[0], 'refresh',
side_effect=exc.InstanceInfoCacheNotFound(
instance_uuid=instances[0].uuid)),
mock.patch.object(instances[1], 'refresh'),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations', return_value=destinations),
mock.patch.object(self.conductor_manager.compute_rpcapi,
'build_and_run_instance')
) as (inst1_refresh, inst2_refresh, select_destinations,
build_and_run_instance):
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
setup_instance_group.assert_called_once_with(
self.context, None, None)
build_and_run_instance.assert_called_once_with(self.context,
instance=instances[1], host='host2', image={'fake-data':
'should_pass_silently'}, request_spec=spec,
filter_properties={'limits': [],
'retry': {'num_attempts': 1,
'hosts': [['host2',
'node2']]}},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mock.ANY,
node='node2', limits=[])
class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
test_compute.BaseTestCase):
"""Conductor compute_task RPC namespace Tests."""
def setUp(self):
super(ConductorTaskRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_rpcapi.ComputeTaskAPI()
service_manager = self.conductor_service.manager
self.conductor_manager = service_manager.compute_task_mgr
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""Compute task API Tests."""
def setUp(self):
super(ConductorTaskAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.ComputeTaskAPI()
service_manager = self.conductor_service.manager
self.conductor_manager = service_manager.compute_task_mgr
class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
"""Conductor LocalComputeTaskAPI Tests."""
def setUp(self):
super(ConductorLocalComputeTaskAPITestCase, self).setUp()
self.conductor = conductor_api.LocalComputeTaskAPI()
self.conductor_manager = self.conductor._manager._target
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used by convolution layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.python.keras import backend
def convert_data_format(data_format, ndim):
if data_format == 'channels_last':
if ndim == 3:
return 'NWC'
elif ndim == 4:
return 'NHWC'
elif ndim == 5:
return 'NDHWC'
else:
raise ValueError('Input rank not supported:', ndim)
elif data_format == 'channels_first':
if ndim == 3:
return 'NCW'
elif ndim == 4:
return 'NCHW'
elif ndim == 5:
return 'NCDHW'
else:
raise ValueError('Input rank not supported:', ndim)
else:
raise ValueError('Invalid data_format:', data_format)
def normalize_tuple(value, n, name):
"""Transforms a single integer or iterable of integers into an integer tuple.
Arguments:
value: The value to validate and convert. Could be an int, or any iterable
of ints.
n: The size of the tuple to be returned.
name: The name of the argument being validated, e.g. "strides" or
"kernel_size". This is only used to format error messages.
Returns:
A tuple of n integers.
Raises:
ValueError: If something else than an int/long or iterable thereof was
passed.
"""
if isinstance(value, int):
return (value,) * n
else:
try:
value_tuple = tuple(value)
except TypeError:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value))
if len(value_tuple) != n:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value))
for single_value in value_tuple:
try:
int(single_value)
except (ValueError, TypeError):
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value) + ' '
'including element ' + str(single_value) + ' of type' +
' ' + str(type(single_value)))
return value_tuple
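# A small sketch (not part of the original module) of how normalize_tuple
# expands a scalar and validates an iterable, e.g. for `strides` arguments.
def _normalize_tuple_example():
  assert normalize_tuple(2, 2, 'strides') == (2, 2)
  assert normalize_tuple([1, 2], 2, 'strides') == (1, 2)
  return True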
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
"""Determines output length of a convolution given input length.
Arguments:
input_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
dilation: dilation rate, integer.
Returns:
The output length (integer).
"""
if input_length is None:
return None
assert padding in {'same', 'valid', 'full'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding == 'same':
output_length = input_length
elif padding == 'valid':
output_length = input_length - dilated_filter_size + 1
elif padding == 'full':
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
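# Worked example (not part of the original module): for a 10-step input and a
# 3-wide kernel, 'same' keeps the length, 'valid' trims it, the stride then
# downsamples, and dilation widens the effective kernel.
def _conv_output_length_example():
  assert conv_output_length(10, 3, 'same', 1) == 10
  assert conv_output_length(10, 3, 'valid', 1) == 8
  assert conv_output_length(10, 3, 'valid', 2) == 4
  assert conv_output_length(10, 3, 'valid', 1, dilation=2) == 6
  return True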
def conv_input_length(output_length, filter_size, padding, stride):
"""Determines input length of a convolution given output length.
Arguments:
output_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
Returns:
The input length (integer).
"""
if output_length is None:
return None
assert padding in {'same', 'valid', 'full'}
if padding == 'same':
pad = filter_size // 2
elif padding == 'valid':
pad = 0
elif padding == 'full':
pad = filter_size - 1
return (output_length - 1) * stride - 2 * pad + filter_size
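# Quick sketch (not part of the original module): conv_input_length is the
# inverse of conv_output_length for the stride-1 cases shown above.
def _conv_input_length_example():
  assert conv_input_length(8, 3, 'valid', 1) == 10
  assert conv_input_length(10, 3, 'same', 1) == 10
  return True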
def deconv_output_length(input_length, filter_size, padding, stride):
"""Determines output length of a transposed convolution given input length.
Arguments:
input_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
Returns:
The output length (integer).
"""
if input_length is None:
return None
input_length *= stride
if padding == 'valid':
input_length += max(filter_size - stride, 0)
elif padding == 'full':
input_length -= (stride + filter_size - 2)
return input_length
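# Worked example (not part of the original module): a transposed convolution
# roughly inverts the forward length computation, so a length-4 input with
# stride 2 and a 3-wide kernel grows to 8 under 'same' and 9 under 'valid'.
def _deconv_output_length_example():
  assert deconv_output_length(4, 3, 'same', 2) == 8
  assert deconv_output_length(4, 3, 'valid', 2) == 9
  return True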
def normalize_data_format(value):
if value is None:
value = backend.image_data_format()
data_format = value.lower()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('The `data_format` argument must be one of '
'"channels_first", "channels_last". Received: ' +
str(value))
return data_format
def normalize_padding(value):
padding = value.lower()
if padding not in {'valid', 'same', 'causal'}:
raise ValueError('The `padding` argument must be one of '
'"valid", "same" (or "causal", only for `Conv1D). '
'Received: ' + str(padding))
return padding
def convert_kernel(kernel):
"""Converts a Numpy kernel matrix from Theano format to TensorFlow format.
Also works reciprocally, since the transformation is its own inverse.
Arguments:
kernel: Numpy array (3D, 4D or 5D).
Returns:
The converted kernel.
Raises:
ValueError: in case of invalid kernel shape or invalid data_format.
"""
kernel = np.asarray(kernel)
if not 3 <= kernel.ndim <= 5:
raise ValueError('Invalid kernel shape:', kernel.shape)
slices = [slice(None, None, -1) for _ in range(kernel.ndim)]
no_flip = (slice(None, None), slice(None, None))
slices[-2:] = no_flip
return np.copy(kernel[tuple(slices)])  # index with a tuple for NumPy compatibility
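# Minimal sketch (not part of the original module): converting a 3x3x1x1
# kernel reverses the spatial axes only, and applying the conversion twice
# restores the original array.
def _convert_kernel_example():
  kernel = np.arange(9, dtype='float32').reshape((3, 3, 1, 1))
  flipped = convert_kernel(kernel)
  assert flipped[0, 0, 0, 0] == kernel[2, 2, 0, 0]
  assert np.array_equal(convert_kernel(flipped), kernel)
  return True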
| |
#
# Mimics an artificial neural network, which is a set of perceptrons.
# We can make this neural network learn from a set of data, and
# then predict on a set of examples.
#
# Compiled against Python 2.7
# Author: Stephen Bahr (sbahr@bu.edu)
import collections
import common
import math
import random
import sys
# Throughout this file, layer 0 of a neural network is the inputs, layer 1
# is the first hidden layer, etc.; the last layer is the outputs.
class NeuralNetwork:
"""An artificial neural network.
Fields:
weights: a list of lists of lists of numbers, where
weights[a][b][c] is the weight into unit b of layer a+1 from unit c in
layer a
num_hidden_layers: an integer, the number of hidden layers in the network
"""
def __init__(self, weights=None):
self.weights = weights
if weights:
self.num_hidden_layers = len(weights) - 1
def get_unit_values(self, features):
"""Calculate the activation of each unit in a neural network.
Args:
features: a vector of feature values
Returns:
units, a list of lists of numbers, where
units[a][b] is the activation of unit b in layer a
"""
#Initialize the list of lists
lst = []
# iterate over the entire network
for x in range(0, self.num_hidden_layers + 2):
# if first layer
if x == 0:
# add the inputs
lst.append(features)
# else if end layer
elif x == self.num_hidden_layers + 1:
# add empty base, number is based off last arg of weights
end = []
for y in range(0, len(self.weights[1])):
end.append(0)
lst.append(end)
# middle layer
else:
# Number of nodes in this layer x is the length
# weights[0]
mid = []
for y in range(0, len(self.weights[0])):
mid.append(0)
lst.append(mid)
# For each layer
for column in range(0, len(lst)):
# if the last layer
if column == len(lst) - 1:
# for each row in that layer
for row in range(0, len(lst[column])):
layer = lst[column]
weights = self.weights[1][row]
p = 0
# for each value i in the hidden layer to the left
for i in range(0, len(lst[column - 1])):
w = weights[i]
p = p + (w * lst[column-1][i])
layer[row] = self.activation(p)
elif column > 0:
# for each row in that layer
for row in range(0, len(lst[column])):
layer = lst[column]
weights = self.weights[0][row]
p = 0
# for each value i in the hidden layer to the left
for i in range(0, len(lst[column - 1])):
w = weights[i]
p = p + (w * lst[column-1][i])
layer[row] = self.activation(p)
return lst
def predict(self, features):
"""Calculate the prediction of a neural network on one example
Args:
features: a vector of feature values
Returns:
A list of numbers, the predictions for each output of the network
for the given example.
"""
return self.get_unit_values(features)[-1]  # the last layer holds the outputs
def calculate_errors(self, unit_values, outputs):
"""Calculate the backpropagated errors for an input to a neural network.
Args:
unit_values: unit activations, a list of lists of numbers, where
unit_values[a][b] is the activation of unit b in layer a
outputs: a list of correct output values (numbers)
Returns:
A list of lists of numbers, the errors for each hidden or output unit.
errors[a][b] is the error for unit b in layer a+1.
"""
errors = self.constructUnits(unit_values[0])
for layer in range(len(errors)-1, 0, -1):
# if the last layer
if layer == len(errors) - 1:
# for each parent in that layer
for parent in range(0, len(errors[layer])):
p_out = unit_values[layer][parent]
e_out = p_out * (1 - p_out) * (outputs[parent] - p_out)
errors[layer][parent] = e_out
elif layer > 0:
# for each parent in that layer
for parent in range(0, len(errors[layer])):
err = 0
for child in range(0, len(self.weights[layer])):
w_hidden_to_out = self.weights[layer][child][parent]
p_hidden = unit_values[layer][parent]
err_out = errors[layer+1][child]
err = err + p_hidden * (1-p_hidden) * (w_hidden_to_out * err_out)
errors[layer][parent] = err
return errors[1:]
def activation(self, v):
return 1 / (1 + math.exp(-v))
def learn(self,
data,
num_hidden=16,
max_iterations=2000,
learning_rate=1,
num_hidden_layers=1):
"""Learn a neural network from data.
Sets the weights for a neural network based on training data.
Args:
data: a list of pairs of input and output vectors, both lists of numbers.
num_hidden: the number of hidden units to use.
max_iterations: the max number of iterations to train before stopping.
learning_rate: a scaling factor to apply to each weight update.
num_hidden_layers: the number of hidden layers to use.
Unless you are doing the extra credit, you can ignore this parameter.
Returns:
This object, once learned.
"""
self.initializeWeights(data, num_hidden, num_hidden_layers)
while max_iterations > 0:
max_iterations = max_iterations - 1
for d in data:
i = d[0]
o = d[1]
units = self.get_unit_values(i)
errors = self.calculate_errors(units, o)
print "Errors: " + str(errors)
# For every layer of weights
for layer in range(1, len(self.weights) + 1):
# For each parent in the layer to the left (aka input)
for parent in range(0, len(self.weights[layer - 1])):
#print "Node: " + str(parent)
for child in range(0, len(self.weights[layer - 1][parent])):
self.weights[layer - 1][parent][child] += learning_rate * units[layer-1][child] * errors[layer - 1][parent]
return self
def constructUnits(self, features):
"""Construct the neural net units from the number of hidden layers
Returns:
A list of lists
"""
lst = []
for x in range(0, self.num_hidden_layers + 2):
# if first layer
if x == 0:
# add the inputs
lst.append(features)
# else if end layer
elif x == self.num_hidden_layers + 1:
# add empty base, number is based off last arg of weights
end = []
for y in range(0, len(self.weights[1])):
end.append(0)
lst.append(end)
# middle layer
else:
# Number of nodes in this layer x is the length
# weights[0]
mid = []
for y in range(0, len(self.weights[0])):
mid.append(0)
lst.append(mid)
return lst
def initializeWeights(self, data, num_hidden, num_hidden_layers):
weights = []
hidden = []
for column in range(0, num_hidden_layers):
w = []
if column == 0:
for row in range(0, num_hidden):
wei = []
# For each input set in the first layer
for input in range(0, len(data[0][0])):
# For each element in that tuple
wei.append(random.random())
# for item in range(0, len(data[input])):
# wei.append(random.random())
w.append(wei)
else:
for input in range(0, num_hidden):
w.append(0)
hidden = w
output = []
for column in range(0, len(data[0][1])):
o = []
for i in range(0, num_hidden):
o.append(random.random())
output.append(o)
weights.append(hidden)
weights.append(output)
self.weights = weights
self.num_hidden_layers = num_hidden_layers
return
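# Illustrative sketch (not part of the original assignment code): a network
# with two inputs, one hidden unit and one output, using hand-picked weights.
# weights[0] feeds the hidden layer and weights[1] feeds the output layer.
def _tiny_network_example():
    net = NeuralNetwork(weights=[[[0.5, 0.5]], [[1.0]]])
    units = net.get_unit_values([1, 0])
    # layer 0 is the raw input, layer 1 the hidden unit, layer 2 the output
    assert len(units) == 3
    prediction = net.predict([1, 0])
    assert 0.0 < prediction[0] < 1.0
    return prediction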
| |
from AppKit import *
import os
import time
from PyObjCTools import AppHelper
from defcon import Font
from extractor import extractUFO
import vanilla
from defconAppKit.windows.baseWindow import BaseWindowController
from reportlab.pdfgen import canvas
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import ParagraphStyle
from fontTools.pens.basePen import BasePen
import objc
objc.setVerbose(True)
class GlyphProoferAppDelegate(NSObject):
def applicationDidFinishLaunching_(self, notification):
GlyphProoferWindow()
class UFOPathFormatter(NSFormatter):
def stringForObjectValue_(self, obj):
if obj is None or isinstance(obj, NSNull):
return ""
return os.path.basename(obj)
def objectValueForString_(self, string):
return string
class GlyphProoferWindow(BaseWindowController):
def __init__(self):
width = 300
# Main Window
self.w = vanilla.Window((width, 400), "Glyph Proofer", minSize=(width, 300), maxSize=(width, 1000))
# Add a drop-able font list
columnDescriptions = [dict(title="path", formatter=UFOPathFormatter.alloc().init())]
self.w.fontList = vanilla.List((15, 15, -15, -50), [], columnDescriptions=columnDescriptions, showColumnTitles=False, enableDelete=True, drawFocusRing=False, otherApplicationDropSettings=dict(type=NSFilenamesPboardType, operation=NSDragOperationCopy, callback=self.dropFontCallback))
self.w.makePDFButton = vanilla.Button((-120, -37, -16, 20), "Generate PDF", callback=self.makePDFButtonCallback)
self.setUpBaseWindowBehavior()
self.w.open()
def dropFontCallback(self, sender, dropInfo):
acceptedFonts = [".ufo", ".ttf", ".otf"]
isProposal = dropInfo["isProposal"]
paths = dropInfo["data"]
paths = [dict(path=path) for path in paths if os.path.splitext(path)[-1].lower() in acceptedFonts]
paths = [path for path in paths if path not in self.w.fontList]
if not paths:
return False
if not isProposal:
self.w.fontList.extend(paths)
return True
def makePDFButtonCallback(self, sender):
validPaths = []
for d in self.w.fontList:
path = d["path"]
if os.path.exists(path):
validPaths.append(path)
if len(validPaths) > 0:
from reportlab.lib.pagesizes import LETTER
fonts = getFonts(validPaths)
names = getNames(fonts)
path = findAvailableFileName(os.path.expanduser("~/Desktop"), "report", "pdf")
makePDF(names, fonts, LETTER, path)
def makeFileName(directory, baseName, extension, counter=None):
if counter:
b = "%s %d.%s" % (baseName, counter, extension)
else:
b = "%s.%s" % (baseName, extension)
return os.path.join(directory, b)
def findAvailableFileName(directory, baseName, extension, counter=0):
# add number
if counter:
fileName = makeFileName(directory, baseName, extension, counter)
# no number
else:
fileName = makeFileName(directory, baseName, extension)
# recurse if necessary
if os.path.exists(fileName):
fileName = findAvailableFileName(directory, baseName, extension, counter+1)
# done
return fileName
def getFonts(paths):
"""Takes in paths, gives list of fonts"""
fonts = []
for path in paths:
if os.path.splitext(path)[-1].lower() == ".ufo":
font = Font(path)
else:
font = Font()
extractUFO(path, font, doKerning=False)
fonts.append(font)
fonts.sort(key=lambda font: font.info.openTypeOS2WeightClass)
return fonts
def getNames(fonts):
"""Takes in a list of font objects and writes out XML list of glyphs"""
names = []
for font in fonts:
for k in font.keys():
if k not in names:
names.append(k)
names.sort()
return names
class ReportLabPen(BasePen):
def __init__(self, glyphSet, canvas):
BasePen.__init__(self, glyphSet)
self.path = canvas.beginPath()
def _moveTo(self, (x,y)):
self.path.moveTo(x,y)
def _lineTo(self, (x,y)):
self.path.lineTo(x,y)
def _curveToOne(self, (x1,y1), (x2,y2), (x3,y3)):
self.path.curveTo(x1, y1, x2, y2, x3, y3)
def _closePath(self):
pass
def makePDF(names, fonts, pageSize, path):
width, height = pageSize
margin = 36
resultsLeft = 100
resultsRight = width - margin
startTop = height - 75
blankLineHeight = 20
entryHeight = 20
glyphEntryHeight = 48
minorLine = .3
majorLine = 1
glyphSpacing = 120
settings = {
"date" : time.asctime(),
"width" : width,
"height" : height,
"margin" : margin,
"labelRight" : 90,
"resultsLeft" : resultsLeft,
"resultsRight" : resultsRight,
"resultsWidth" : resultsRight - resultsLeft,
"labelWidth" : width - margin - resultsLeft,
"contentWidth" : width - (margin * 2),
"startTop" : startTop,
"regularFont" : "Helvetica",
"boldFont" : "Helvetica-Bold",
"textPointSize" : 10,
"textLeading" : 15,
"entryHeight" : entryHeight,
"glyphPointSize" : 48,
"glyphEntryHeight" : glyphEntryHeight,
"minorLine" : minorLine,
"majorLine" : majorLine,
"glyphSpacing" : glyphSpacing,
}
basicStyle = ParagraphStyle("BasicReport")
basicStyle.fontName = settings["regularFont"]
basicStyle.fontSize = settings["textPointSize"]
basicStyle.leading = settings["textLeading"]
pdf = canvas.Canvas(path, pagesize=pageSize)
_drawTemplate(pdf, settings)
flatResults = []
currentTop = startTop
head = [u"Fonts tested:", ]
for font in fonts:
head.append(u"%s: %s" % (font.info.postscriptFullName, font.path))
flatResults.append(("head", head))
flatResults.append(("blank line", None))
for name in names:
flatResults.append(("line", majorLine))
flatResults.append(("note", name))
flatResults.append(("line", majorLine))
flatResults.append(("blank line", None))
flatResults.append(("blank line", None))
flatResults.append(("glyph", name))
lines = []
for tag, content in flatResults:
if tag == "line":
lines.append((currentTop, content))
elif tag == "blank line":
currentTop -= blankLineHeight
elif tag == "head":
entities = [
("&", "&"),
("<", "<"),
(">", ">"),
('"', """),
("'", "'"),
]
pdf.setFillColorRGB(0, 0, 0)
textObject = pdf.beginText(margin, currentTop - 13)
textObject.setFont(settings["boldFont"], settings["textPointSize"], leading=12)
start = textObject.getY()
for line in content:
if line != content[0]:
textObject.setFont(settings["regularFont"], settings["textPointSize"], leading=12)
for b, a in entities:
content = line.replace(b, a)
textObject.textLine(line)
end = textObject.getY()
pdf.drawText(textObject)
currentTop -= (start - end)
elif tag == "note":
entities = [
("&", "&"),
("<", "<"),
(">", ">"),
('"', """),
("'", "'"),
]
for b, a in entities:
content = content.replace(b, a)
p = Paragraph(content, basicStyle)
w, h = p.wrap(settings["resultsWidth"], 5000)
rH = h + ((h / settings["textLeading"]) * 5)
if rH > 20:
rH -= ((rH / 20) - 1) * 5
if currentTop - (rH + glyphEntryHeight + blankLineHeight * 2) < margin:
lines = _finishPage(pdf, lines, settings)
_drawTemplate(pdf, settings)
currentTop = startTop
_drawHighlightBox(pdf, currentTop, rH, settings)
p.drawOn(pdf, settings["resultsLeft"], currentTop - h - 3)
_drawLabel(pdf, currentTop, "glyph:", settings)
currentTop -= rH
else:
# make sure that we have room to start all this
if currentTop - glyphEntryHeight < margin:
lines = _finishPage(pdf, lines, settings)
_drawTemplate(pdf, settings)
currentTop = startTop
_drawGlyphs(pdf, content, currentTop, fonts, settings)
currentTop -= glyphEntryHeight
for top, weight in lines:
_drawLine(pdf, top, weight, settings)
pdf.save()
def _finishPage(pdf, lines, settings):
for top, weight in lines:
_drawLine(pdf, top, weight, settings)
pdf.showPage()
_drawLine(pdf, settings["startTop"], settings["minorLine"], settings)
return [(settings["startTop"], lines[-1][1])]
def _drawGlyphs(pdf, name, top, fonts, settings):
pdf.saveState()
pdf.translate(settings["resultsLeft"], top - 25)
upm = fonts[0].info.unitsPerEm
scale = float(settings["glyphPointSize"]) / upm
pdf.scale(scale, scale)
pdf.translate(0, -fonts[0].info.descender)
for font in fonts:
if name not in font:
glyph = font[".notdef"]
else:
glyph = font[name]
w = glyph.width
pen = ReportLabPen(font, pdf)
glyph.draw(pen)
pdf.setFillColorRGB(0, 0, 0)
pdf.drawPath(pen.path, stroke=False, fill=True)
pdf.translate(float(w + settings["glyphSpacing"]), 0)
pdf.restoreState()
def _drawLine(pdf, top, weight, settings):
pdf.setFillColorRGB(0, 0, 0)
pdf.setLineWidth(weight)
pdf.line(settings["margin"], top, settings["resultsRight"], top)
def _drawHighlightBox(pdf, t, h, settings):
pdf.setFillColorRGB(.95, .95, .95)
pdf.rect(settings["margin"], t-h, settings["contentWidth"], h, stroke=False, fill=True)
def _drawLabel(pdf, top, text, settings):
pdf.setFont(settings["regularFont"], settings["textPointSize"], leading=settings["textLeading"])
pdf.setFillColorRGB(.5, .5, .5)
pdf.drawRightString(settings["labelRight"], top - 13, text)
def _drawTemplate(pdf, settings):
barHeight = 18
barBottom = settings["height"] - settings["margin"] - barHeight
barLeft = settings["margin"]
barWidth = settings["width"] - (settings["margin"] * 2)
pdf.setFillColorRGB(0, 0, 0)
pdf.rect(barLeft, barBottom, barWidth, barHeight, stroke=False, fill=True)
pdf.setFillColorRGB(1, 1, 1)
pdf.setFont(settings["boldFont"], 8)
text = u"%s" % (settings["date"])
pdf.drawString(barLeft + 5, barBottom + 7, text)
pdf.drawRightString(barLeft + barWidth - 5, barBottom + 7, str(pdf.getPageNumber()))
if __name__ == "__main__":
AppHelper.runEventLoop()
| |
"""
Operations module.
@module pygowave.operations
"""
__license__ = """
PyGoWave Client Script a.k.a. Microwave
Copyright (C) 2009 by p2k
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pycow.decorators import Class, Implements
from pycow.utils import Events
# Currently supported and official operations
DOCUMENT_INSERT = 'DOCUMENT_INSERT'
DOCUMENT_DELETE = 'DOCUMENT_DELETE'
DOCUMENT_ELEMENT_INSERT = 'DOCUMENT_ELEMENT_INSERT'
DOCUMENT_ELEMENT_DELETE = 'DOCUMENT_ELEMENT_DELETE'
# Currently supported, but non-official operations
DOCUMENT_ELEMENT_DELTA = 'DOCUMENT_ELEMENT_DELTA'
DOCUMENT_ELEMENT_SETPREF = 'DOCUMENT_ELEMENT_SETPREF'
# Currently supported operations, which are not handled within OT
#WAVELET_ADD_PARTICIPANT = 'WAVELET_ADD_PARTICIPANT'
#WAVELET_REMOVE_SELF = 'WAVELET_REMOVE_SELF'
# Currently not supported operations
#WAVELET_APPEND_BLIP = 'WAVELET_APPEND_BLIP'
#WAVELET_CREATE = 'WAVELET_CREATE'
#WAVELET_DATADOC_SET = 'WAVELET_DATADOC_SET'
#WAVELET_SET_TITLE = 'WAVELET_SET_TITLE'
#BLIP_CREATE_CHILD = 'BLIP_CREATE_CHILD'
#BLIP_DELETE = 'BLIP_DELETE'
#DOCUMENT_ANNOTATION_DELETE = 'DOCUMENT_ANNOTATION_DELETE'
#DOCUMENT_ANNOTATION_SET = 'DOCUMENT_ANNOTATION_SET'
#DOCUMENT_ANNOTATION_SET_NORANGE = 'DOCUMENT_ANNOTATION_SET_NORANGE'
#DOCUMENT_APPEND = 'DOCUMENT_APPEND'
#DOCUMENT_APPEND_STYLED_TEXT = 'DOCUMENT_APPEND_STYLED_TEXT'
#DOCUMENT_REPLACE = 'DOCUMENT_REPLACE'
#DOCUMENT_ELEMENT_APPEND = 'DOCUMENT_ELEMENT_APPEND'
#DOCUMENT_ELEMENT_INSERT_AFTER = 'DOCUMENT_ELEMENT_INSERT_AFTER'
#DOCUMENT_ELEMENT_INSERT_BEFORE = 'DOCUMENT_ELEMENT_INSERT_BEFORE'
#DOCUMENT_ELEMENT_REPLACE = 'DOCUMENT_ELEMENT_REPLACE'
#DOCUMENT_INLINE_BLIP_APPEND = 'DOCUMENT_INLINE_BLIP_APPEND'
#DOCUMENT_INLINE_BLIP_DELETE = 'DOCUMENT_INLINE_BLIP_DELETE'
#DOCUMENT_INLINE_BLIP_INSERT = 'DOCUMENT_INLINE_BLIP_INSERT'
#DOCUMENT_INLINE_BLIP_INSERT_AFTER_ELEMENT = 'DOCUMENT_INLINE_BLIP_INSERT_AFTER_ELEMENT'
__all__ = [
"OpManager",
"DOCUMENT_INSERT",
"DOCUMENT_DELETE",
"DOCUMENT_ELEMENT_INSERT",
"DOCUMENT_ELEMENT_DELETE",
"DOCUMENT_ELEMENT_DELTA",
"DOCUMENT_ELEMENT_SETPREF",
]
@Class
class Operation(object):
"""
Represents a generic operation applied on the server.
This operation class contains data that is filled in depending on the
operation type.
It can be used directly, but doing so will not result
in local, transient reflection of state on the blips. In other words,
creating a "delete blip" operation will not remove the blip from the local
context for the duration of this session. It is better to use the OpBased
model classes directly instead.
@class {private} pygowave.operations.Operation
"""
def __init__(self, op_type, waveId, waveletId, blipId='', index=-1, prop=None):
"""
Initializes this operation with contextual data.
@constructor {public} initialize
@param {String} op_type Type of operation
@param {String} waveId The id of the wave that this operation is to
be applied to.
@param {String} waveletId The id of the wavelet that this operation is
to be applied to.
@param {optional String} blipId The optional id of the blip that this
operation is to be applied to.
@param {optional int} index Optional integer index for content-based
operations.
@param {optional Object} prop A weakly typed property object whose
meaning depends on the context of this operation.
"""
self.type = op_type
self.waveId = waveId
self.waveletId = waveletId
self.blipId = blipId
self.index = index
self.property = prop
def clone(self):
"""
Create a copy of this operation.
@function {public Operation} clone
"""
return Operation(self.type, self.waveId, self.waveletId, self.blipId,
self.index, self.property)
def isNull(self):
"""
Return whether this operation is a null operation, i.e. it does not
change anything.
@function {public Boolean} isNull
"""
if self.type == DOCUMENT_INSERT:
return len(self.property) == 0
elif self.type == DOCUMENT_DELETE:
return self.property == 0
return False
def isCompatibleTo(self, other_op):
"""
Check if the operation can be influenced by `other_op` and vice-versa.
@function {public Boolean} isCompatibleTo
@param {Operation} other_op
"""
# Currently all supported operations are compatible to each other (if on the same blip)
# DOCUMENT_INSERT DOCUMENT_DELETE DOCUMENT_ELEMENT_INSERT DOCUMENT_ELEMENT_DELETE DOCUMENT_ELEMENT_DELTA DOCUMENT_ELEMENT_SETPREF
if self.waveId != other_op.waveId \
or self.waveletId != other_op.waveletId \
or self.blipId != other_op.blipId:
return False
return True
def isInsert(self):
"""
Returns true, if this op is an insertion operation.
@function {public Boolean} isInsert
"""
return (self.type == DOCUMENT_INSERT or self.type == DOCUMENT_ELEMENT_INSERT)
def isDelete(self):
"""
Returns true, if this op is a deletion operation.
@function {public Boolean} isDelete
"""
return (self.type == DOCUMENT_DELETE or self.type == DOCUMENT_ELEMENT_DELETE)
def isChange(self):
"""
Returns true, if this op is an (attribute) change operation.
@function {public Boolean} isChange
"""
return (self.type == DOCUMENT_ELEMENT_DELTA or self.type == DOCUMENT_ELEMENT_SETPREF)
def length(self):
"""
Returns the length of this operation.
This can be interpreted as the distance a concurrent operation's index
must be moved to include the effects of this operation.
@function {public int} length
"""
if self.type == DOCUMENT_INSERT:
return len(self.property)
elif self.type == DOCUMENT_DELETE:
return self.property
elif self.type == DOCUMENT_ELEMENT_INSERT or self.type == DOCUMENT_ELEMENT_DELETE:
return 1
return 0
def resize(self, value):
"""
Delete operations: Sets the amount of deleted characters/elements to
`value`.
Other operations: No effect.
@function {public} resize
@param {int} value
"""
if self.type == DOCUMENT_DELETE:
if value > 0:
self.property = value
else:
self.property = 0
def insertString(self, pos, s):
"""
DOCUMENT_INSERT: Inserts the string into the property.
Other operations: No effect.
@function {public} insertString
@param {int} pos Position to insert the string
@param {String} s String to insert
"""
if self.type == DOCUMENT_INSERT:
self.property = self.property[:pos] + s + self.property[pos:]
def deleteString(self, pos, length):
"""
DOCUMENT_INSERT: Deletes a substring from the property.
Other operations: No effect.
@function {public} deleteString
@param {int} pos Position to delete the substring
@param {int} length Amount of characters to remove
"""
if self.type == DOCUMENT_INSERT:
self.property = self.property[:pos] + self.property[pos+length:]
def serialize(self):
"""
Serialize this operation into a dictionary. Official robots API format.
@function {public String} serialize
"""
return {
"type": self.type,
"waveId": self.waveId,
"waveletId": self.waveletId,
"blipId": self.blipId,
"index": self.index,
"property": self.property,
}
def __repr__(self):
return "%s(\"%s\",%d,%s)" % (self.type.lower(), self.blipId,
self.index, repr(self.property))
@staticmethod
def unserialize(obj):
"""
Unserialize an operation from a dictionary.
@function {public static Operation} unserialize
"""
return Operation(obj["type"], obj["waveId"], obj["waveletId"],
obj["blipId"], obj["index"], obj["property"])
@Implements(Events)
@Class
class OpManager(object):
"""
Manages operations: Creating, merging, transforming, serializing.
The operation manager wraps single operations as functions and generates
operations in-order. It keeps a list of operations and allows
transformation, merging and serializing.
An OpManager is always associated with exactly one wave/wavelet.
@class {public} pygowave.operations.OpManager
"""
# --- Event documentation ---
"""
Fired if an operation in this manager has been changed.
@event onOperationChanged
@param {int} index Index of the changed operation
"""
"""
Fired if one or more operations are about to be removed.
@event onBeforeOperationsRemoved
@param {int} start Start index of the removal.
@param {int} end End index of the removal.
"""
"""
Fired if one or more operations have been removed.
@event onAfterOperationsRemoved
@param {int} start Start index of the removal.
@param {int} end End index of the removal.
"""
"""
Fired if one or more operations are about to be inserted.
@event onBeforeOperationsInserted
@param {int} start Start index of the insertion.
@param {int} end End index of the insertion.
"""
"""
Fired if one or more operations have been inserted.
@event onAfterOperationsInserted
@param {int} start Start index of the insertion.
@param {int} end End index of the insertion.
"""
# ---------------------------
def __init__(self, waveId, waveletId):
"""
Initializes the op manager with a wave and wavelet ID.
@constructor {public} initialize
@param {String} waveId The ID of the wave
@param {String} waveletId The ID of the wavelet
"""
self.waveId = waveId
self.waveletId = waveletId
self.operations = []
def isEmpty(self):
"""
Return true if this manager is not holding operations.
@function {public Boolean} isEmpty
"""
return len(self.operations) == 0
def transform(self, input_op):
"""
Transform the input operation on behalf of the manager's operations
list. This will simultaneously transform the operations list on behalf
of the input operation.
This method returns a list of applicable operations. This list may be
empty, or it may contain any number of new operations (resulting from
deletion, modification and splitting; the input operation itself is
never modified).
@function {public Operation[]} transform
@param {Operation} input_op
"""
new_op = None
op_lst = [input_op.clone()]
# From The Zen of Python, by Tim Peters:
# "Complex is better than complicated."
i = 0
while i < len(self.operations):
myop = self.operations[i]
j = 0
while j < len(op_lst):
op = op_lst[j]
# Do not handle incompatible operations
if not op.isCompatibleTo(myop):
    j += 1
    continue
# Check all possible cases
end = None
if op.isDelete() and myop.isDelete():
if op.index < myop.index:
end = op.index + op.length()
if end <= myop.index:
myop.index -= op.length()
self.fireEvent("operationChanged", i)
elif end < myop.index + myop.length(): # and end > myop.index
op.resize(myop.index - op.index)
myop.resize(myop.length() - (end - myop.index))
myop.index = op.index
self.fireEvent("operationChanged", i)
else: # end >= myop.index + myop.length()
op.resize(op.length() - myop.length())
self.fireEvent("beforeOperationsRemoved", [i, i])
self.operations.pop(i)
self.fireEvent("afterOperationsRemoved", [i, i])
i -= 1
break
else: # op.index >= myop.index
end = myop.index + myop.length()
if op.index >= end:
op.index -= myop.length()
elif op.index + op.length() <= end: # and op.index < end
myop.resize(myop.length() - op.length())
op_lst.pop(j)
j -= 1
if myop.isNull():
self.fireEvent("beforeOperationsRemoved", [i, i])
self.operations.pop(i)
self.fireEvent("afterOperationsRemoved", [i, i])
i -= 1
break
else:
self.fireEvent("operationChanged", i)
else: # op.index + op.length() > end
myop.resize(myop.length() - (end - op.index))
self.fireEvent("operationChanged", i)
op.resize(op.length() - (end - op.index))
op.index = myop.index
elif op.isDelete() and myop.isInsert():
if op.index < myop.index:
if op.index + op.length() <= myop.index:
myop.index -= op.length()
self.fireEvent("operationChanged", i)
else: # op.index + op.length() > myop.index
new_op = op.clone()
op.resize(myop.index - op.index)
new_op.resize(new_op.length() - op.length())
op_lst.insert(j+1, new_op)
myop.index -= op.length()
self.fireEvent("operationChanged", i)
else: # op.index >= myop.index
op.index += myop.length()
elif op.isInsert() and myop.isDelete():
if op.index <= myop.index:
myop.index += op.length()
self.fireEvent("operationChanged", i)
elif op.index >= myop.index + myop.length(): # op.index > myop.index
op.index -= myop.length()
else: # op.index < myop.index + myop.length()
new_op = myop.clone()
myop.resize(op.index - myop.index)
self.fireEvent("operationChanged", i)
new_op.resize(new_op.length() - myop.length())
self.fireEvent("beforeOperationsInserted", [i+1, i+1])
self.operations.insert(i+1, new_op)
self.fireEvent("afterOperationsInserted", [i+1, i+1])
op.index = myop.index
elif op.isInsert() and myop.isInsert():
if op.index <= myop.index:
myop.index += op.length()
self.fireEvent("operationChanged", i)
else: # op.index > myop.index
op.index += myop.length()
elif op.isChange() and myop.isDelete():
if op.index > myop.index:
if op.index <= myop.index + myop.length():
op.index = myop.index
else:
op.index -= myop.length()
elif op.isChange() and myop.isInsert():
if op.index >= myop.index:
op.index += myop.length()
elif op.isDelete() and myop.isChange():
if op.index < myop.index:
if myop.index <= op.index + op.length():
myop.index = op.index
self.fireEvent("operationChanged", i)
else:
myop.index -= op.length()
self.fireEvent("operationChanged", i)
elif op.isInsert() and myop.isChange():
if op.index <= myop.index:
myop.index += op.length()
self.fireEvent("operationChanged", i)
j += 1
i += 1
return op_lst
def fetch(self):
"""
Returns the pending operations and removes them from this manager.
@function {public Operation[]} fetch
"""
ops = self.operations
self.fireEvent("beforeOperationsRemoved", [0, len(ops)-1])
self.operations = []
self.fireEvent("afterOperationsRemoved", [0, len(ops)-1])
return ops
def put(self, ops):
"""
Opposite of fetch. Inserts all given operations into this manager.
@function {public} put
@param {Operation[]} ops
"""
if len(ops) == 0:
return
start = len(self.operations)
end = start + len(ops) - 1
self.fireEvent("beforeOperationsInserted", [start, end])
self.operations.extend(ops)
self.fireEvent("afterOperationsInserted", [start, end])
def serialize(self, fetch = False):
"""
Serialize this manager's operations into a list of dictionaries.
Set fetch to true to also clear this manager.
@function {public Object[]} serialize
@param {optional Boolean} fetch
"""
if fetch:
ops = self.fetch()
else:
ops = self.operations
out = []
for op in ops:
out.append(op.serialize())
return out
def unserialize(self, serial_ops):
"""
Unserialize a list of dictionaries to operations and add them to this
manager.
@function {public} unserialize
@param {Object[]} serial_ops
"""
ops = []
for op in serial_ops:
ops.append(Operation.unserialize(op))
self.put(ops)
def __insert(self, newop):
"""
Inserts and probably merges an operation into the manager's
operation list.
@function {private} __insert
@param {Operation} newop
"""
# Element delta's can always merge with predecessors
op = None
i = 0
if newop.type == DOCUMENT_ELEMENT_DELTA:
for i in xrange(len(self.operations)):
op = self.operations[i]
if op.type == DOCUMENT_ELEMENT_DELTA and newop.property["id"] == op.property["id"]:
op.property["delta"].update(newop.property["delta"])
self.fireEvent("operationChanged", i)
return
# Others: Only merge with the last op (otherwise this may get a bit complicated)
i = len(self.operations) - 1
if i >= 0:
op = self.operations[i]
if newop.type == DOCUMENT_INSERT and op.type == DOCUMENT_INSERT:
if newop.index >= op.index and newop.index <= op.index+op.length():
op.insertString(newop.index-op.index, newop.property)
self.fireEvent("operationChanged", i)
return
elif newop.type == DOCUMENT_DELETE and op.type == DOCUMENT_INSERT:
if newop.index >= op.index and newop.index < op.index+op.length():
remain = op.length() - (newop.index - op.index)
if remain > newop.length():
op.deleteString(newop.index - op.index, newop.length())
newop.resize(0)
else:
op.deleteString(newop.index - op.index, remain)
newop.resize(newop.length() - remain)
if op.isNull():
self.fireEvent("beforeOperationsRemoved", [i, i])
self.operations.pop(i)
self.fireEvent("afterOperationsRemoved", [i, i])
i -= 1
else:
self.fireEvent("operationChanged", i)
if newop.isNull():
return
elif newop.index < op.index and newop.index+newop.length() > op.index:
if newop.index+newop.length() >= op.index+op.length():
newop.resize(newop.length() - op.length())
self.fireEvent("beforeOperationsRemoved", [i, i])
self.operations.pop(i)
self.fireEvent("afterOperationsRemoved", [i, i])
i -= 1
else:
dlength = newop.index+newop.length() - op.index
newop.resize(newop.length() - dlength)
op.deleteString(0, dlength)
self.fireEvent("operationChanged", i)
elif newop.type == DOCUMENT_DELETE and op.type == DOCUMENT_DELETE:
if newop.index == op.index: # Delete at start
op.resize(op.length() + newop.length())
self.fireEvent("operationChanged", i)
return
if newop.index == op.index-newop.length(): # Delete at end
op.index -= newop.length()
op.resize(op.length() + newop.length())
self.fireEvent("operationChanged", i)
return
# If we reach this the operation could not be merged, so add it.
self.fireEvent("beforeOperationsInserted", [i+1, i+1])
self.operations.append(newop)
self.fireEvent("afterOperationsInserted", [i+1, i+1])
# --------------------------------------------------------------------------
def documentInsert(self, blipId, index, content):
"""
Requests to insert content into a document at a specific location.
@function {public} documentInsert
@param {String} blipId The blip id that this operation is applied to
@param {int} index The position to insert the content at in this document
@param {String} content The content to insert
"""
self.__insert(Operation(
DOCUMENT_INSERT,
self.waveId, self.waveletId, blipId,
index,
content
))
def documentDelete(self, blipId, start, end):
"""
Requests to delete content in a given range.
@function {public} documentDelete
@param {String} blipId The blip id that this operation is applied to
@param {int} start Start of the range
@param {int} end End of the range
"""
self.__insert(Operation(
DOCUMENT_DELETE,
self.waveId, self.waveletId, blipId,
start,
end-start # = length
))
def documentElementInsert(self, blipId, index, type, properties):
"""
Requests to insert an element at the given position.
@function {public} documentElementInsert
@param {String} blipId The blip id that this operation is applied to
@param {int} index Position of the new element
@param {String} type Element type
@param {Object} properties Element properties
"""
self.__insert(Operation(
DOCUMENT_ELEMENT_INSERT,
self.waveId, self.waveletId, blipId,
index,
{
"type": type,
"properties": properties
}
))
def documentElementDelete(self, blipId, index):
"""
Requests to delete an element from the given position.
@function {public} documentElementDelete
@param {String} blipId The blip id that this operation is applied to
@param {int} index Position of the element to delete
"""
self.__insert(Operation(
DOCUMENT_ELEMENT_DELETE,
self.waveId, self.waveletId, blipId,
index,
None
))
def documentElementDelta(self, blipId, index, delta):
"""
Requests to apply a delta to the element at the given position.
@function {public} documentElementDelta
@param {String} blipId The blip id that this operation is applied to
@param {int} index Position of the element
@param {Object} delta Delta to apply to the element
"""
self.__insert(Operation(
DOCUMENT_ELEMENT_DELTA,
self.waveId, self.waveletId, blipId,
index,
delta
))
def documentElementSetpref(self, blipId, index, key, value):
"""
Requests to set a UserPref of the element at the given position.
@function {public} documentElementSetpref
@param {String} blipId The blip id that this operation is applied to
@param {int} index Position of the element
@param {Object} key Name of the UserPref
@param {Object} value Value of the UserPref
"""
self.__insert(Operation(
DOCUMENT_ELEMENT_SETPREF,
self.waveId, self.waveletId, blipId,
index,
{
"key": key,
"value": value
}
))
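# Illustrative sketch (not part of the original module; it assumes pycow's
# @Class decorator leaves Operation usable as an ordinary Python class).
# The wave, wavelet and blip ids below are made up for demonstration.
def _operation_example():
    op = Operation(DOCUMENT_INSERT, "wave-1", "wavelet-1", "blip-1", 0, "Hello")
    assert op.isInsert() and not op.isDelete()
    assert op.length() == 5
    copy = Operation.unserialize(op.serialize())
    assert copy.property == "Hello"
    return copy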
| |
from __future__ import print_function
import sys
import os
import subprocess
import txtsh.shell as shell
import txtsh.file_explorer as file_explorer
import txtsh.log as log
import txtsh.pager as pager
from txtsh.text import Text
from txtsh.header import *
def _help(*args):
pager.page('README.md')
return GO
def _info(*args):
print("TXTSH Text Analysis Shell (ver. 0.0) on {}." .format(PLATFORM))
print("Type '!help' for a list of commands.")
return GO
def _log(*args):
if len(args) > 0:
if args[0] == 'clear':
log.clear()
else:
print("Unknown command to log: '{}'" .format(args[0]))
else:
log.view()
return GO
def _load(*args):
"""
data_type must be either 'string' or 'file'.
If data_type is 'string', data will be
loaded verbatim into a new Text object.
If data_type is 'file', it will attempt to
open the data as a read-only text file and
load its contents into a new Text object.
A file explorer will launch if file data is not provided.
"""
if len(args) < 1:
print("Error: load takes at least one argument.")
return GO
data_type = args[0]
if data_type != 'string' and data_type != 'file':
print("Error: you must specify 'string' or 'file'")
return GO
if len(args) != 2 and data_type == 'string':
print("Error: you must provide a single string to load.")
return GO
if len(args) == 1 and data_type == 'file':
explorer = file_explorer.Explorer()
data = explorer.navigate()
if data is None:
print("Error: must provide a data file to load.")
return GO
else:
data = args[1]
new = Text()
try:
new.load_data(data_type, data)
return GO
except Exception:
log.write(traceback=True)
print("Loading text data failed.")
return GO
def _free(*args):
id_num = int(args[0])
obj = None
for member in Text.members:
if member.id_num == id_num:
obj = member
if not obj:
print("No object found with id '{}'." .format(id_num))
return GO
Text.members.remove(obj)
return GO
def _list(*args):
if len(Text.members) == 0:
print("No objects loaded.")
else:
print("ID\tSAMPLE\n")
for member in Text.members:
print('%2s' % member.id_num, end="")
print('\t', end="")
print('"{}"' .format(member.title))
return GO
def _use(*args):
try:
id_num = int(args[0])
except:
print("Error: must specify an ID number.")
return GO
obj = None
for member in Text.members:
if member.id_num == id_num:
obj = member
if not obj:
print("No object found with id '{}'." .format(id_num))
return GO
subsh = shell.Subshell(obj)
subsh.run()
return GO
def _quit(*args):
# Cleanup memory for any loaded objects
for i in reversed(range(len(Text.members))):
_free(Text.members[i].id_num)
return STOP
def _restart(*args):
ans = raw_input("Restart txtsh? All loaded objects will be freed! [y/n]: ")
if ans.lower().strip() == 'y':
print("\nRestarting...\n")
# Cleanup memory for any loaded objects
for i in reversed(range(len(Text.members))):
_free(Text.members[i].id_num)
python = sys.executable
os.execl(python, python, * sys.argv)
else:
return GO
def _update(*args):
print("Don't use this...")
return GO
print("Updating txtsh...")
try:
os.chdir(TXTSH_HOME_DIR)
p = subprocess.Popen(['git', 'pull', 'origin', 'master'])
code = p.wait()
if code == 0:
print("Update complete.")
print("If changes were made, restart \
(!restart) txtsh for changes to take effect.")
else:
print("Update failed.")
except:
print("Update command failed to execute.")
return GO
# ---- MAPPINGS -----
map = {
'!help': _help,
'!info': _info,
'!log': _log,
'!load': _load,
'!free': _free,
'!list': _list,
'!use': _use,
'!quit': _quit,
'!restart': _restart,
'!update': _update
}
| |
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import datetime
import os
import sys
from os.path import join as pjoin
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
import numpy as np
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_equal, assert_equal, assert_,
assert_raises, dec, run_module_suite)
from scipy.io.arff.arffread import loadarff
from scipy.io.arff.arffread import read_header, parse_type, ParseArffError
from scipy._lib._version import NumpyVersion
data_path = pjoin(os.path.dirname(__file__), 'data')
test1 = os.path.join(data_path, 'test1.arff')
test2 = os.path.join(data_path, 'test2.arff')
test3 = os.path.join(data_path, 'test3.arff')
test4 = pjoin(data_path, 'test4.arff')
test5 = pjoin(data_path, 'test5.arff')
test6 = pjoin(data_path, 'test6.arff')
test7 = pjoin(data_path, 'test7.arff')
test8 = pjoin(data_path, 'test8.arff')
expect4_data = [(0.1, 0.2, 0.3, 0.4, 'class1'),
(-0.1, -0.2, -0.3, -0.4, 'class2'),
(1, 2, 3, 4, 'class3')]
expected_types = ['numeric', 'numeric', 'numeric', 'numeric', 'nominal']
missing = pjoin(data_path, 'missing.arff')
expect_missing_raw = np.array([[1, 5], [2, 4], [np.nan, np.nan]])
expect_missing = np.empty(3, [('yop', float), ('yap', float)])
expect_missing['yop'] = expect_missing_raw[:, 0]
expect_missing['yap'] = expect_missing_raw[:, 1]
class DataTest(TestCase):
def test1(self):
# Parsing trivial file with nothing.
self._test(test4)
def test2(self):
# Parsing trivial file with some comments in the data section.
self._test(test5)
def test3(self):
# Parsing trivial file with nominal attribute of 1 character.
self._test(test6)
def _test(self, test_file):
data, meta = loadarff(test_file)
for i in range(len(data)):
for j in range(4):
assert_array_almost_equal(expect4_data[i][j], data[i][j])
assert_equal(meta.types(), expected_types)
def test_filelike(self):
# Test reading from file-like object (StringIO)
f1 = open(test1)
data1, meta1 = loadarff(f1)
f1.close()
f2 = open(test1)
data2, meta2 = loadarff(StringIO(f2.read()))
f2.close()
assert_(data1 == data2)
assert_(repr(meta1) == repr(meta2))
class MissingDataTest(TestCase):
def test_missing(self):
data, meta = loadarff(missing)
for i in ['yop', 'yap']:
assert_array_almost_equal(data[i], expect_missing[i])
class NoDataTest(TestCase):
def test_nodata(self):
# The file nodata.arff has no data in the @DATA section.
# Reading it should result in an array with length 0.
nodata_filename = os.path.join(data_path, 'nodata.arff')
data, meta = loadarff(nodata_filename)
expected_dtype = np.dtype([('sepallength', '<f8'),
('sepalwidth', '<f8'),
('petallength', '<f8'),
('petalwidth', '<f8'),
('class', 'S15')])
assert_equal(data.dtype, expected_dtype)
assert_equal(data.size, 0)
class HeaderTest(TestCase):
def test_type_parsing(self):
# Test parsing type of attribute from their value.
ofile = open(test2)
rel, attrs = read_header(ofile)
ofile.close()
expected = ['numeric', 'numeric', 'numeric', 'numeric', 'numeric',
'numeric', 'string', 'string', 'nominal', 'nominal']
for i in range(len(attrs)):
assert_(parse_type(attrs[i][1]) == expected[i])
def test_badtype_parsing(self):
# Test parsing wrong type of attribute from their value.
ofile = open(test3)
rel, attrs = read_header(ofile)
ofile.close()
for name, value in attrs:
assert_raises(ParseArffError, parse_type, value)
def test_fullheader1(self):
# Parsing trivial header with nothing.
ofile = open(test1)
rel, attrs = read_header(ofile)
ofile.close()
# Test relation
assert_(rel == 'test1')
# Test numerical attributes
assert_(len(attrs) == 5)
for i in range(4):
assert_(attrs[i][0] == 'attr%d' % i)
assert_(attrs[i][1] == 'REAL')
# Test nominal attribute
assert_(attrs[4][0] == 'class')
assert_(attrs[4][1] == '{class0, class1, class2, class3}')
def test_dateheader(self):
ofile = open(test7)
rel, attrs = read_header(ofile)
ofile.close()
assert_(rel == 'test7')
assert_(len(attrs) == 5)
assert_(attrs[0][0] == 'attr_year')
assert_(attrs[0][1] == 'DATE yyyy')
assert_(attrs[1][0] == 'attr_month')
assert_(attrs[1][1] == 'DATE yyyy-MM')
assert_(attrs[2][0] == 'attr_date')
assert_(attrs[2][1] == 'DATE yyyy-MM-dd')
assert_(attrs[3][0] == 'attr_datetime_local')
assert_(attrs[3][1] == 'DATE "yyyy-MM-dd HH:mm"')
assert_(attrs[4][0] == 'attr_datetime_missing')
assert_(attrs[4][1] == 'DATE "yyyy-MM-dd HH:mm"')
def test_dateheader_unsupported(self):
ofile = open(test8)
rel, attrs = read_header(ofile)
ofile.close()
assert_(rel == 'test8')
assert_(len(attrs) == 2)
assert_(attrs[0][0] == 'attr_datetime_utc')
assert_(attrs[0][1] == 'DATE "yyyy-MM-dd HH:mm Z"')
assert_(attrs[1][0] == 'attr_datetime_full')
assert_(attrs[1][1] == 'DATE "yy-MM-dd HH:mm:ss z"')
class DateAttributeTest(TestCase):
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
"No np.datetime64 in Numpy < 1.7.0")
def setUp(self):
self.data, self.meta = loadarff(test7)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
"No np.datetime64 in Numpy < 1.7.0")
def test_year_attribute(self):
expected = np.array([
'1999',
'2004',
'1817',
'2100',
'2013',
'1631'
], dtype='datetime64[Y]')
assert_array_equal(self.data["attr_year"], expected)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
"No np.datetime64 in Numpy < 1.7.0")
def test_month_attribute(self):
expected = np.array([
'1999-01',
'2004-12',
'1817-04',
'2100-09',
'2013-11',
'1631-10'
], dtype='datetime64[M]')
assert_array_equal(self.data["attr_month"], expected)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
"No np.datetime64 in Numpy < 1.7.0")
def test_date_attribute(self):
expected = np.array([
'1999-01-31',
'2004-12-01',
'1817-04-28',
'2100-09-10',
'2013-11-30',
'1631-10-15'
], dtype='datetime64[D]')
assert_array_equal(self.data["attr_date"], expected)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
"No np.datetime64 in Numpy < 1.7.0")
def test_datetime_local_attribute(self):
expected = np.array([
datetime.datetime(year=1999, month=1, day=31, hour=0, minute=1),
datetime.datetime(year=2004, month=12, day=1, hour=23, minute=59),
datetime.datetime(year=1817, month=4, day=28, hour=13, minute=0),
datetime.datetime(year=2100, month=9, day=10, hour=12, minute=0),
datetime.datetime(year=2013, month=11, day=30, hour=4, minute=55),
datetime.datetime(year=1631, month=10, day=15, hour=20, minute=4)
], dtype='datetime64[m]')
assert_array_equal(self.data["attr_datetime_local"], expected)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
"No np.datetime64 in Numpy < 1.7.0")
def test_datetime_missing(self):
expected = np.array([
'nat',
'2004-12-01T23:59Z',
'nat',
'nat',
'2013-11-30T04:55Z',
'1631-10-15T20:04Z'
], dtype='datetime64[m]')
assert_array_equal(self.data["attr_datetime_missing"], expected)
def test_datetime_timezone(self):
assert_raises(ValueError, loadarff, test8)
if __name__ == "__main__":
run_module_suite()
| |
# a Pythonesque Canvas v0.8
# Author : Jerome Alet - <alet@librelogiciel.com>
# License : ReportLab's license
#
# $Id:pycanvas.py 1821 2002-11-06 17:11:31Z rgbecker $
#
__doc__ = """pycanvas.Canvas : a Canvas class which can also output Python source code.
pycanvas.Canvas class works exactly like canvas.Canvas, but you can
call str() on pycanvas.Canvas instances. Doing so will return the
Python source code equivalent to your own program, which would, when
run, produce the same PDF document as your original program.
Generated Python source code defines a doIt() function which accepts
a filename or file-like object as its first parameter, and an
optional boolean parameter named "regenerate".
The doIt() function will generate a PDF document and save it in the
file you specified in this argument. If the regenerate parameter is
set then it will also return an automatically generated equivalent
Python source code as a string of text, which you can run again to
produce the very same PDF document and the Python source code, which
you can run again... ad nauseam ! If the regenerate parameter is
unset or not used at all (it then defaults to being unset) then None
is returned and the doIt() function is much, much faster; it is also
much faster than the original non-serialized program.
the reportlab/test/test_pdfgen_pycanvas.py program is the test suite
for pycanvas, you can do the following to run it :
First set verbose=1 in reportlab/rl_config.py
then from the command interpreter :
$ cd reportlab/test
$ python test_pdfgen_pycanvas.py >n1.py
this will produce both n1.py and test_pdfgen_pycanvas.pdf
then :
$ python n1.py n1.pdf >n2.py
$ python n2.py n2.pdf >n3.py
$ ...
n1.py, n2.py, n3.py and so on will be identical files.
they may eventually end up being a bit different because of
rounding problems, mostly in the comments, but this
doesn't matter since the values really are the same
(e.g. 0 instead of 0.0, or .53 instead of 0.53)
n1.pdf, n2.pdf, n3.pdf and so on will be PDF files
similar to test_pdfgen_pycanvas.pdf.
Alternatively you can import n1.py (or n3.py, or n16384.py if you prefer)
in your own program, and then call its doIt function :
import n1
pythonsource = n1.doIt("myfile.pdf", regenerate=1)
Or if you don't need the python source code and want a faster result :
import n1
n1.doIt("myfile.pdf")
When the generated source code is run directly as an independent program,
then the equivalent python source code is printed to stdout, e.g. :
python n1.py
will print the python source code equivalent to n1.py
Why would you want to use such a beast ?
- To linearize (serialize?) a program : optimizing some complex
parts for example.
- To debug : reading the generated Python source code may help you or
the ReportLab team to diagnose problems. The generated code is now
clearly commented and shows nesting levels, page numbers, and so
on. You can use the generated script when asking for support : we
can see the results you obtain without needing your data or complete
application.
- To create standalone scripts : say your program uses a high level
environment to generate its output (databases, RML, etc...), using
this class would give you an equivalent program but with complete
independence from the high level environment (e.g. if you don't
have Oracle).
- To contribute some nice looking PDF documents to the ReportLab website
without having to send a complete application you don't want to
distribute.
- ... Insert your own ideas here ...
- For fun because you can do it !
"""
import cStringIO
from reportlab.pdfgen import canvas
from reportlab.pdfgen import pathobject
from reportlab.pdfgen import textobject
PyHeader = '''#! /usr/bin/env python
#
# This code was entirely generated by ReportLab (http://www.reportlab.com)
#
import sys
from reportlab.pdfgen import pathobject
from reportlab.pdfgen import textobject
from reportlab.lib.colors import Color
def doIt(file, regenerate=0) :
"""Generates a PDF document, save it into file.
file : either a filename or a file-like object.
regenerate : if set then this function returns the Python source
code which when run will produce the same result.
if unset then this function returns None, and is
much faster.
"""
if regenerate :
from reportlab.pdfgen.pycanvas import Canvas
else :
from reportlab.pdfgen.canvas import Canvas
'''
PyFooter = '''
# if we want the equivalent Python source code, then send it back
if regenerate :
return str(c)
if __name__ == "__main__" :
if len(sys.argv) != 2 :
# second argument must be the name of the PDF file to create
sys.stderr.write("%s needs one and only one argument\\n" % sys.argv[0])
sys.exit(-1)
else :
# we've got a filename, we can proceed.
print doIt(sys.argv[1], regenerate=1)
sys.exit(0)'''
def buildargs(*args, **kwargs) :
"""Constructs a printable list of arguments suitable for use in source function calls."""
arguments = ""
for arg in args :
arguments = arguments + ("%s, " % repr(arg))
for (kw, val) in kwargs.items() :
arguments = arguments+ ("%s=%s, " % (kw, repr(val)))
if arguments[-2:] == ", " :
arguments = arguments[:-2]
return arguments
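# Small sketch (not part of the original module) of what buildargs produces:
# the rendering matches how calls appear in the generated source code.
def _buildargs_example():
    assert buildargs(100, 200, text='Hello') == "100, 200, text='Hello'"
    return True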
class PDFAction :
"""Base class to fake method calls or attributes on PDF objects (Canvas, PDFPathObject, PDFTextObject)."""
def __init__(self, parent, action) :
"""Saves a pointer to the parent object, and the method name."""
self._parent = parent
self._action = action
def __getattr__(self, name) :
"""Probably a method call on an attribute, returns the real one."""
return getattr(getattr(self._parent._object, self._action), name)
def __call__(self, *args, **kwargs) :
"""The fake method is called, print it then call the real one."""
if not self._parent._parent._in :
self._precomment()
self._parent._parent._PyWrite(" %s.%s(%s)" % (self._parent._name, self._action, apply(buildargs, args, kwargs)))
self._postcomment()
self._parent._parent._in = self._parent._parent._in + 1
retcode = apply(getattr(self._parent._object, self._action), args, kwargs)
self._parent._parent._in = self._parent._parent._in - 1
return retcode
def __hash__(self) :
return hash(getattr(self._parent._object, self._action))
def __coerce__(self, other) :
"""Needed."""
return coerce(getattr(self._parent._object, self._action), other)
def _precomment(self) :
"""To be overriden."""
pass
def _postcomment(self) :
"""To be overriden."""
pass
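# What PDFAction ultimately emits: for a call like c.setFont("Helvetica", 12)
# on the fake Canvas below, __call__ writes a line of the form
#     c.setFont('Helvetica', 12)
# into the generated source (via _PyWrite) and then forwards the call to the
# real reportlab Canvas.  setFont is only an illustrative method name; any
# canvas method is intercepted the same way.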
class PDFObject :
"""Base class for PDF objects like PDFPathObject and PDFTextObject."""
_number = 0
def __init__(self, parent) :
"""Saves a pointer to the parent Canvas."""
self._parent = parent
self._initdone = 0
def __getattr__(self, name) :
"""The user's programs wants to call one of our methods or get an attribute, fake it."""
return PDFAction(self, name)
def __repr__(self) :
"""Returns the name used in the generated source code (e.g. 'p' or 't')."""
return self._name
def __call__(self, *args, **kwargs) :
"""Real object initialisation is made here, because now we've got the arguments."""
if not self._initdone :
self.__class__._number = self.__class__._number + 1
methodname = apply(self._postinit, args, kwargs)
self._parent._PyWrite("\n # create PDF%sObject number %i\n %s = %s.%s(%s)" % (methodname[5:], self.__class__._number, self._name, self._parent._name, methodname, apply(buildargs, args, kwargs)))
self._initdone = 1
return self
class Canvas :
"""Our fake Canvas class, which will intercept each and every method or attribute access."""
class TextObject(PDFObject) :
_name = "t"
def _postinit(self, *args, **kwargs) :
self._object = apply(textobject.PDFTextObject, (self._parent, ) + args, kwargs)
return "beginText"
class PathObject(PDFObject) :
_name = "p"
def _postinit(self, *args, **kwargs) :
self._object = apply(pathobject.PDFPathObject, args, kwargs)
return "beginPath"
class Action(PDFAction) :
"""Class called for every Canvas method call."""
def _precomment(self) :
"""Outputs comments before the method call."""
if self._action == "showPage" :
self._parent._PyWrite("\n # Ends page %i" % self._parent._pagenumber)
elif self._action == "saveState" :
state = {}
d = self._parent._object.__dict__
for name in self._parent._object.STATE_ATTRIBUTES:
state[name] = d[name]
self._parent._PyWrite("\n # Saves context level %i %s" % (self._parent._contextlevel, state))
self._parent._contextlevel = self._parent._contextlevel + 1
elif self._action == "restoreState" :
self._parent._contextlevel = self._parent._contextlevel - 1
self._parent._PyWrite("\n # Restores context level %i %s" % (self._parent._contextlevel, self._parent._object.state_stack[-1]))
elif self._action == "beginForm" :
self._parent._formnumber = self._parent._formnumber + 1
self._parent._PyWrite("\n # Begins form %i" % self._parent._formnumber)
elif self._action == "endForm" :
self._parent._PyWrite("\n # Ends form %i" % self._parent._formnumber)
elif self._action == "save" :
self._parent._PyWrite("\n # Saves the PDF document to disk")
def _postcomment(self) :
"""Outputs comments after the method call."""
if self._action == "showPage" :
self._parent._pagenumber = self._parent._pagenumber + 1
self._parent._PyWrite("\n # Begins page %i" % self._parent._pagenumber)
elif self._action in [ "endForm", "drawPath", "clipPath" ] :
self._parent._PyWrite("")
_name = "c"
def __init__(self, *args, **kwargs) :
"""Initialize and begins source code."""
self._parent = self # nice trick, isn't it ?
self._in = 0
self._contextlevel = 0
self._pagenumber = 1
self._formnumber = 0
self._footerpresent = 0
self._object = apply(canvas.Canvas, args, kwargs)
self._pyfile = cStringIO.StringIO()
self._PyWrite(PyHeader)
try :
del kwargs["filename"]
except KeyError :
pass
self._PyWrite(" # create the PDF document\n %s = Canvas(file, %s)\n\n # Begins page 1" % (self._name, apply(buildargs, args[1:], kwargs)))
def __nonzero__(self) :
"""This is needed by platypus' tables."""
return 1
def __str__(self) :
"""Returns the equivalent Python source code."""
if not self._footerpresent :
self._PyWrite(PyFooter)
self._footerpresent = 1
return self._pyfile.getvalue()
def __getattr__(self, name) :
"""Method or attribute access."""
if name == "beginPath" :
return self.PathObject(self)
elif name == "beginText" :
return self.TextObject(self)
else :
return self.Action(self, name)
def _PyWrite(self, pycode) :
"""Outputs the source code with a trailing newline."""
self._pyfile.write("%s\n" % pycode)
if __name__ == '__main__':
print 'For test scripts, look in reportlab/test'
| |
"""Tests for the Elmax config flow."""
from unittest.mock import patch
from elmax_api.exceptions import ElmaxBadLoginError, ElmaxBadPinError, ElmaxNetworkError
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.elmax.const import (
CONF_ELMAX_PANEL_ID,
CONF_ELMAX_PANEL_NAME,
CONF_ELMAX_PANEL_PIN,
CONF_ELMAX_PASSWORD,
CONF_ELMAX_USERNAME,
DOMAIN,
)
from homeassistant.config_entries import SOURCE_REAUTH
from tests.common import MockConfigEntry
from tests.components.elmax import (
MOCK_PANEL_ID,
MOCK_PANEL_NAME,
MOCK_PANEL_PIN,
MOCK_PASSWORD,
MOCK_USERNAME,
)
CONF_POLLING = "polling"
async def test_show_form(hass):
"""Test that the form is served with no input."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_standard_setup(hass):
"""Test the standard setup case."""
# Setup once.
show_form_result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.elmax.async_setup_entry",
return_value=True,
):
login_result = await hass.config_entries.flow.async_configure(
show_form_result["flow_id"],
{
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
result = await hass.config_entries.flow.async_configure(
login_result["flow_id"],
{
CONF_ELMAX_PANEL_NAME: MOCK_PANEL_NAME,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
},
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_one_config_allowed(hass):
"""Test that only one Elmax configuration is allowed for each panel."""
MockConfigEntry(
domain=DOMAIN,
data={
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
},
unique_id=MOCK_PANEL_ID,
).add_to_hass(hass)
    # Attempt to add another instance of the integration for the very same panel; it must fail.
show_form_result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
login_result = await hass.config_entries.flow.async_configure(
show_form_result["flow_id"],
{
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
result = await hass.config_entries.flow.async_configure(
login_result["flow_id"],
{
CONF_ELMAX_PANEL_NAME: MOCK_PANEL_NAME,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_invalid_credentials(hass):
"""Test that invalid credentials throws an error."""
with patch(
"elmax_api.http.Elmax.login",
side_effect=ElmaxBadLoginError(),
):
show_form_result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
login_result = await hass.config_entries.flow.async_configure(
show_form_result["flow_id"],
{
CONF_ELMAX_USERNAME: "wrong_user_name@email.com",
CONF_ELMAX_PASSWORD: "incorrect_password",
},
)
assert login_result["step_id"] == "user"
assert login_result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert login_result["errors"] == {"base": "invalid_auth"}
async def test_connection_error(hass):
"""Test other than invalid credentials throws an error."""
with patch(
"elmax_api.http.Elmax.login",
side_effect=ElmaxNetworkError(),
):
show_form_result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
login_result = await hass.config_entries.flow.async_configure(
show_form_result["flow_id"],
{
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
assert login_result["step_id"] == "user"
assert login_result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert login_result["errors"] == {"base": "network_error"}
async def test_unhandled_error(hass):
"""Test unhandled exceptions."""
with patch(
"elmax_api.http.Elmax.get_panel_status",
side_effect=Exception(),
):
show_form_result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
login_result = await hass.config_entries.flow.async_configure(
show_form_result["flow_id"],
{
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
result = await hass.config_entries.flow.async_configure(
login_result["flow_id"],
{
CONF_ELMAX_PANEL_NAME: MOCK_PANEL_NAME,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
},
)
assert result["step_id"] == "panels"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
async def test_invalid_pin(hass):
"""Test error is thrown when a wrong pin is used to pair a panel."""
# Simulate bad pin response.
with patch(
"elmax_api.http.Elmax.get_panel_status",
side_effect=ElmaxBadPinError(),
):
show_form_result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
login_result = await hass.config_entries.flow.async_configure(
show_form_result["flow_id"],
{
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
result = await hass.config_entries.flow.async_configure(
login_result["flow_id"],
{
CONF_ELMAX_PANEL_NAME: MOCK_PANEL_NAME,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
},
)
assert result["step_id"] == "panels"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_pin"}
async def test_no_online_panel(hass):
"""Test no-online panel is available."""
# Simulate low-level api returns no panels.
with patch(
"elmax_api.http.Elmax.list_control_panels",
return_value=[],
):
show_form_result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
login_result = await hass.config_entries.flow.async_configure(
show_form_result["flow_id"],
{
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
assert login_result["step_id"] == "user"
assert login_result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert login_result["errors"] == {"base": "no_panel_online"}
async def test_show_reauth(hass):
"""Test that the reauth form shows."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data={
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
async def test_reauth_flow(hass):
"""Test that the reauth flow works."""
MockConfigEntry(
domain=DOMAIN,
data={
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
},
unique_id=MOCK_PANEL_ID,
).add_to_hass(hass)
# Trigger reauth
with patch(
"homeassistant.components.elmax.async_setup_entry",
return_value=True,
):
reauth_result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data={
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
result = await hass.config_entries.flow.async_configure(
reauth_result["flow_id"],
{
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
await hass.async_block_till_done()
assert result["reason"] == "reauth_successful"
async def test_reauth_panel_disappeared(hass):
"""Test that the case where panel is no longer associated with the user."""
# Simulate a first setup
MockConfigEntry(
domain=DOMAIN,
data={
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
},
unique_id=MOCK_PANEL_ID,
).add_to_hass(hass)
# Trigger reauth
with patch(
"elmax_api.http.Elmax.list_control_panels",
return_value=[],
):
reauth_result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data={
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
result = await hass.config_entries.flow.async_configure(
reauth_result["flow_id"],
{
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
assert result["step_id"] == "reauth_confirm"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "reauth_panel_disappeared"}
async def test_reauth_invalid_pin(hass):
"""Test that the case where panel is no longer associated with the user."""
MockConfigEntry(
domain=DOMAIN,
data={
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
},
unique_id=MOCK_PANEL_ID,
).add_to_hass(hass)
# Trigger reauth
with patch(
"elmax_api.http.Elmax.get_panel_status",
side_effect=ElmaxBadPinError(),
):
reauth_result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data={
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
result = await hass.config_entries.flow.async_configure(
reauth_result["flow_id"],
{
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
assert result["step_id"] == "reauth_confirm"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_pin"}
async def test_reauth_bad_login(hass):
"""Test bad login attempt at reauth time."""
MockConfigEntry(
domain=DOMAIN,
data={
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
},
unique_id=MOCK_PANEL_ID,
).add_to_hass(hass)
# Trigger reauth
with patch(
"elmax_api.http.Elmax.login",
side_effect=ElmaxBadLoginError(),
):
reauth_result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data={
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
result = await hass.config_entries.flow.async_configure(
reauth_result["flow_id"],
{
CONF_ELMAX_PANEL_ID: MOCK_PANEL_ID,
CONF_ELMAX_PANEL_PIN: MOCK_PANEL_PIN,
CONF_ELMAX_USERNAME: MOCK_USERNAME,
CONF_ELMAX_PASSWORD: MOCK_PASSWORD,
},
)
assert result["step_id"] == "reauth_confirm"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_auth"}
| |
# (c) Copyright [2016] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import uuid
from oslo_serialization import base64
from oslo_utils import importutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
from hpedockerplugin import exception
from hpedockerplugin.i18n import _, _LE, _LI, _LW
hpe3parclient = importutils.try_import("hpe3parclient")
if hpe3parclient:
from hpe3parclient import client
from hpe3parclient import exceptions as hpeexceptions
LOG = logging.getLogger(__name__)
MIN_CLIENT_VERSION = '4.0.0'
DEDUP_API_VERSION = 30201120
FLASH_CACHE_API_VERSION = 30201200
hpe3par_opts = [
cfg.StrOpt('hpe3par_api_url',
default='',
help="3PAR WSAPI Server Url like "
"https://<3par ip>:8080/api/v1",
deprecated_name='hp3par_api_url'),
cfg.StrOpt('hpe3par_username',
default='',
help="3PAR username with the 'edit' role",
deprecated_name='hp3par_username'),
cfg.StrOpt('hpe3par_password',
default='',
help="3PAR password for the user specified in hpe3par_username",
secret=True,
deprecated_name='hp3par_password'),
cfg.ListOpt('hpe3par_cpg',
default=["OpenStack"],
help="List of the CPG(s) to use for volume creation",
deprecated_name='hp3par_cpg'),
cfg.BoolOpt('hpe3par_debug',
default=False,
help="Enable HTTP debugging to 3PAR",
deprecated_name='hp3par_debug'),
cfg.ListOpt('hpe3par_iscsi_ips',
default=[],
help="List of target iSCSI addresses to use.",
deprecated_name='hp3par_iscsi_ips'),
cfg.BoolOpt('hpe3par_iscsi_chap_enabled',
default=False,
help="Enable CHAP authentication for iSCSI connections.",
deprecated_name='hp3par_iscsi_chap_enabled'),
cfg.BoolOpt('strict_ssh_host_key_policy',
default=False,
help='Option to enable strict host key checking. When '
'set to "True" the plugin will only connect to systems '
'with a host key present in the configured '
'"ssh_hosts_key_file". When set to "False" the host key '
'will be saved upon first connection and used for '
'subsequent connections. Default=False'),
cfg.StrOpt('ssh_hosts_key_file',
default='$state_path/ssh_known_hosts',
help='File containing SSH host keys for the systems with which '
'the plugin needs to communicate. OPTIONAL: '
'Default=$state_path/ssh_known_hosts'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
CONF = cfg.CONF
CONF.register_opts(hpe3par_opts)
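# Hedged example of how these options might appear in a plugin configuration
# file (the section name and all values below are illustrative only, not taken
# from this module):
#
#     [DEFAULT]
#     hpe3par_api_url = https://10.0.0.10:8080/api/v1
#     hpe3par_username = 3paradm
#     hpe3par_password = secret
#     hpe3par_cpg = OpenStack
#     hpe3par_iscsi_ips = 10.0.0.20,10.0.0.21
#     suppress_requests_ssl_warnings = True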
class HPE3PARCommon(object):
"""Class that contains common code for the 3PAR drivers.
Version history:
.. code-block:: none
0.0.1 - Initial version of 3PAR common created.
0.0.2 - Added the ability to choose volume provisionings.
0.0.3 - Added support for flash cache.
"""
VERSION = "0.0.3"
# TODO(Ramy): move these to the 3PAR Client
VLUN_TYPE_EMPTY = 1
VLUN_TYPE_PORT = 2
VLUN_TYPE_HOST = 3
VLUN_TYPE_MATCHED_SET = 4
VLUN_TYPE_HOST_SET = 5
THIN = 2
DEDUP = 6
CONVERT_TO_THIN = 1
CONVERT_TO_FULL = 2
CONVERT_TO_DEDUP = 3
# Valid values for volume type extra specs
# The first value in the list is the default value
valid_prov_values = ['thin', 'full', 'dedup']
valid_persona_values = ['2 - Generic-ALUA',
'1 - Generic',
'3 - Generic-legacy',
'4 - HPUX-legacy',
'5 - AIX-legacy',
'6 - EGENERA',
'7 - ONTAP-legacy',
'8 - VMware',
'9 - OpenVMS',
'10 - HPUX',
'11 - WindowsServer']
hpe_qos_keys = ['minIOPS', 'maxIOPS', 'minBWS', 'maxBWS', 'latency',
'priority']
qos_priority_level = {'low': 1, 'normal': 2, 'high': 3}
hpe3par_valid_keys = ['cpg', 'snap_cpg', 'provisioning', 'persona', 'vvs',
'flash_cache']
def __init__(self, config):
self.config = config
self.client = None
self.uuid = uuid.uuid4()
def get_version(self):
return self.VERSION
def check_flags(self, options, required_flags):
for flag in required_flags:
if not getattr(options, flag, None):
msg = _('%s is not set') % flag
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def _create_client(self):
cl = client.HPE3ParClient(
self.config.hpe3par_api_url,
suppress_ssl_warnings=CONF.suppress_requests_ssl_warnings)
client_version = hpe3parclient.version
if client_version < MIN_CLIENT_VERSION:
ex_msg = (_('Invalid hpe3parclient version found (%(found)s). '
'Version %(minimum)s or greater required. Run "pip'
' install --upgrade python-3parclient" to upgrade'
' the hpe3parclient.')
% {'found': client_version,
'minimum': MIN_CLIENT_VERSION})
LOG.error(ex_msg)
raise exception.InvalidInput(reason=ex_msg)
return cl
def client_login(self):
try:
LOG.debug("Connecting to 3PAR")
self.client.login(self.config.hpe3par_username,
self.config.hpe3par_password)
except hpeexceptions.HTTPUnauthorized as ex:
msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") %
{'url': self.config.hpe3par_api_url, 'err': ex})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
known_hosts_file = CONF.ssh_hosts_key_file
policy = "AutoAddPolicy"
if CONF.strict_ssh_host_key_policy:
policy = "RejectPolicy"
self.client.setSSHOptions(
self.config.san_ip,
self.config.san_login,
self.config.san_password,
port=self.config.san_ssh_port,
conn_timeout=self.config.ssh_conn_timeout,
privatekey=self.config.san_private_key,
missing_key_policy=policy,
known_hosts_file=known_hosts_file)
def client_logout(self):
LOG.debug("Disconnect from 3PAR REST and SSH %s", self.uuid)
self.client.logout()
def do_setup(self):
if hpe3parclient is None:
msg = _('You must install hpe3parclient before using 3PAR'
' drivers. Run "pip install python-3parclient" to'
' install the hpe3parclient.')
raise exception.VolumeBackendAPIException(data=msg)
try:
self.client = self._create_client()
wsapi_version = self.client.getWsApiVersion()
self.API_VERSION = wsapi_version['build']
except hpeexceptions.UnsupportedVersion as ex:
raise exception.InvalidInput(ex)
if self.config.hpe3par_debug:
self.client.debug_rest(True)
def check_for_setup_error(self):
LOG.info(_LI("HPE3PARCommon %(common_ver)s,"
"hpe3parclient %(rest_ver)s"),
{"common_ver": self.VERSION,
"rest_ver": hpe3parclient.get_version_string()})
self.client_login()
try:
cpg_names = self.config.hpe3par_cpg
for cpg_name in cpg_names:
self.validate_cpg(cpg_name)
finally:
self.client_logout()
def validate_cpg(self, cpg_name):
try:
self.client.getCPG(cpg_name)
except hpeexceptions.HTTPNotFound:
err = (_("CPG (%s) doesn't exist on array") % cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
def get_domain(self, cpg_name):
try:
cpg = self.client.getCPG(cpg_name)
except hpeexceptions.HTTPNotFound:
err = (_("Failed to get domain because CPG (%s) doesn't "
"exist on array.") % cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
if 'domain' in cpg:
return cpg['domain']
return None
def _get_3par_vol_name(self, volume_id):
"""Get converted 3PAR volume name.
Converts the openstack volume id from
ecffc30f-98cb-4cf5-85ee-d7309cc17cd2
to
dcv-7P.DD5jLTPWF7tcwnMF80g
        We convert the 128 bits of the uuid into a 24-character-long
        base64-encoded string to ensure we don't exceed the maximum
        allowed 31-character name limit on 3PAR.
        We strip the padding '=' and replace '+' with '.'
        and '/' with '-'.
"""
volume_name = self._encode_name(volume_id)
return "dcv-%s" % volume_name
def _get_3par_vvs_name(self, volume_id):
vvs_name = self._encode_name(volume_id)
return "vvs-%s" % vvs_name
def _encode_name(self, name):
uuid_str = name.replace("-", "")
vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)
vol_encoded = base64.encode_as_text(vol_uuid.bytes)
# 3par doesn't allow +, nor /
vol_encoded = vol_encoded.replace('+', '.')
vol_encoded = vol_encoded.replace('/', '-')
# strip off the == as 3par doesn't like those.
vol_encoded = vol_encoded.replace('=', '')
return vol_encoded
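    # Hedged walk-through of the encoding above, reusing the example id from
    # the _get_3par_vol_name docstring (no new behaviour, just the same steps
    # spelled out):
    #     vol_uuid    = uuid.UUID('urn:uuid:ecffc30f98cb4cf585eed7309cc17cd2')
    #     vol_encoded = base64.encode_as_text(vol_uuid.bytes)  # 24 chars, '==' padded
    #     vol_encoded.replace('+', '.').replace('/', '-').replace('=', '')
    # leaves a 22 character string, so "dcv-" plus the encoded id stays well
    # within the 31 character 3PAR name limit.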
def _capacity_from_size(self, vol_size):
        # 3PAR volume sizes are in mebibytes (MiB).
if int(vol_size) == 0:
capacity = units.Gi # default: 1GiB
else:
capacity = vol_size * units.Gi
capacity = int(math.ceil(capacity / units.Mi))
return capacity
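    # Worked example of the conversion above (numbers only, no new logic):
    # a requested size of 10 (GiB) gives capacity = 10 * units.Gi, and
    # ceil(10 * units.Gi / units.Mi) = 10240, i.e. 10240 MiB as expected by
    # the 3PAR WSAPI; a size of 0 falls back to 1 GiB = 1024 MiB.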
def _delete_3par_host(self, hostname):
self.client.deleteHost(hostname)
def _create_3par_vlun(self, volume, hostname, nsp):
try:
location = None
if nsp is None:
location = self.client.createVLUN(volume, hostname=hostname,
auto=True)
else:
port = self.build_portPos(nsp)
location = self.client.createVLUN(volume, hostname=hostname,
auto=True, portPos=port)
vlun_info = None
if location:
# The LUN id is returned as part of the location URI
vlun = location.split(',')
vlun_info = {'volume_name': vlun[0],
'lun_id': int(vlun[1]),
'host_name': vlun[2],
}
if len(vlun) > 3:
vlun_info['nsp'] = vlun[3]
return vlun_info
except hpeexceptions.HTTPBadRequest as e:
if 'must be in the same domain' in e.get_description():
LOG.error(e.get_description())
raise exception.Invalid3PARDomain(err=e.get_description())
def _safe_hostname(self, hostname):
"""We have to use a safe hostname length for 3PAR host names."""
try:
index = hostname.index('.')
except ValueError:
# couldn't find it
index = len(hostname)
# we'll just chop this off for now.
if index > 31:
index = 31
return hostname[:index]
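    # Hedged examples of the truncation above (host names are made up):
    #     "node1.example.com" -> "node1"   (everything from the first dot on is dropped)
    # and a dot-less name longer than 31 characters is cut to its first 31
    # characters, the 3PAR host name limit.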
def _get_3par_host(self, hostname):
return self.client.getHost(hostname)
def get_ports(self):
return self.client.getPorts()
def get_active_target_ports(self):
ports = self.get_ports()
target_ports = []
for port in ports['members']:
if (
port['mode'] == self.client.PORT_MODE_TARGET and
port['linkState'] == self.client.PORT_STATE_READY
):
port['nsp'] = self.build_nsp(port['portPos'])
target_ports.append(port)
return target_ports
def get_active_iscsi_target_ports(self):
ports = self.get_active_target_ports()
iscsi_ports = []
for port in ports:
if port['protocol'] == self.client.PORT_PROTO_ISCSI:
iscsi_ports.append(port)
return iscsi_ports
def _get_vlun(self, volume_name, hostname, lun_id=None, nsp=None):
"""find a VLUN on a 3PAR host."""
vluns = self.client.getHostVLUNs(hostname)
found_vlun = None
for vlun in vluns:
if volume_name in vlun['volumeName']:
if lun_id is not None:
if vlun['lun'] == lun_id:
if nsp:
port = self.build_portPos(nsp)
if vlun['portPos'] == port:
found_vlun = vlun
break
else:
found_vlun = vlun
break
else:
found_vlun = vlun
break
if found_vlun is None:
LOG.info(_LI("3PAR vlun %(name)s not found on host %(host)s"),
{'name': volume_name, 'host': hostname})
return found_vlun
def create_vlun(self, volume, host, nsp=None):
"""Create a VLUN.
In order to export a volume on a 3PAR box, we have to create a VLUN.
"""
volume_name = self._get_3par_vol_name(volume['id'])
vlun_info = self._create_3par_vlun(volume_name, host['name'], nsp)
return self._get_vlun(volume_name,
host['name'],
vlun_info['lun_id'],
nsp)
def delete_vlun(self, volume, hostname):
volume_name = self._get_3par_vol_name(volume['id'])
vluns = self.client.getHostVLUNs(hostname)
# Find all the VLUNs associated with the volume. The VLUNs will then
        # be split into groups based on the active status of the VLUN. If
        # active VLUNs are detected, a delete will be attempted on them. If
# there are no active VLUNs but there are inactive VLUNs, then the
# inactive VLUNs will be deleted. The inactive VLUNs are the templates
# on the 3PAR backend.
active_volume_vluns = []
inactive_volume_vluns = []
volume_vluns = []
for vlun in vluns:
if volume_name in vlun['volumeName']:
if vlun['active']:
active_volume_vluns.append(vlun)
else:
inactive_volume_vluns.append(vlun)
if active_volume_vluns:
volume_vluns = active_volume_vluns
elif inactive_volume_vluns:
volume_vluns = inactive_volume_vluns
if not volume_vluns:
            LOG.warning(_LW("3PAR vlun for volume %(name)s not found on "
                            "host %(host)s"),
                        {'name': volume_name, 'host': hostname})
return
# VLUN Type of MATCHED_SET 4 requires the port to be provided
removed_luns = []
for vlun in volume_vluns:
if self.VLUN_TYPE_MATCHED_SET == vlun['type']:
self.client.deleteVLUN(volume_name, vlun['lun'], hostname,
vlun['portPos'])
else:
# This is HOST_SEES or a type that is not MATCHED_SET.
# By deleting one VLUN, all the others should be deleted, too.
if vlun['lun'] not in removed_luns:
self.client.deleteVLUN(volume_name, vlun['lun'], hostname)
removed_luns.append(vlun['lun'])
# Determine if there are other volumes attached to the host.
# This will determine whether we should try removing host from host set
# and deleting the host.
vluns = []
try:
vluns = self.client.getHostVLUNs(hostname)
except hpeexceptions.HTTPNotFound:
LOG.debug("All VLUNs removed from host %s", hostname)
pass
for vlun in vluns:
if volume_name not in vlun['volumeName']:
# Found another volume
break
else:
# We deleted the last vlun, so try to delete the host too.
# This check avoids the old unnecessary try/fail when vluns exist
# but adds a minor race condition if a vlun is manually deleted
# externally at precisely the wrong time. Worst case is leftover
# host, so it is worth the unlikely risk.
try:
self._delete_3par_host(hostname)
except Exception as ex:
# Any exception down here is only logged. The vlun is deleted.
# If the host is in a host set, the delete host will fail and
# the host will remain in the host set. This is desired
# because docker was not responsible for the host set
# assignment. The host set could be used outside of docker
# for future needs (e.g. export volume to host set).
# The log info explains why the host was left alone.
LOG.info(_LI("3PAR vlun for volume '%(name)s' was deleted, "
"but the host '%(host)s' was not deleted "
"because: %(reason)s"),
{'name': volume_name, 'host': hostname,
'reason': ex.get_description()})
def _get_key_value(self, hpe3par_keys, key, default=None):
if hpe3par_keys is not None and key in hpe3par_keys:
return hpe3par_keys[key]
else:
return default
def _get_keys_by_volume_type(self, volume_type):
hpe3par_keys = {}
specs = volume_type.get('extra_specs')
for key, value in specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
if key in self.hpe3par_valid_keys:
hpe3par_keys[key] = value
return hpe3par_keys
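    # Hedged illustration of the extra-spec filtering above (the volume type
    # and its keys are invented for the example): given
    #     volume_type = {'extra_specs': {'hpe3par:provisioning': 'thin',
    #                                    'hpe3par:persona': '2 - Generic-ALUA',
    #                                    'qos:maxIOPS': '1000'}}
    # the prefix before ':' is stripped and only names listed in
    # hpe3par_valid_keys survive, so the result is
    #     {'provisioning': 'thin', 'persona': '2 - Generic-ALUA'}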
def get_cpg(self, volume, allowSnap=False):
volume_name = self._get_3par_vol_name(volume['id'])
vol = self.client.getVolume(volume_name)
if 'userCPG' in vol:
return vol['userCPG']
elif allowSnap:
return vol['snapCPG']
return None
def _get_3par_vol_comment(self, volume_name):
vol = self.client.getVolume(volume_name)
if 'comment' in vol:
return vol['comment']
return None
def create_volume(self, volume):
LOG.debug('CREATE VOLUME (%(disp_name)s: %(vol_name)s %(id)s on '
'%(host)s)',
{'disp_name': volume['display_name'],
'vol_name': volume['name'],
'id': self._get_3par_vol_name(volume['id']),
'host': volume['host']})
try:
comments = {'volume_id': volume['id'],
'name': volume['name'],
'type': 'Docker'}
name = volume.get('display_name', None)
if name:
comments['display_name'] = name
# TODO(leeantho): Choose the first CPG for now. In the future
# support selecting different CPGs if multiple are provided.
cpg = self.config.hpe3par_cpg[0]
# check for valid provisioning type
prov_value = volume['provisioning']
if prov_value not in self.valid_prov_values:
err = (_("Must specify a valid provisioning type %(valid)s, "
"value '%(prov)s' is invalid.") %
{'valid': self.valid_prov_values,
'prov': prov_value})
LOG.error(err)
raise exception.InvalidInput(reason=err)
tpvv = True
tdvv = False
if prov_value == "full":
tpvv = False
elif prov_value == "dedup":
tpvv = False
tdvv = True
if tdvv and (self.API_VERSION < DEDUP_API_VERSION):
err = (_("Dedup is a valid provisioning type, "
"but requires WSAPI version '%(dedup_version)s' "
"version '%(version)s' is installed.") %
{'dedup_version': DEDUP_API_VERSION,
'version': self.API_VERSION})
LOG.error(err)
raise exception.InvalidInput(reason=err)
extras = {'comment': json.dumps(comments),
'tpvv': tpvv, }
# Only set the dedup option if the backend supports it.
if self.API_VERSION >= DEDUP_API_VERSION:
extras['tdvv'] = tdvv
capacity = self._capacity_from_size(volume['size'])
volume_name = self._get_3par_vol_name(volume['id'])
self.client.createVolume(volume_name, cpg, capacity, extras)
# Check if flash cache needs to be enabled
flash_cache = self.get_flash_cache_policy(volume['flash_cache'])
if flash_cache is not None:
try:
self._add_volume_to_volume_set(volume, volume_name,
cpg, flash_cache)
except exception.InvalidInput as ex:
# Delete the volume if unable to add it to the volume set
self.client.deleteVolume(volume_name)
LOG.error(_LE("Exception: %s"), ex)
raise exception.PluginException(ex)
except hpeexceptions.HTTPConflict:
msg = _("Volume (%s) already exists on array") % volume_name
LOG.error(msg)
raise exception.Duplicate(msg)
except hpeexceptions.HTTPBadRequest as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.Invalid(ex.get_description())
except exception.InvalidInput as ex:
LOG.error(_LE("Exception: %s"), ex)
raise
except exception.PluginException as ex:
LOG.error(_LE("Exception: %s"), ex)
raise
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.PluginException(ex)
def delete_volume(self, volume):
try:
volume_name = self._get_3par_vol_name(volume['id'])
# Try and delete the volume, it might fail here because
# the volume is part of a volume set which will have the
# volume set name in the error.
try:
self.client.deleteVolume(volume_name)
except hpeexceptions.HTTPBadRequest as ex:
if ex.get_code() == 29:
if self.client.isOnlinePhysicalCopy(volume_name):
LOG.debug("Found an online copy for %(volume)s",
{'volume': volume_name})
# the volume is in process of being cloned.
# stopOnlinePhysicalCopy will also delete
# the volume once it stops the copy.
self.client.stopOnlinePhysicalCopy(volume_name)
else:
LOG.error(_LE("Exception: %s"), ex)
raise
else:
LOG.error(_LE("Exception: %s"), ex)
raise
except hpeexceptions.HTTPConflict as ex:
if ex.get_code() == 34:
# This is a special case which means the
# volume is part of a volume set.
vvset_name = self.client.findVolumeSet(volume_name)
LOG.debug("Returned vvset_name = %s", vvset_name)
if vvset_name is not None and \
vvset_name.startswith('vvs-'):
# We have a single volume per volume set, so
# remove the volume set.
self.client.deleteVolumeSet(
self._get_3par_vvs_name(volume['id']))
elif vvset_name is not None:
# We have a pre-defined volume set just remove the
# volume and leave the volume set.
self.client.removeVolumeFromVolumeSet(vvset_name,
volume_name)
self.client.deleteVolume(volume_name)
elif (ex.get_code() == 151 or ex.get_code() == 32):
# the volume is being operated on in a background
# task on the 3PAR.
# TODO(walter-boring) do a retry a few times.
# for now lets log a better message
msg = _("The volume is currently busy on the 3PAR"
" and cannot be deleted at this time. "
"You can try again later.")
LOG.error(msg)
raise exception.VolumeIsBusy(message=msg)
else:
LOG.error(_LE("Exception: %s"), ex)
raise exception.VolumeIsBusy(message=ex.get_description())
except hpeexceptions.HTTPNotFound as ex:
LOG.warning(_LW("Delete volume id not found. Ex: %(msg)s"),
{'msg': ex})
except hpeexceptions.HTTPForbidden as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized(ex.get_description())
except hpeexceptions.HTTPConflict as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.VolumeIsBusy(message=ex.get_description())
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.PluginException(ex)
def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns):
if wwns is not None and not isinstance(wwns, list):
wwns = [wwns]
if iqns is not None and not isinstance(iqns, list):
iqns = [iqns]
out = self.client.getHosts()
hosts = out['members']
for host in hosts:
if 'iSCSIPaths' in host and iqns is not None:
iscsi_paths = host['iSCSIPaths']
for iscsi in iscsi_paths:
for iqn in iqns:
if iqn == iscsi['name']:
return host['name']
if 'FCPaths' in host and wwns is not None:
fc_paths = host['FCPaths']
for fc in fc_paths:
for wwn in wwns:
if wwn == fc['wwn']:
return host['name']
def terminate_connection(self, volume, hostname, wwn=None, iqn=None):
"""Driver entry point to unattach a volume from an instance."""
# does 3par know this host by a different name?
hosts = None
if wwn:
hosts = self.client.queryHost(wwns=wwn)
elif iqn:
hosts = self.client.queryHost(iqns=[iqn])
if hosts and hosts['members'] and 'name' in hosts['members'][0]:
hostname = hosts['members'][0]['name']
try:
self.delete_vlun(volume, hostname)
return
except hpeexceptions.HTTPNotFound as e:
if 'host does not exist' in e.get_description():
# use the wwn to see if we can find the hostname
hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn)
# no 3par host, re-throw
if hostname is None:
LOG.error(_LE("Exception: %s"), e)
raise
else:
# not a 'host does not exist' HTTPNotFound exception, re-throw
LOG.error(_LE("Exception: %s"), e)
raise
# try again with name retrieved from 3par
self.delete_vlun(volume, hostname)
def build_nsp(self, portPos):
return '%s:%s:%s' % (portPos['node'],
portPos['slot'],
portPos['cardPort'])
def build_portPos(self, nsp):
split = nsp.split(":")
portPos = {}
portPos['node'] = int(split[0])
portPos['slot'] = int(split[1])
portPos['cardPort'] = int(split[2])
return portPos
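    # Round-trip example for the two helpers above (values are arbitrary):
    #     build_portPos("1:2:3") -> {'node': 1, 'slot': 2, 'cardPort': 3}
    #     build_nsp({'node': 1, 'slot': 2, 'cardPort': 3}) -> "1:2:3"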
def find_existing_vlun(self, volume, host):
"""Finds an existing VLUN for a volume on a host.
Returns an existing VLUN's information. If no existing VLUN is found,
None is returned.
:param volume: A dictionary describing a volume.
:param host: A dictionary describing a host.
"""
existing_vlun = None
try:
vol_name = self._get_3par_vol_name(volume['id'])
host_vluns = self.client.getHostVLUNs(host['name'])
# The first existing VLUN found will be returned.
for vlun in host_vluns:
if vlun['volumeName'] == vol_name:
existing_vlun = vlun
break
except hpeexceptions.HTTPNotFound:
# ignore, no existing VLUNs were found
LOG.debug("No existing VLUNs were found for host/volume "
"combination: %(host)s, %(vol)s",
{'host': host['name'],
'vol': vol_name})
pass
return existing_vlun
def find_existing_vluns(self, volume, host):
existing_vluns = []
try:
vol_name = self._get_3par_vol_name(volume['id'])
host_vluns = self.client.getHostVLUNs(host['name'])
for vlun in host_vluns:
if vlun['volumeName'] == vol_name:
existing_vluns.append(vlun)
except hpeexceptions.HTTPNotFound:
# ignore, no existing VLUNs were found
LOG.debug("No existing VLUNs were found for host/volume "
"combination: %(host)s, %(vol)s",
{'host': host['name'],
'vol': vol_name})
pass
return existing_vluns
def get_flash_cache_policy(self, flash_cache):
if flash_cache is not None:
# If requested, see if supported on back end
if self.API_VERSION < FLASH_CACHE_API_VERSION:
err = (_("Flash Cache Policy requires "
"WSAPI version '%(fcache_version)s' "
"version '%(version)s' is installed.") %
{'fcache_version': FLASH_CACHE_API_VERSION,
'version': self.API_VERSION})
LOG.error(err)
raise exception.InvalidInput(reason=err)
else:
if flash_cache.lower() == 'true':
return self.client.FLASH_CACHE_ENABLED
else:
return self.client.FLASH_CACHE_DISABLED
return None
def _set_flash_cache_policy_in_vvs(self, flash_cache, vvs_name):
# Update virtual volume set
if flash_cache:
try:
self.client.modifyVolumeSet(vvs_name,
flashCachePolicy=flash_cache)
LOG.info(_LI("Flash Cache policy set to %s"), flash_cache)
except Exception as ex:
LOG.error(_LE("Error setting Flash Cache policy "
"to %s - exception"), flash_cache)
                raise exception.PluginException(ex)
def _add_volume_to_volume_set(self, volume, volume_name,
cpg, flash_cache):
vvs_name = self._get_3par_vvs_name(volume['id'])
domain = self.get_domain(cpg)
self.client.createVolumeSet(vvs_name, domain)
try:
self._set_flash_cache_policy_in_vvs(flash_cache, vvs_name)
self.client.addVolumeToVolumeSet(vvs_name, volume_name)
except Exception as ex:
# Cleanup the volume set if unable to create the qos rule
# or flash cache policy or add the volume to the volume set
self.client.deleteVolumeSet(vvs_name)
raise exception.PluginException(ex)
| |
# Copyright (C) 2016 Fan Long, Martin Rinard and MIT CSAIL
# Prophet
#
# This file is part of Prophet.
#
# Prophet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Prophet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Prophet. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
from sys import argv
import getopt
from os import chdir, getcwd, system, path, environ
import subprocess
cases = [
"test_grammar",
"test_opcodes",
"test_dict",
"test_builtin",
"test_exceptions",
"test_types",
"test_unittest",
"test_doctest",
"test_doctest2",
"test___all__",
"test___future__",
"test__locale",
"test_abc",
"test_abstract_numbers",
"test_aifc",
"test_argparse",
"test_array",
"test_ast",
"test_asynchat",
"test_asyncore",
"test_atexit",
"test_audioop",
"test_augassign",
"test_base64",
"test_bigaddrspace",
"test_bigmem",
"test_binascii",
"test_binhex",
"test_binop",
"test_bisect",
"test_bool",
"test_bufio",
"test_bytes",
"test_bz2",
"test_calendar",
"test_call",
"test_capi",
"test_cgi",
"test_charmapcodec",
"test_class",
"test_cmath",
"test_cmd",
"test_cmd_line",
"test_cmd_line_script",
"test_code",
"test_codeccallbacks",
"test_codecencodings_cn",
"test_codecencodings_hk",
"test_codecencodings_jp",
"test_codecencodings_kr",
"test_codecencodings_tw",
"test_codecs",
"test_codeop",
"test_coding",
"test_collections",
"test_colorsys",
"test_compare",
"test_compile",
"test_compileall",
"test_complex",
"test_concurrent_futures",
"test_configparser",
"test_contains",
"test_contextlib",
"test_copy",
"test_copyreg",
"test_cprofile",
"test_crashers",
"test_crypt",
"test_csv",
"test_ctypes",
"test_datetime",
"test_dbm",
"test_dbm_dumb",
"test_dbm_gnu",
"test_dbm_ndbm",
"test_decimal",
"test_decorators",
"test_defaultdict",
"test_deque",
"test_descr",
"test_descrtut",
"test_dictcomps",
"test_dictviews",
"test_difflib",
"test_dis",
"test_distutils",
"test_docxmlrpc",
"test_dummy_thread",
"test_dummy_threading",
"test_dynamic",
"test_email",
"test_enumerate",
"test_eof",
"test_epoll",
"test_errno",
"test_exception_variations",
"test_extcall",
"test_faulthandler",
"test_fcntl",
"test_file",
"test_filecmp",
"test_fileinput",
"test_fileio",
"test_float",
"test_flufl",
"test_fnmatch",
"test_fork1",
"test_format",
"test_fractions",
"test_frozen",
"test_ftplib",
"test_funcattrs",
"test_functools",
"test_future",
"test_future3",
"test_future4",
"test_future5",
"test_gc",
"test_generators",
"test_genericpath",
"test_genexps",
"test_getargs2",
"test_getopt",
"test_gettext",
"test_glob",
"test_global",
"test_grp",
"test_gzip",
"test_hash",
"test_hashlib",
"test_heapq",
"test_hmac",
"test_html",
"test_htmlparser",
"test_http_cookiejar",
"test_http_cookies",
"test_httplib",
"test_httpservers",
"test_imaplib",
"test_imp",
"test_import",
"test_importhooks",
"test_importlib",
"test_index",
"test_inspect",
"test_int",
"test_int_literal",
"test_io",
"test_ioctl",
"test_isinstance",
"test_iter",
"test_iterlen",
"test_itertools",
"test_json",
"test_keywordonlyarg",
"test_largefile",
"test_lib2to3",
"test_linecache",
"test_list",
"test_listcomps",
"test_locale",
"test_long",
"test_longexp",
"test_macpath",
"test_mailbox",
"test_marshal",
"test_math",
"test_memoryio",
"test_memoryview",
"test_metaclass",
"test_mimetypes",
"test_minidom",
"test_mmap",
"test_module",
"test_modulefinder",
"test_multibytecodec",
"test_multibytecodec_support",
"test_multiprocessing",
"test_mutants",
"test_netrc",
"test_nis",
"test_nntplib",
"test_normalization",
"test_ntpath",
"test_numeric_tower",
"test_openpty",
"test_operator",
"test_optparse",
"test_os",
"test_osx_env",
"test_packaging",
"test_parser",
"test_pdb",
"test_peepholer",
"test_pep247",
"test_pep263",
"test_pep277",
"test_pep292",
"test_pep3120",
"test_pep3131",
"test_pep352",
"test_pickle",
"test_pickletools",
"test_pipes",
"test_pkg",
"test_pkgimport",
"test_pkgutil",
"test_platform",
"test_plistlib",
"test_poll",
"test_popen",
"test_poplib",
"test_posix",
"test_posixpath",
"test_pow",
"test_pprint",
"test_print",
"test_profile",
"test_property",
"test_pstats",
"test_pty",
"test_pulldom",
"test_pwd",
"test_pyclbr",
"test_pydoc",
"test_pyexpat",
"test_queue",
"test_quopri",
"test_raise",
"test_random",
"test_range",
"test_re",
"test_readline",
"test_reprlib",
"test_resource",
"test_richcmp",
"test_rlcompleter",
"test_robotparser",
"test_runpy",
"test_sax",
"test_sched",
"test_scope",
"test_select",
"test_set",
"test_setcomps",
"test_shelve",
"test_shlex",
"test_shutil",
"test_signal",
"test_site",
"test_slice",
"test_smtpd",
"test_smtplib",
"test_sndhdr",
"test_socket",
"test_sort",
"test_sqlite",
"test_ssl",
"test_strftime",
"test_string",
"test_stringprep",
"test_strlit",
"test_strptime",
"test_strtod",
"test_struct",
"test_structmembers",
"test_structseq",
"test_subprocess",
"test_sunau",
"test_sundry",
"test_super",
"test_symtable",
"test_syntax",
"test_sys",
"test_sys_setprofile",
"test_sys_settrace",
"test_sysconfig",
"test_syslog",
"test_tarfile",
"test_telnetlib",
"test_tempfile",
"test_textwrap",
"test_thread",
"test_threading",
"test_threaded_import",
"test_threadedtempfile",
"test_threading_local",
"test_threadsignals",
"test_time",
"test_timeit",
"test_tokenize",
"test_trace",
"test_traceback",
"test_tuple",
"test_typechecks",
"test_ucn",
"test_unary",
"test_unicode",
"test_unicode_file",
"test_unicodedata",
"test_univnewlines",
"test_unpack",
"test_unpack_ex",
"test_urllib",
"test_urllib2",
"test_urllib2_localnet",
"test_urllib_response",
"test_urlparse",
"test_userdict",
"test_userlist",
"test_userstring",
"test_uu",
"test_uuid",
"test_wait3",
"test_wait4",
"test_warnings",
"test_wave",
"test_weakref",
"test_weakset",
"test_with",
"test_wsgiref",
"test_xdrlib",
"test_xml_etree",
"test_xml_etree_c",
"test_xmlrpc",
"test_zipfile",
"test_zipimport",
"test_zipimport_support",
"test_zlib"];
if __name__ == "__main__":
opts, args = getopt.getopt(argv[1 :], "p:");
profile_dir = "";
for o, a in opts:
if o == "-p":
profile_dir = a;
src_dir = args[0];
test_dir = args[1];
work_dir = args[2];
    ids = [];
    if (len(args) > 3):
        ids = args[3 :];
cur_dir = src_dir;
if (profile_dir != ""):
cur_dir = profile_dir;
if (not path.exists(cur_dir + "/Lib/oldtest")):
system("mv " + cur_dir + "/Lib/test " + cur_dir + "/Lib/oldtest");
system("cp -rf " + test_dir + " " + cur_dir + "/Lib/test");
# system("cp -rf " + cur_dir + "/Lib/oldtest/regrtest.py " + cur_dir + "/Lib/test/regrtest.py");
ori_dir = getcwd();
chdir(cur_dir);
my_env = environ;
for i in ids:
case_str = cases[int(i) - 1];
ret = subprocess.call(["timeout 40s ./python Lib/test/regrtest.py " + case_str + " 1>/dev/null 2>/dev/null"], shell = True);
if (ret == 0):
print i,
chdir(ori_dir);
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=R0904, C0111, C0302
"""
This module contains various unit tests for
functions in CloudDLPHook
"""
import unittest
from typing import Any, Dict
import mock
from google.cloud.dlp_v2.types import DlpJob
from mock import PropertyMock
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.dlp import CloudDLPHook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_no_default_project_id
API_RESPONSE = {} # type: Dict[Any, Any]
ORGANIZATION_ID = "test-org"
ORGANIZATION_PATH = "organizations/{}".format(ORGANIZATION_ID)
PROJECT_ID = "test-project"
PROJECT_PATH = "projects/{}".format(PROJECT_ID)
DLP_JOB_ID = "job123"
DLP_JOB_PATH = "projects/{}/dlpJobs/{}".format(PROJECT_ID, DLP_JOB_ID)
TEMPLATE_ID = "template123"
STORED_INFO_TYPE_ID = "type123"
TRIGGER_ID = "trigger123"
DEIDENTIFY_TEMPLATE_ORGANIZATION_PATH = "organizations/{}/deidentifyTemplates/{}".format(
ORGANIZATION_ID, TEMPLATE_ID
)
INSPECT_TEMPLATE_ORGANIZATION_PATH = "organizations/{}/inspectTemplates/{}".format(
ORGANIZATION_ID, TEMPLATE_ID
)
STORED_INFO_TYPE_ORGANIZATION_PATH = "organizations/{}/storedInfoTypes/{}".format(
ORGANIZATION_ID, STORED_INFO_TYPE_ID
)
DEIDENTIFY_TEMPLATE_PROJECT_PATH = "projects/{}/deidentifyTemplates/{}".format(
PROJECT_ID, TEMPLATE_ID
)
INSPECT_TEMPLATE_PROJECT_PATH = "projects/{}/inspectTemplates/{}".format(
PROJECT_ID, TEMPLATE_ID
)
STORED_INFO_TYPE_PROJECT_PATH = "projects/{}/storedInfoTypes/{}".format(
PROJECT_ID, STORED_INFO_TYPE_ID
)
JOB_TRIGGER_PATH = "projects/{}/jobTriggers/{}".format(PROJECT_ID, TRIGGER_ID)
class TestCloudDLPHook(unittest.TestCase):
def setUp(self):
with mock.patch(
"airflow.providers.google.cloud.hooks.base.CloudBaseHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.hook = CloudDLPHook(gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.client_info",
new_callable=mock.PropertyMock
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook._get_credentials")
@mock.patch("airflow.providers.google.cloud.hooks.dlp.DlpServiceClient")
def test_dlp_service_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.hook.get_conn()
mock_client.assert_called_once_with(
credentials=mock_get_creds.return_value,
client_info=mock_client_info.return_value
)
self.assertEqual(mock_client.return_value, result)
self.assertEqual(self.hook._client, result)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_cancel_dlp_job(self, get_conn):
self.hook.cancel_dlp_job(dlp_job_id=DLP_JOB_ID, project_id=PROJECT_ID)
get_conn.return_value.cancel_dlp_job.assert_called_once_with(
name=DLP_JOB_PATH, retry=None, timeout=None, metadata=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_cancel_dlp_job_without_dlp_job_id(self, _):
with self.assertRaises(AirflowException):
self.hook.cancel_dlp_job(dlp_job_id=None, project_id=PROJECT_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_cancel_dlp_job_without_parent(self, _, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.cancel_dlp_job(dlp_job_id=DLP_JOB_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_create_deidentify_template_with_org_id(self, get_conn, mock_project_id):
get_conn.return_value.create_deidentify_template.return_value = API_RESPONSE
result = self.hook.create_deidentify_template(organization_id=ORGANIZATION_ID)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.create_deidentify_template.assert_called_once_with(
parent=ORGANIZATION_PATH,
deidentify_template=None,
template_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_create_deidentify_template_with_project_id(self, get_conn):
get_conn.return_value.create_deidentify_template.return_value = API_RESPONSE
result = self.hook.create_deidentify_template(project_id=PROJECT_ID)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.create_deidentify_template.assert_called_once_with(
parent=PROJECT_PATH,
deidentify_template=None,
template_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_create_deidentify_template_without_parent(self, _, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.create_deidentify_template()
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_create_dlp_job(self, get_conn):
get_conn.return_value.create_dlp_job.return_value = API_RESPONSE
result = self.hook.create_dlp_job(
project_id=PROJECT_ID, wait_until_finished=False
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.create_dlp_job.assert_called_once_with(
parent=PROJECT_PATH,
inspect_job=None,
risk_job=None,
job_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_create_dlp_job_without_project_id(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.create_dlp_job()
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_create_dlp_job_with_wait_until_finished(self, get_conn):
job_for_create = DlpJob(name=DLP_JOB_PATH, state=DlpJob.JobState.PENDING)
get_conn.return_value.create_dlp_job.return_value = job_for_create
job_for_get = DlpJob(name=DLP_JOB_PATH, state=DlpJob.JobState.DONE)
get_conn.return_value.get_dlp_job.return_value = job_for_get
self.hook.create_dlp_job(project_id=PROJECT_ID)
get_conn.return_value.get_dlp_job.assert_called_once_with(
name=DLP_JOB_PATH, retry=None, timeout=None, metadata=None
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_create_inspect_template_with_org_id(self, get_conn, mock_project_id):
get_conn.return_value.create_inspect_template.return_value = API_RESPONSE
result = self.hook.create_inspect_template(organization_id=ORGANIZATION_ID)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.create_inspect_template.assert_called_once_with(
parent=ORGANIZATION_PATH,
inspect_template=None,
template_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_create_inspect_template_with_project_id(self, get_conn):
get_conn.return_value.create_inspect_template.return_value = API_RESPONSE
result = self.hook.create_inspect_template(project_id=PROJECT_ID)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.create_inspect_template.assert_called_once_with(
parent=PROJECT_PATH,
inspect_template=None,
template_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_create_inspect_template_without_parent(self, _, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.create_inspect_template()
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_create_job_trigger(self, get_conn):
get_conn.return_value.create_job_trigger.return_value = API_RESPONSE
result = self.hook.create_job_trigger(project_id=PROJECT_ID)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.create_job_trigger.assert_called_once_with(
parent=PROJECT_PATH,
job_trigger=None,
trigger_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch( # type: ignore
"airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn"
)
def test_create_job_trigger_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.create_job_trigger()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_create_stored_info_type_with_org_id(self, get_conn, mock_project_id):
get_conn.return_value.create_stored_info_type.return_value = API_RESPONSE
result = self.hook.create_stored_info_type(organization_id=ORGANIZATION_ID)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.create_stored_info_type.assert_called_once_with(
parent=ORGANIZATION_PATH,
config=None,
stored_info_type_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_create_stored_info_type_with_project_id(self, get_conn):
get_conn.return_value.create_stored_info_type.return_value = API_RESPONSE
result = self.hook.create_stored_info_type(project_id=PROJECT_ID)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.create_stored_info_type.assert_called_once_with(
parent=PROJECT_PATH,
config=None,
stored_info_type_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch( # type: ignore
"airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn"
)
def test_create_stored_info_type_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.create_stored_info_type()
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_deidentify_content(self, get_conn):
get_conn.return_value.deidentify_content.return_value = API_RESPONSE
result = self.hook.deidentify_content(project_id=PROJECT_ID)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.deidentify_content.assert_called_once_with(
parent=PROJECT_PATH,
deidentify_config=None,
inspect_config=None,
item=None,
inspect_template_name=None,
deidentify_template_name=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch( # type: ignore
"airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn"
)
def test_deidentify_content_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.deidentify_content()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch( # type: ignore
"airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn"
)
def test_delete_deidentify_template_with_org_id(self, get_conn, mock_project_id):
self.hook.delete_deidentify_template(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID
)
get_conn.return_value.delete_deidentify_template.assert_called_once_with(
name=DEIDENTIFY_TEMPLATE_ORGANIZATION_PATH,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_deidentify_template_with_project_id(self, get_conn):
self.hook.delete_deidentify_template(
template_id=TEMPLATE_ID, project_id=PROJECT_ID
)
get_conn.return_value.delete_deidentify_template.assert_called_once_with(
name=DEIDENTIFY_TEMPLATE_PROJECT_PATH,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_deidentify_template_without_template_id(self, _):
with self.assertRaises(AirflowException):
self.hook.delete_deidentify_template(template_id=None)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_deidentify_template_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.delete_deidentify_template(template_id=TEMPLATE_ID)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_dlp_job(self, get_conn):
self.hook.delete_dlp_job(dlp_job_id=DLP_JOB_ID, project_id=PROJECT_ID)
get_conn.return_value.delete_dlp_job.assert_called_once_with(
name=DLP_JOB_PATH, retry=None, timeout=None, metadata=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_dlp_job_without_dlp_job_id(self, _):
with self.assertRaises(AirflowException):
self.hook.delete_dlp_job(dlp_job_id=None, project_id=PROJECT_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_dlp_job_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.delete_dlp_job(dlp_job_id=DLP_JOB_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_inspect_template_with_org_id(self, get_conn, mock_project_id):
self.hook.delete_inspect_template(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID
)
get_conn.return_value.delete_inspect_template.assert_called_once_with(
name=INSPECT_TEMPLATE_ORGANIZATION_PATH,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_inspect_template_with_project_id(self, get_conn):
self.hook.delete_inspect_template(
template_id=TEMPLATE_ID, project_id=PROJECT_ID
)
get_conn.return_value.delete_inspect_template.assert_called_once_with(
name=INSPECT_TEMPLATE_PROJECT_PATH,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_inspect_template_without_template_id(self, _):
with self.assertRaises(AirflowException):
self.hook.delete_inspect_template(template_id=None)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_inspect_template_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.delete_inspect_template(template_id=TEMPLATE_ID)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_job_trigger(self, get_conn):
self.hook.delete_job_trigger(job_trigger_id=TRIGGER_ID, project_id=PROJECT_ID)
get_conn.return_value.delete_job_trigger.assert_called_once_with(
name=JOB_TRIGGER_PATH, retry=None, timeout=None, metadata=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_job_trigger_without_trigger_id(self, _):
with self.assertRaises(AirflowException):
self.hook.delete_job_trigger(job_trigger_id=None, project_id=PROJECT_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_job_trigger_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.delete_job_trigger(job_trigger_id=TRIGGER_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_stored_info_type_with_org_id(self, get_conn, mock_project_id):
self.hook.delete_stored_info_type(
stored_info_type_id=STORED_INFO_TYPE_ID, organization_id=ORGANIZATION_ID
)
get_conn.return_value.delete_stored_info_type.assert_called_once_with(
name=STORED_INFO_TYPE_ORGANIZATION_PATH,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_stored_info_type_with_project_id(self, get_conn):
self.hook.delete_stored_info_type(
stored_info_type_id=STORED_INFO_TYPE_ID, project_id=PROJECT_ID
)
get_conn.return_value.delete_stored_info_type.assert_called_once_with(
name=STORED_INFO_TYPE_PROJECT_PATH,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_stored_info_type_without_stored_info_type_id(self, _):
with self.assertRaises(AirflowException):
self.hook.delete_stored_info_type(stored_info_type_id=None)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_delete_stored_info_type_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.delete_stored_info_type(stored_info_type_id=STORED_INFO_TYPE_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_deidentify_template_with_org_id(self, get_conn, mock_project_id):
get_conn.return_value.get_deidentify_template.return_value = API_RESPONSE
result = self.hook.get_deidentify_template(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.get_deidentify_template.assert_called_once_with(
name=DEIDENTIFY_TEMPLATE_ORGANIZATION_PATH,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_deidentify_template_with_project_id(self, get_conn):
get_conn.return_value.get_deidentify_template.return_value = API_RESPONSE
result = self.hook.get_deidentify_template(
template_id=TEMPLATE_ID, project_id=PROJECT_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.get_deidentify_template.assert_called_once_with(
name=DEIDENTIFY_TEMPLATE_PROJECT_PATH,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_deidentify_template_without_template_id(self, _):
with self.assertRaises(AirflowException):
self.hook.get_deidentify_template(template_id=None)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_deidentify_template_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.get_deidentify_template(template_id=TEMPLATE_ID)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_dlp_job(self, get_conn):
get_conn.return_value.get_dlp_job.return_value = API_RESPONSE
result = self.hook.get_dlp_job(dlp_job_id=DLP_JOB_ID, project_id=PROJECT_ID)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.get_dlp_job.assert_called_once_with(
name=DLP_JOB_PATH, retry=None, timeout=None, metadata=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_dlp_job_without_dlp_job_id(self, _):
with self.assertRaises(AirflowException):
self.hook.get_dlp_job(dlp_job_id=None, project_id=PROJECT_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_dlp_job_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.get_dlp_job(dlp_job_id=DLP_JOB_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_inspect_template_with_org_id(self, get_conn, mock_project_id):
get_conn.return_value.get_inspect_template.return_value = API_RESPONSE
result = self.hook.get_inspect_template(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.get_inspect_template.assert_called_once_with(
name=INSPECT_TEMPLATE_ORGANIZATION_PATH,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_inspect_template_with_project_id(self, get_conn):
get_conn.return_value.get_inspect_template.return_value = API_RESPONSE
result = self.hook.get_inspect_template(
template_id=TEMPLATE_ID, project_id=PROJECT_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.get_inspect_template.assert_called_once_with(
name=INSPECT_TEMPLATE_PROJECT_PATH,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_inspect_template_without_template_id(self, _):
with self.assertRaises(AirflowException):
self.hook.get_inspect_template(template_id=None)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_inspect_template_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.get_inspect_template(template_id=TEMPLATE_ID)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_job_trigger(self, get_conn):
get_conn.return_value.get_job_trigger.return_value = API_RESPONSE
result = self.hook.get_job_trigger(
job_trigger_id=TRIGGER_ID, project_id=PROJECT_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.get_job_trigger.assert_called_once_with(
name=JOB_TRIGGER_PATH, retry=None, timeout=None, metadata=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_job_trigger_without_trigger_id(self, _):
with self.assertRaises(AirflowException):
self.hook.get_job_trigger(job_trigger_id=None, project_id=PROJECT_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_job_trigger_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.get_job_trigger(job_trigger_id=TRIGGER_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_stored_info_type_with_org_id(self, get_conn, mock_project_id):
get_conn.return_value.get_stored_info_type.return_value = API_RESPONSE
result = self.hook.get_stored_info_type(
stored_info_type_id=STORED_INFO_TYPE_ID, organization_id=ORGANIZATION_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.get_stored_info_type.assert_called_once_with(
name=STORED_INFO_TYPE_ORGANIZATION_PATH,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_stored_info_type_with_project_id(self, get_conn):
get_conn.return_value.get_stored_info_type.return_value = API_RESPONSE
result = self.hook.get_stored_info_type(
stored_info_type_id=STORED_INFO_TYPE_ID, project_id=PROJECT_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.get_stored_info_type.assert_called_once_with(
name=STORED_INFO_TYPE_PROJECT_PATH,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_stored_info_type_without_stored_info_type_id(self, _):
with self.assertRaises(AirflowException):
self.hook.get_stored_info_type(stored_info_type_id=None)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_get_stored_info_type_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.get_stored_info_type(stored_info_type_id=STORED_INFO_TYPE_ID)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_inspect_content(self, get_conn):
get_conn.return_value.inspect_content.return_value = API_RESPONSE
result = self.hook.inspect_content(project_id=PROJECT_ID)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.inspect_content.assert_called_once_with(
parent=PROJECT_PATH,
inspect_config=None,
item=None,
inspect_template_name=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_inspect_content_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.inspect_content()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_deidentify_templates_with_org_id(self, get_conn, mock_project_id):
result = self.hook.list_deidentify_templates(organization_id=ORGANIZATION_ID)
self.assertIsInstance(result, list)
get_conn.return_value.list_deidentify_templates.assert_called_once_with(
parent=ORGANIZATION_PATH,
page_size=None,
order_by=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_deidentify_templates_with_project_id(self, get_conn):
result = self.hook.list_deidentify_templates(project_id=PROJECT_ID)
self.assertIsInstance(result, list)
get_conn.return_value.list_deidentify_templates.assert_called_once_with(
parent=PROJECT_PATH,
page_size=None,
order_by=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_deidentify_templates_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.list_deidentify_templates()
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_dlp_jobs(self, get_conn):
result = self.hook.list_dlp_jobs(project_id=PROJECT_ID)
self.assertIsInstance(result, list)
get_conn.return_value.list_dlp_jobs.assert_called_once_with(
parent=PROJECT_PATH,
filter_=None,
page_size=None,
type_=None,
order_by=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_dlp_jobs_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.list_dlp_jobs()
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_info_types(self, get_conn):
get_conn.return_value.list_info_types.return_value = API_RESPONSE
result = self.hook.list_info_types()
self.assertIs(result, API_RESPONSE)
get_conn.return_value.list_info_types.assert_called_once_with(
language_code=None, filter_=None, retry=None, timeout=None, metadata=None
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_inspect_templates_with_org_id(self, get_conn, mock_project_id):
result = self.hook.list_inspect_templates(organization_id=ORGANIZATION_ID)
self.assertIsInstance(result, list)
get_conn.return_value.list_inspect_templates.assert_called_once_with(
parent=ORGANIZATION_PATH,
page_size=None,
order_by=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_inspect_templates_with_project_id(self, get_conn):
result = self.hook.list_inspect_templates(project_id=PROJECT_ID)
self.assertIsInstance(result, list)
get_conn.return_value.list_inspect_templates.assert_called_once_with(
parent=PROJECT_PATH,
page_size=None,
order_by=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_inspect_templates_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.list_inspect_templates()
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_job_triggers(self, get_conn):
result = self.hook.list_job_triggers(project_id=PROJECT_ID)
self.assertIsInstance(result, list)
get_conn.return_value.list_job_triggers.assert_called_once_with(
parent=PROJECT_PATH,
page_size=None,
order_by=None,
filter_=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_job_triggers_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.list_job_triggers()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_stored_info_types_with_org_id(self, get_conn, mock_project_id):
result = self.hook.list_stored_info_types(organization_id=ORGANIZATION_ID)
self.assertIsInstance(result, list)
get_conn.return_value.list_stored_info_types.assert_called_once_with(
parent=ORGANIZATION_PATH,
page_size=None,
order_by=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_stored_info_types_with_project_id(self, get_conn):
result = self.hook.list_stored_info_types(project_id=PROJECT_ID)
self.assertIsInstance(result, list)
get_conn.return_value.list_stored_info_types.assert_called_once_with(
parent=PROJECT_PATH,
page_size=None,
order_by=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_list_stored_info_types_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.list_stored_info_types()
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_redact_image(self, get_conn):
get_conn.return_value.redact_image.return_value = API_RESPONSE
result = self.hook.redact_image(project_id=PROJECT_ID)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.redact_image.assert_called_once_with(
parent=PROJECT_PATH,
inspect_config=None,
image_redaction_configs=None,
include_findings=None,
byte_item=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_redact_image_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.redact_image()
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_reidentify_content(self, get_conn):
get_conn.return_value.reidentify_content.return_value = API_RESPONSE
result = self.hook.reidentify_content(project_id=PROJECT_ID)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.reidentify_content.assert_called_once_with(
parent=PROJECT_PATH,
reidentify_config=None,
inspect_config=None,
item=None,
inspect_template_name=None,
reidentify_template_name=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch( # type: ignore
"airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn"
)
def test_reidentify_content_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.reidentify_content()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_update_deidentify_template_with_org_id(self, get_conn, mock_project_id):
get_conn.return_value.update_deidentify_template.return_value = API_RESPONSE
result = self.hook.update_deidentify_template(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.update_deidentify_template.assert_called_once_with(
name=DEIDENTIFY_TEMPLATE_ORGANIZATION_PATH,
deidentify_template=None,
update_mask=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_update_deidentify_template_with_project_id(self, get_conn):
get_conn.return_value.update_deidentify_template.return_value = API_RESPONSE
result = self.hook.update_deidentify_template(
template_id=TEMPLATE_ID, project_id=PROJECT_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.update_deidentify_template.assert_called_once_with(
name=DEIDENTIFY_TEMPLATE_PROJECT_PATH,
deidentify_template=None,
update_mask=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_update_deidentify_template_without_template_id(self, _):
with self.assertRaises(AirflowException):
self.hook.update_deidentify_template(
template_id=None, organization_id=ORGANIZATION_ID
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_update_deidentify_template_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.update_deidentify_template(template_id=TEMPLATE_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_update_inspect_template_with_org_id(self, get_conn, mock_project_id):
get_conn.return_value.update_inspect_template.return_value = API_RESPONSE
result = self.hook.update_inspect_template(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.update_inspect_template.assert_called_once_with(
name=INSPECT_TEMPLATE_ORGANIZATION_PATH,
inspect_template=None,
update_mask=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_update_inspect_template_with_project_id(self, get_conn):
get_conn.return_value.update_inspect_template.return_value = API_RESPONSE
result = self.hook.update_inspect_template(
template_id=TEMPLATE_ID, project_id=PROJECT_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.update_inspect_template.assert_called_once_with(
name=INSPECT_TEMPLATE_PROJECT_PATH,
inspect_template=None,
update_mask=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_update_inspect_template_without_template_id(self, _):
with self.assertRaises(AirflowException):
self.hook.update_inspect_template(
template_id=None, organization_id=ORGANIZATION_ID
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_update_inspect_template_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.update_inspect_template(template_id=TEMPLATE_ID)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_update_job_trigger(self, get_conn):
get_conn.return_value.update_job_trigger.return_value = API_RESPONSE
result = self.hook.update_job_trigger(
job_trigger_id=TRIGGER_ID, project_id=PROJECT_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.update_job_trigger.assert_called_once_with(
name=JOB_TRIGGER_PATH,
job_trigger=None,
update_mask=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_update_job_trigger_without_job_trigger_id(self, _):
with self.assertRaises(AirflowException):
self.hook.update_job_trigger(job_trigger_id=None, project_id=PROJECT_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_update_job_trigger_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.update_job_trigger(job_trigger_id=TRIGGER_ID)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_update_stored_info_type_with_org_id(self, get_conn, mock_project_id):
get_conn.return_value.update_stored_info_type.return_value = API_RESPONSE
result = self.hook.update_stored_info_type(
stored_info_type_id=STORED_INFO_TYPE_ID, organization_id=ORGANIZATION_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.update_stored_info_type.assert_called_once_with(
name=STORED_INFO_TYPE_ORGANIZATION_PATH,
config=None,
update_mask=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn")
def test_update_stored_info_type_with_project_id(self, get_conn):
get_conn.return_value.update_stored_info_type.return_value = API_RESPONSE
result = self.hook.update_stored_info_type(
stored_info_type_id=STORED_INFO_TYPE_ID, project_id=PROJECT_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.update_stored_info_type.assert_called_once_with(
name=STORED_INFO_TYPE_PROJECT_PATH,
config=None,
update_mask=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch( # type: ignore
"airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn"
)
def test_update_stored_info_type_without_stored_info_type_id(self, _):
with self.assertRaises(AirflowException):
self.hook.update_stored_info_type(
stored_info_type_id=None, organization_id=ORGANIZATION_ID
)
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch( # type: ignore
"airflow.providers.google.cloud.hooks.dlp.CloudDLPHook.get_conn"
)
def test_update_stored_info_type_without_parent(self, mock_get_conn, mock_project_id):
with self.assertRaises(AirflowException):
self.hook.update_stored_info_type(stored_info_type_id=STORED_INFO_TYPE_ID)
| |
#!/usr/local/bin/python2.4
##!/usr/bin/python
# This is a little script to analyse data taken with the align
# scripts. As the main result it will show recommended values for
# ALFOSC grism/slit wheel motor units.
# This script requires a valid IRAF login.cl file to be present
# in the 'present working directory'. I'm using PyRAF.
# All temporary files will be under /tmp/tiasgat/
# The IRAF to DS9 communication is setup to use private fifos.
# To tell IRAF where the fifos are, this script sets the environment like
# setenv IMTDEV fifo:/.../tiasgat/imt1i:/.../tiasgat/imt1o
# See the 'start_ds9' function below.
# To make the fifos, use "mknod imt1i p" and "mknod imt1o p";
# check the fifopath variable below.
# JHT, Mar/Apr 2006
# thanks to Ricardo for help with python issues!!
####### Load modules #######
# standard library
import Tkinter as Tk
import os
import time
import subprocess
# Define where Tiasgat modules are to be found
# This is only necessary if those modules are not in the
# same directory as the main program tiasgat.py
#import sys
#modulepath = "/home/jht/python"
#sys.path.append(modulepath)
#print sys.path
# Load Tiasgat module stuff
import HG, VG, HS, VS
from tiasgatfuncs import *
####### Define and initialize global variables #######
filename = "ALalgn0001"
ds9_pid = -99
datapath = "/data/alfosc/"
#fifopath = "/var/scratch/staff/jht/alfosc/tiasgat/"
fifopath = "/var/postprocess/alfoscAlignTool/"
####### Define functions #######
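# Illustrative sketch only (nothing in this script calls it): the header above
# suggests creating the communication fifos by hand with "mknod imt1i p" and
# "mknod imt1o p". A hypothetical helper doing the same from Python, assuming
# the fifopath directory already exists and is writable, could look like this.
def ensure_fifos_sketch():
    for name in ("imt1i", "imt1o"):
        path = fifopath + name
        if not os.path.exists(path):
            os.mkfifo(path)   # same effect as "mknod <path> p"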
def start_ds9():
global ds9_pid, ds9_proc
if ds9_pid == -99: # start-up of first instance of ds9
# Tell IRAF where to find the DS9 communication fifos
os.putenv("IMTDEV","fifo:"+fifopath+"imt1i:"+fifopath+"imt1o")
#os.system("env")
messageOut(messageTextWidget,"Starting ds9 ...\n")
wortel.update_idletasks()
ds9_proc=subprocess.Popen(["/usr/local/bin/ds9", "-title", "Tiasgat!",
"-fifo", fifopath+"imt1", "-fifo_only",
"-zoom", "4", "-geometry", "620x780", "-cmap", "blue"])
ds9_pid=ds9_proc.pid
# give ds9 some time to start up
time.sleep(1.0)
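# Popen.poll() returns None while ds9 is still running and its exit status
# once it has terminated, so the branch below only restarts ds9 after a clean
# exit (status 0); an instance killed by a signal reports a negative status
# and is not restarted here.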
if ds9_proc.poll() == 0: # restart ds9 if a previous instance has been closed/killed by the user
messageOut(messageTextWidget,"Starting new instance of ds9 ...\n")
wortel.update_idletasks()
ds9_proc=subprocess.Popen(["/usr/local/bin/ds9", "-title", "Tiasgat!",
"-fifo", fifopath+"imt1", "-fifo_only",
"-zoom", "4", "-geometry", "620x780", "-cmap", "blue"])
ds9_pid=ds9_proc.pid
# give ds9 some time to start up
time.sleep(1.0)
# Now the wrappers for the actual PyRAF stuff; these exist because the
# Widgets do not allow arguments such as the filename to be passed on.
# The Widgets can only invoke functions that take no parameters.
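# (A hedged alternative, shown here only as a sketch in comments and not used
#  by this script: the arguments could instead be bound at Button-creation
#  time with a lambda, e.g.
#      command=lambda: resultstr.set(VS.doit(fname.get(), datapath,
#                                            messageTextWidget))
#  omitting the start_ds9()/tkraise() bookkeeping that the wrappers below do.)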
def VertSlit():
global filename
filename=fname.get()
start_ds9()
resultstr.set(VS.doit(filename,datapath,messageTextWidget))
wortel.tkraise()
wortel.update_idletasks()
def HorSlit():
global filename
filename=fname.get()
start_ds9()
resultstr.set(HS.doit(filename,datapath,messageTextWidget))
wortel.tkraise()
wortel.update_idletasks()
def VertGrism():
global filename
filename=fname.get()
start_ds9()
resultstr.set(VG.doit(filename,datapath,messageTextWidget))
wortel.tkraise()
wortel.update_idletasks()
def HorGrism():
global filename
filename=fname.get()
start_ds9()
resultstr.set(HG.doit(filename,datapath,messageTextWidget))
wortel.tkraise()
wortel.update_idletasks()
# Function to get latest image file name
def GetLatest():
# Use simple pipe command os.popen
dummy=os.popen("cd "+datapath+"; ls -1rt *fits")
latestfile=dummy.readlines()[-1].split(".")[0]
dummy.close()
messageOut(messageTextWidget,
"\nLatest ALFOSC file is "+datapath+latestfile+".fits\n")
fname.set(latestfile)
wortel.update_idletasks()
# This prevents the messages window from being killed while the
# main window is still alive
def interceptDestroyProtocol():
# This is a message that is displayed when the WM 'delete' is used.
print "Use the \'Quit\' button ...."
wortel.tkraise()
####### Define GUI #######
# Define window and some settings
wortel=Tk.Tk()
wortel.config(background="pink")
wortel.resizable(False,False)
wortel.title("The incredible ALFOSC slit/grism align tool !")
wortel.iconname("Tiasgat!")
fname=Tk.StringVar()
fname.set(filename)
resultstr=Tk.StringVar()
resultstr.set("Result: none yet")
# top row of GUI
fw=Tk.Frame(wortel)
Tk.Button(fw,activebackground="lightgoldenrod",background="LightSteelBlue1",
text="Align horizontal slit",command=HorSlit,height=2).pack(side="left")
Tk.Button(fw,activebackground="lightgoldenrod",background="LightSteelBlue1",
text="Align vertical slit",command=VertSlit).pack(side="left",fill="both")
Tk.Button(fw,activebackground="lightgoldenrod",background="LightSteelBlue1",
text="Align horizontal grism",command=HorGrism).pack(side="left",fill="both")
Tk.Button(fw,activebackground="lightgoldenrod",background="LightSteelBlue1",
text="Align vertical grism",command=VertGrism).pack(side="left",fill="both")
fw.pack()
# middle row of GUI
fw=Tk.Frame(wortel)
Tk.Label(fw,height=2,width=70,background="hotpink",
textvariable=resultstr).pack(side="left",fill="x",expand=True)
fw.pack(side="top",fill="x",expand=True)
# bottom row of GUI
Tk.Label(background="pink",anchor="e",
text="File name:").pack(side="left",fill="both",expand=True)
entryWidget=Tk.Entry(background="pink",width=10,textvariable=fname)
entryWidget.pack(side="left",fill="both")
Tk.Button(background="pink",activebackground="lightgoldenrod",text="Latest file",
command=GetLatest).pack(side="left",fill="both",expand=False)
Tk.Label(background="pink",text=" ").pack(side="left",fill="both",expand=True)
Tk.Button(background="pink",activebackground="lightgoldenrod",text="Quit",
command=wortel.quit,height=2).pack(side="left",fill="both",expand=True)
# set focus to the entry widget, so you don't have to click on it first
entryWidget.focus_set()
# Define a message window with Text widget
messageWindow=Tk.Toplevel()
messageWindow.geometry("586x300")
messageWindow.title("Tiasgat! messages")
messageWindow.iconname("Tiasgat! messages")
messageTextWidget=Tk.Text(messageWindow,background="pink")
messageTextWidget.pack(side="left",fill="both",expand=True)
messageTextWidget.config(state="disabled")
# Bind a scrollbar to the text and vice-versa
scrollIt=Tk.Scrollbar(messageWindow,command=messageTextWidget.yview,
troughcolor="hotpink",
background="LightSteelBlue1",activebackground="lightgoldenrod")
scrollIt.pack(side="left",fill="y")
messageTextWidget.config(yscrollcommand=scrollIt.set)
# intercept WM DELETE
messageWindow.protocol("WM_DELETE_WINDOW", interceptDestroyProtocol)
###### Start event loop ######
# the event loop can be stopped by clicking the 'Quit' button
Tk.mainloop()
# Commands to be executed once the event loop has stopped
if ds9_pid != -99: os.kill(ds9_pid,15)
print '\nClear skies !\n'
| |
# Copyright 2012-2013 Ravello Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from testmill.state import env
def _fixup_image(image):
# XXX: Strip TestMill: prefix. We keep the testmill images with this
# prefix until we've got a hierarchical library structure where we
# can put them.
if image['name'].startswith('TestMill:'):
image['name'] = image['name'][9:]
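# e.g. a hypothetical image named 'TestMill:fedora19' becomes 'fedora19'
# (len('TestMill:') == 9, hence the slice above).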
def _init_image_cache():
"""Initialize the images cache."""
if hasattr(env, '_images'):
return
images = env.api.get_images()
for image in images:
_fixup_image(image)
env._images = images
env._images_byid = {}
env._images_byname = {}
def get_images():
"""Return a list of all images."""
_init_image_cache()
return env._images
def get_image(id=None, name=None):
"""Get an image based on its id or name."""
_init_image_cache()
if id:
img = env._images_byid.get(id)
if img is not None:
return img
img = env.api.get_image(id)
_fixup_image(img)
env._images_byid[img['id']] = img
env._images_byname[img['name']] = img
return img
elif name:
img = env._images_byname.get(name)
if img is not None:
return img
for img in env._images:
if img['name'] == name:
break
else:
return
img = env.api.get_image(img['id'])
_fixup_image(img)
env._images_byid[img['id']] = img
env._images_byname[img['name']] = img
return img
else:
raise ValueError('Specify either "id" or "name".')
def _init_application_cache():
"""Initialize the applications cache."""
if hasattr(env, '_applications'):
return
env._applications = env.api.get_applications()
env._applications_byid = {}
env._applications_byname = {}
def get_applications():
"""Return a list of all applications."""
_init_application_cache()
return env._applications
def find_applications(project=None, defname=None, instance=None):
"""Find one or more applications."""
_init_application_cache()
applications = []
for app in env._applications:
parts = app['name'].split(':')
if len(parts) != 3:
continue
if project is not None and parts[0] != project or \
defname is not None and parts[1] != defname or \
instance is not None and parts[2] != instance:
continue
applications.append(app)
return applications
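# Illustrative usage (hypothetical names): an application called
# "myproj:web:1" matches find_applications(project='myproj') as well as
# find_applications(project='myproj', defname='web', instance='1'); names
# that do not have exactly three ':'-separated parts are skipped entirely.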
def get_application(id=None, name=None, force_reload=False):
"""Get an application based on its id or name."""
_init_application_cache()
if force_reload:
env._applications = env.api.get_applications()
if id:
if not force_reload:
app = env._applications_byid.get(id)
if app is not None:
return app
app = env.api.get_application(id)
if app:
env._applications_byid[app['id']] = app
env._applications_byname[app['name']] = app
elif force_reload and id in env._applications_byid:
oldapp = env._applications_byid[id]
del env._applications_byid[oldapp['id']]
del env._applications_byname[oldapp['name']]
return app
elif name:
if not force_reload:
app = env._applications_byname.get(name)
if app is not None:
return app
for app in env._applications:
if app['name'] == name:
break
else:
return
app = env.api.get_application(app['id'])
if app:
env._applications_byid[app['id']] = app
env._applications_byname[app['name']] = app
elif force_reload and name in env._applications_byname:
oldapp = env._applications_byname[name]
del env._applications_byid[oldapp['id']]
del env._applications_byname[oldapp['name']]
return app
else:
raise ValueError('Specify either "id" or "name".')
def _init_blueprint_cache():
"""Initialize the blueprints cache."""
if hasattr(env, '_blueprints'):
return
env._blueprints = env.api.get_blueprints()
env._blueprints_byid = {}
env._blueprints_byname = {}
def get_blueprints():
"""Return a list of all blueprints."""
_init_blueprint_cache()
return env._blueprints
def find_blueprints(project=None, defname=None, instance=None):
"""Find one or more blueprints."""
_init_blueprint_cache()
blueprints = []
for bp in env._blueprints:
parts = bp['name'].split(':')
if len(parts) != 3:
continue
if project is not None and parts[0] != project or \
defname is not None and parts[1] != defname or \
instance is not None and parts[2] != instance:
continue
blueprints.append(bp)
return blueprints
def get_blueprint(id=None, name=None, force_reload=False):
"""Get an blueprint based on its id or name."""
_init_blueprint_cache()
if force_reload:
env._blueprints = env.api.get_blueprints()
if id:
if not force_reload:
bp = env._blueprints_byid.get(id)
if bp is not None:
return bp
bp = env.api.get_blueprint(id)
if bp:
env._blueprints_byid[bp['id']] = bp
env._blueprints_byname[bp['name']] = bp
elif force_reload and id in env._blueprints_byid:
oldbp = env._blueprints_byid[id]
del env._blueprints_byid[oldbp['id']]
del env._blueprints_byname[oldbp['name']]
return bp
elif name:
if not force_reload:
bp = env._blueprints_byname.get(name)
if bp is not None:
return bp
for bp in env._blueprints:
if bp['name'] == name:
break
else:
return
bp = env.api.get_blueprint(bp['id'])
if bp:
env._blueprints_byid[bp['id']] = bp
env._blueprints_byname[bp['name']] = bp
elif force_reload and name in env._blueprints_byname:
oldbp = env._blueprints_byname[name]
del env._blueprints_byid[oldbp['id']]
del env._blueprints_byname[oldbp['name']]
return bp
else:
raise ValueError('Specify either "id" or "name".')
| |
#!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import base64
import json
import logging
import struct
import threading
import time
import unittest
from vtdb import dbexceptions
from vtdb import keyrange_constants
import environment
import utils
import tablet
keyspace_id_type = keyrange_constants.KIT_UINT64
pack_keyspace_id = struct.Struct('!Q').pack
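# pack_keyspace_id packs a 64-bit keyspace id into big-endian bytes, e.g.
# pack_keyspace_id(0x9000000000000000) -> '\x90\x00\x00\x00\x00\x00\x00\x00';
# the KIT_BYTES code paths below base64-encode that value for the EMD comments.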
# initial shards
# range "" - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_ny_rdonly = tablet.Tablet(cell='ny')
# range 80 - ""
shard_1_master = tablet.Tablet()
shard_1_slave1 = tablet.Tablet()
shard_1_slave2 = tablet.Tablet()
shard_1_ny_rdonly = tablet.Tablet(cell='ny')
shard_1_rdonly1 = tablet.Tablet()
# split shards
# range 80 - c0
shard_2_master = tablet.Tablet()
shard_2_replica1 = tablet.Tablet()
shard_2_replica2 = tablet.Tablet()
# range c0 - ""
shard_3_master = tablet.Tablet()
shard_3_replica = tablet.Tablet()
shard_3_rdonly1 = tablet.Tablet()
def setUpModule():
try:
environment.topo_server().setup()
setup_procs = [
shard_0_master.init_mysql(),
shard_0_replica.init_mysql(),
shard_0_ny_rdonly.init_mysql(),
shard_1_master.init_mysql(),
shard_1_slave1.init_mysql(),
shard_1_slave2.init_mysql(),
shard_1_ny_rdonly.init_mysql(),
shard_1_rdonly1.init_mysql(),
shard_2_master.init_mysql(),
shard_2_replica1.init_mysql(),
shard_2_replica2.init_mysql(),
shard_3_master.init_mysql(),
shard_3_replica.init_mysql(),
shard_3_rdonly1.init_mysql(),
]
utils.Vtctld().start()
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
if utils.options.skip_teardown:
return
teardown_procs = [
shard_0_master.teardown_mysql(),
shard_0_replica.teardown_mysql(),
shard_0_ny_rdonly.teardown_mysql(),
shard_1_master.teardown_mysql(),
shard_1_slave1.teardown_mysql(),
shard_1_slave2.teardown_mysql(),
shard_1_ny_rdonly.teardown_mysql(),
shard_1_rdonly1.teardown_mysql(),
shard_2_master.teardown_mysql(),
shard_2_replica1.teardown_mysql(),
shard_2_replica2.teardown_mysql(),
shard_3_master.teardown_mysql(),
shard_3_replica.teardown_mysql(),
shard_3_rdonly1.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
shard_0_master.remove_tree()
shard_0_replica.remove_tree()
shard_0_ny_rdonly.remove_tree()
shard_1_master.remove_tree()
shard_1_slave1.remove_tree()
shard_1_slave2.remove_tree()
shard_1_ny_rdonly.remove_tree()
shard_1_rdonly1.remove_tree()
shard_2_master.remove_tree()
shard_2_replica1.remove_tree()
shard_2_replica2.remove_tree()
shard_3_master.remove_tree()
shard_3_replica.remove_tree()
shard_3_rdonly1.remove_tree()
# InsertThread inserts a value into the timestamps table and then updates
# it with the current timestamp every 0.2 seconds (1/5 s)
class InsertThread(threading.Thread):
def __init__(self, tablet, object_name, user_id, keyspace_id):
threading.Thread.__init__(self)
self.tablet = tablet
self.object_name = object_name
self.user_id = user_id
self.keyspace_id = keyspace_id
if keyspace_id_type == keyrange_constants.KIT_BYTES:
self.str_keyspace_id = base64.b64encode(pack_keyspace_id(keyspace_id))
else:
self.str_keyspace_id = "%d" % keyspace_id
self.done = False
self.tablet.mquery('vt_test_keyspace', [
'begin',
'insert into timestamps(name, time_milli, keyspace_id) values("%s", %d, 0x%x) /* EMD keyspace_id:%s user_id:%d */' %
(self.object_name, long(time.time() * 1000), self.keyspace_id,
self.str_keyspace_id, self.user_id),
'commit'
], write=True, user='vt_app')
self.start()
def run(self):
try:
while not self.done:
self.tablet.mquery('vt_test_keyspace', [
'begin',
'update timestamps set time_milli=%d where name="%s" /* EMD keyspace_id:%s user_id:%d */' % (long(time.time() * 1000), self.object_name, self.str_keyspace_id, self.user_id),
'commit'
], write=True, user='vt_app')
time.sleep(0.2)
except Exception as e:
logging.error("InsertThread got exception: %s", e)
# MonitorLagThread will get values from a database, and compare the timestamp
# to evaluate lag. Since the qps is really low, and we send binlogs as chunks,
# the latency is pretty high (a few seconds).
class MonitorLagThread(threading.Thread):
def __init__(self, tablet, object_name):
threading.Thread.__init__(self)
self.tablet = tablet
self.object_name = object_name
self.done = False
self.max_lag = 0
self.lag_sum = 0
self.sample_count = 0
self.start()
def run(self):
try:
while not self.done:
result = self.tablet.mquery('vt_test_keyspace', 'select time_milli from timestamps where name="%s"' % self.object_name)
if result:
lag = long(time.time() * 1000) - long(result[0][0])
logging.debug("MonitorLagThread(%s) got %d", self.object_name, lag)
self.sample_count += 1
self.lag_sum += lag
if lag > self.max_lag:
self.max_lag = lag
time.sleep(1.0)
except Exception as e:
logging.error("MonitorLagThread got exception: %s", e)
class TestResharding(unittest.TestCase):
# create_schema will create the same schema on the keyspace
# then insert some values
def _create_schema(self):
if keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
create_table_template = '''create table %s(
id bigint auto_increment,
msg varchar(64),
keyspace_id ''' + t + ''' not null,
primary key (id),
index by_msg (msg)
) Engine=InnoDB'''
create_view_template = '''create view %s(id, msg, keyspace_id) as select id, msg, keyspace_id from %s'''
create_timestamp_table = '''create table timestamps(
name varchar(64),
time_milli bigint(20) unsigned not null,
keyspace_id ''' + t + ''' not null,
primary key (name)
) Engine=InnoDB'''
create_unrelated_table = '''create table unrelated(
name varchar(64),
primary key (name)
) Engine=InnoDB'''
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ("resharding1"),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ("resharding2"),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_view_template % ("view1", "resharding1"),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_timestamp_table,
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_unrelated_table,
'test_keyspace'],
auto_log=True)
# _insert_value inserts a value in the MySQL database along with the comments
# required for routing.
def _insert_value(self, tablet, table, id, msg, keyspace_id):
if keyspace_id_type == keyrange_constants.KIT_BYTES:
k = base64.b64encode(pack_keyspace_id(keyspace_id))
else:
k = "%d" % keyspace_id
tablet.mquery('vt_test_keyspace', [
'begin',
'insert into %s(id, msg, keyspace_id) values(%d, "%s", 0x%x) /* EMD keyspace_id:%s user_id:%d */' % (table, id, msg, keyspace_id, k, id),
'commit'
], write=True)
def _get_value(self, tablet, table, id):
return tablet.mquery('vt_test_keyspace', 'select id, msg, keyspace_id from %s where id=%d' % (table, id))
def _check_value(self, tablet, table, id, msg, keyspace_id,
should_be_here=True):
result = self._get_value(tablet, table, id)
if keyspace_id_type == keyrange_constants.KIT_BYTES:
fmt = "%s"
keyspace_id = pack_keyspace_id(keyspace_id)
else:
fmt = "%x"
if should_be_here:
self.assertEqual(result, ((id, msg, keyspace_id),),
("Bad row in tablet %s for id=%d, keyspace_id=" +
fmt + ", row=%s") % (tablet.tablet_alias, id,
keyspace_id, str(result)))
else:
self.assertEqual(len(result), 0,
("Extra row in tablet %s for id=%d, keyspace_id=" +
fmt + ": %s") % (tablet.tablet_alias, id, keyspace_id,
str(result)))
# _is_value_present_and_correct tries to read a value.
# if it is there, it will check it is correct and return True if it is.
# if not correct, it will self.fail.
# if not there, it will return False.
def _is_value_present_and_correct(self, tablet, table, id, msg, keyspace_id):
result = self._get_value(tablet, table, id)
if len(result) == 0:
return False
if keyspace_id_type == keyrange_constants.KIT_BYTES:
fmt = "%s"
keyspace_id = pack_keyspace_id(keyspace_id)
else:
fmt = "%x"
self.assertEqual(result, ((id, msg, keyspace_id),),
("Bad row in tablet %s for id=%d, keyspace_id=" + fmt) % (
tablet.tablet_alias, id, keyspace_id))
return True
def _insert_startup_values(self):
self._insert_value(shard_0_master, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._insert_value(shard_1_master, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._insert_value(shard_1_master, 'resharding1', 3, 'msg3',
0xD000000000000000)
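# With shard 80- split into 80-c0 and c0-, keyspace_id 0x10... stays on
# shard -80, 0x90... belongs to 80-c0 and 0xD0... to c0-; this is what
# _check_startup_values below verifies on the destination shards.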
def _exec_dml(self, tablet, sql, bindvars):
conn = tablet.conn()
conn.begin()
try:
results = conn._execute(sql, bindvars)
conn.commit()
return results[0]
finally:
conn.close()
def _check_startup_values(self):
# check first value is in the right shard
self._check_value(shard_2_master, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_2_replica1, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_2_replica2, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_3_master, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
self._check_value(shard_3_replica, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
self._check_value(shard_3_rdonly1, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
# check second value is in the right shard too
self._check_value(shard_2_master, 'resharding1', 3, 'msg3',
0xD000000000000000, should_be_here=False)
self._check_value(shard_2_replica1, 'resharding1', 3, 'msg3',
0xD000000000000000, should_be_here=False)
self._check_value(shard_2_replica2, 'resharding1', 3, 'msg3',
0xD000000000000000, should_be_here=False)
self._check_value(shard_3_master, 'resharding1', 3, 'msg3',
0xD000000000000000)
self._check_value(shard_3_replica, 'resharding1', 3, 'msg3',
0xD000000000000000)
self._check_value(shard_3_rdonly1, 'resharding1', 3, 'msg3',
0xD000000000000000)
def _insert_lots(self, count, base=0):
for i in xrange(count):
self._insert_value(shard_1_master, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i)
self._insert_value(shard_1_master, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i)
# _check_lots returns how many of the values we have, as a percentage
# (two rows are checked per index, hence the division by 2 below).
def _check_lots(self, count, base=0):
found = 0
for i in xrange(count):
if self._is_value_present_and_correct(shard_2_replica2, 'resharding1',
10000 + base + i, 'msg-range1-%d' %
i, 0xA000000000000000 + base + i):
found += 1
if self._is_value_present_and_correct(shard_3_replica, 'resharding1',
20000 + base + i, 'msg-range2-%d' %
i, 0xE000000000000000 + base + i):
found += 1
percent = found * 100 / count / 2
logging.debug("I have %d%% of the data", percent)
return percent
def _check_lots_timeout(self, count, threshold, timeout, base=0):
while True:
value = self._check_lots(count, base=base)
if value >= threshold:
return
if timeout == 0:
self.fail("timeout waiting for %d%% of the data" % threshold)
logging.debug("sleeping until we get %d%%", threshold)
time.sleep(1)
timeout -= 1
# _check_lots_not_present makes sure no data is in the wrong shard
def _check_lots_not_present(self, count, base=0):
found = 0
for i in xrange(count):
self._check_value(shard_3_replica, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i,
should_be_here=False)
self._check_value(shard_2_replica2, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i,
should_be_here=False)
def _check_binlog_server_vars(self, tablet):
v = utils.get_vars(tablet.port)
self.assertIn('UpdateStreamKeyRangeStatements', v)
self.assertIn('UpdateStreamKeyRangeTransactions', v)
def _check_binlog_player_vars(self, tablet, seconds_behind_master_max = 0):
v = utils.get_vars(tablet.port)
self.assertIn('BinlogPlayerMapSize', v)
self.assertIn('BinlogPlayerSecondsBehindMaster', v)
self.assertIn('BinlogPlayerSecondsBehindMasterMap', v)
self.assertIn('BinlogPlayerSourceShardNameMap', v)
self.assertIn('0', v['BinlogPlayerSourceShardNameMap'])
self.assertEquals(v['BinlogPlayerSourceShardNameMap']['0'], 'test_keyspace/80-')
self.assertIn('BinlogPlayerSourceTabletAliasMap', v)
self.assertIn('0', v['BinlogPlayerSourceTabletAliasMap'])
if seconds_behind_master_max != 0:
self.assertTrue(v['BinlogPlayerSecondsBehindMaster'] <
seconds_behind_master_max,
'BinlogPlayerSecondsBehindMaster is too high: %d > %d' % (
v['BinlogPlayerSecondsBehindMaster'],
seconds_behind_master_max))
self.assertTrue(v['BinlogPlayerSecondsBehindMasterMap']['0'] <
seconds_behind_master_max,
'BinlogPlayerSecondsBehindMasterMap is too high: %d > %d' % (
v['BinlogPlayerSecondsBehindMasterMap']['0'],
seconds_behind_master_max))
def _check_stream_health_equals_binlog_player_vars(self, tablet):
blp_stats = utils.get_vars(tablet.port)
# Force a health check run here, since it is not enabled by default (the tablets are not started with it).
utils.run_vtctl(["RunHealthCheck", tablet.tablet_alias, 'replica'])
stream_health, _ = utils.run_vtctl(['VtTabletStreamHealth',
'-count', '1',
tablet.tablet_alias],
trap_output=True, auto_log=True)
logging.debug("Got health: %s", stream_health)
data = json.loads(stream_health)
self.assertIn('realtime_stats', data)
self.assertNotIn('health_error', data['realtime_stats'])
# count is > 0 and therefore not omitted by the Go JSON marshaller.
self.assertIn('binlog_players_count', data['realtime_stats'])
self.assertEqual(blp_stats['BinlogPlayerMapSize'],
data['realtime_stats']['binlog_players_count'])
self.assertEqual(blp_stats['BinlogPlayerSecondsBehindMaster'],
data['realtime_stats'].get(
'seconds_behind_master_filtered_replication', 0))
def _test_keyrange_constraints(self):
with self.assertRaisesRegexp(dbexceptions.DatabaseError, '.*enforce keyspace_id range.*'):
self._exec_dml(
shard_0_master,
"insert into resharding1(id, msg, keyspace_id) values(1, 'msg', :keyspace_id)",
{"keyspace_id": 0x9000000000000000},
)
with self.assertRaisesRegexp(dbexceptions.DatabaseError, '.*enforce keyspace_id range.*'):
self._exec_dml(
shard_0_master,
"update resharding1 set msg = 'msg' where id = 1",
{"keyspace_id": 0x9000000000000000},
)
with self.assertRaisesRegexp(dbexceptions.DatabaseError, '.*enforce keyspace_id range.*'):
self._exec_dml(
shard_0_master,
"delete from resharding1 where id = 1",
{"keyspace_id": 0x9000000000000000},
)
def test_resharding(self):
utils.run_vtctl(['CreateKeyspace',
'--sharding_column_name', 'bad_column',
'--sharding_column_type', 'bytes',
'--split_shard_count', '2',
'test_keyspace'])
utils.run_vtctl(['SetKeyspaceShardingInfo', 'test_keyspace',
'keyspace_id', 'uint64'], expect_fail=True)
utils.run_vtctl(['SetKeyspaceShardingInfo',
'-force', '-split_shard_count', '4',
'test_keyspace', 'keyspace_id', keyspace_id_type])
shard_0_master.init_tablet( 'master', 'test_keyspace', '-80')
shard_0_replica.init_tablet('replica', 'test_keyspace', '-80')
shard_0_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '-80')
shard_1_master.init_tablet( 'master', 'test_keyspace', '80-')
shard_1_slave1.init_tablet('replica', 'test_keyspace', '80-')
shard_1_slave2.init_tablet('spare', 'test_keyspace', '80-')
shard_1_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '80-')
shard_1_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-')
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
self.assertEqual(ks['SplitShardCount'], 4)
# we set full_mycnf_args to True as a test in the KIT_BYTES case
full_mycnf_args = keyspace_id_type == keyrange_constants.KIT_BYTES
# create databases so vttablet can start behaving normally
for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None, full_mycnf_args=full_mycnf_args)
# wait for the tablets
shard_0_master.wait_for_vttablet_state('SERVING')
shard_0_replica.wait_for_vttablet_state('SERVING')
shard_0_ny_rdonly.wait_for_vttablet_state('SERVING')
shard_1_master.wait_for_vttablet_state('SERVING')
shard_1_slave1.wait_for_vttablet_state('SERVING')
shard_1_slave2.wait_for_vttablet_state('NOT_SERVING') # spare
shard_1_ny_rdonly.wait_for_vttablet_state('SERVING')
shard_1_rdonly1.wait_for_vttablet_state('SERVING')
# reparent to make the tablets work
utils.run_vtctl(['InitShardMaster', 'test_keyspace/-80',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', 'test_keyspace/80-',
shard_1_master.tablet_alias], auto_log=True)
# create the tables
self._create_schema()
self._insert_startup_values()
self._test_keyrange_constraints()
# create the split shards
shard_2_master.init_tablet( 'master', 'test_keyspace', '80-c0')
shard_2_replica1.init_tablet('spare', 'test_keyspace', '80-c0')
shard_2_replica2.init_tablet('spare', 'test_keyspace', '80-c0')
shard_3_master.init_tablet( 'master', 'test_keyspace', 'c0-')
shard_3_replica.init_tablet( 'spare', 'test_keyspace', 'c0-')
shard_3_rdonly1.init_tablet( 'rdonly', 'test_keyspace', 'c0-')
# start vttablet on the split shards (no db created,
# so they're all not serving)
shard_3_master.start_vttablet(wait_for_state=None,
target_tablet_type='replica')
for t in [shard_2_master, shard_2_replica1, shard_2_replica2,
shard_3_replica, shard_3_rdonly1]:
t.start_vttablet(wait_for_state=None)
for t in [shard_2_master, shard_2_replica1, shard_2_replica2,
shard_3_master, shard_3_replica, shard_3_rdonly1]:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', 'test_keyspace/80-c0',
shard_2_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', 'test_keyspace/c0-',
shard_3_master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n' +
'Partitions(rdonly): -80 80-\n' +
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
# the worker will do everything. We test with source_reader_count=10
# (down from default=20) as connection pool is not big enough for 20.
    # min_table_size_for_split is set to 1 so as to force a split even on the
# small table we have.
# we need to create the schema, and the worker will do data copying
for keyspace_shard in ('test_keyspace/80-c0', 'test_keyspace/c0-'):
utils.run_vtctl(['CopySchemaShard', '--exclude_tables', 'unrelated',
shard_1_rdonly1.tablet_alias, keyspace_shard],
auto_log=True)
utils.run_vtworker(['--cell', 'test_nj',
'--command_display_interval', '10ms',
'SplitClone',
                        '--exclude_tables', 'unrelated',
'--strategy=-populate_blp_checkpoint',
'--source_reader_count', '10',
'--min_table_size_for_split', '1',
'test_keyspace/80-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias,
'rdonly'], auto_log=True)
# TODO(alainjobart): experiment with the dontStartBinlogPlayer option
# check the startup values are in the right place
self._check_startup_values()
# check the schema too
utils.run_vtctl(['ValidateSchemaKeyspace', '--exclude_tables=unrelated',
'test_keyspace'], auto_log=True)
# check the binlog players are running and exporting vars
shard_2_master.wait_for_binlog_player_count(1)
shard_3_master.wait_for_binlog_player_count(1)
self._check_binlog_player_vars(shard_2_master)
self._check_binlog_player_vars(shard_3_master)
# check that binlog server exported the stats vars
self._check_binlog_server_vars(shard_1_slave1)
self._check_stream_health_equals_binlog_player_vars(shard_2_master)
self._check_stream_health_equals_binlog_player_vars(shard_3_master)
# testing filtered replication: insert a bunch of data on shard 1,
# check we get most of it after a few seconds, wait for binlog server
# timeout, check we get all of it.
logging.debug("Inserting lots of data on source shard")
self._insert_lots(1000)
logging.debug("Checking 80 percent of data is sent quickly")
self._check_lots_timeout(1000, 80, 5)
logging.debug("Checking all data goes through eventually")
self._check_lots_timeout(1000, 100, 20)
logging.debug("Checking no data was sent the wrong way")
self._check_lots_not_present(1000)
self._check_binlog_player_vars(shard_2_master, seconds_behind_master_max=30)
self._check_binlog_player_vars(shard_3_master, seconds_behind_master_max=30)
# use vtworker to compare the data
logging.debug("Running vtworker SplitDiff")
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', '--exclude_tables',
'unrelated', 'test_keyspace/c0-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.pause("Good time to test vtworker for diffs")
# get status for a destination master tablet, make sure we have it all
shard_2_master_status = shard_2_master.get_status()
self.assertIn('Binlog player state: Running', shard_2_master_status)
self.assertIn('<td><b>All</b>: 6000<br><b>Query</b>: 4000<br><b>Transaction</b>: 2000<br></td>', shard_2_master_status)
self.assertIn('</html>', shard_2_master_status)
# start a thread to insert data into shard_1 in the background
# with current time, and monitor the delay
insert_thread_1 = InsertThread(shard_1_master, "insert_low", 10000,
0x9000000000000000)
insert_thread_2 = InsertThread(shard_1_master, "insert_high", 10001,
0xD000000000000000)
monitor_thread_1 = MonitorLagThread(shard_2_replica2, "insert_low")
monitor_thread_2 = MonitorLagThread(shard_3_replica, "insert_high")
# tests a failover switching serving to a different replica
utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'replica'])
utils.run_vtctl(['ChangeSlaveType', shard_1_slave1.tablet_alias, 'spare'])
shard_1_slave2.wait_for_vttablet_state('SERVING')
shard_1_slave1.wait_for_vttablet_state('NOT_SERVING')
# test data goes through again
logging.debug("Inserting lots of data on source shard")
self._insert_lots(1000, base=1000)
logging.debug("Checking 80 percent of data was sent quickly")
self._check_lots_timeout(1000, 80, 5, base=1000)
# check we can't migrate the master just yet
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
expect_fail=True)
# check query service is off on master 2 and master 3, as filtered
    # replication is enabled. Even the health check that is enabled on
# master 3 should not interfere (we run it to be sure).
utils.run_vtctl(['RunHealthCheck', shard_3_master.tablet_alias, 'replica'],
auto_log=True)
utils.check_tablet_query_service(self, shard_2_master, False, False)
utils.check_tablet_query_service(self, shard_3_master, False, False)
# check the destination master 3 is healthy, even though its query
# service is not running (if not healthy this would exception out)
shard_3_master.get_healthz()
# now serve rdonly from the split shards, in test_nj only
utils.run_vtctl(['MigrateServedTypes', '--cells=test_nj',
'test_keyspace/80-', 'rdonly'], auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n' +
'Partitions(rdonly): -80 80-c0 c0-\n' +
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
utils.check_srv_keyspace('test_ny', 'test_keyspace',
'Partitions(master): -80 80-\n' +
'Partitions(rdonly): -80 80-\n' +
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)
# now serve rdonly from the split shards, everywhere
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'rdonly'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n' +
'Partitions(rdonly): -80 80-c0 c0-\n' +
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
utils.check_srv_keyspace('test_ny', 'test_keyspace',
'Partitions(master): -80 80-\n' +
'Partitions(rdonly): -80 80-c0 c0-\n' +
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_ny_rdonly, False, True)
utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)
# then serve replica from the split shards
source_tablet = shard_1_slave2
destination_shards = ['test_keyspace/80-c0', 'test_keyspace/c0-']
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n' +
'Partitions(rdonly): -80 80-c0 c0-\n' +
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=keyspace_id_type)
utils.check_tablet_query_service(self, shard_1_slave2, False, True)
# move replica back and forth
utils.run_vtctl(['MigrateServedTypes', '-reverse', 'test_keyspace/80-', 'replica'],
auto_log=True)
# After a backwards migration, queryservice should be enabled on source and disabled on destinations
utils.check_tablet_query_service(self, shard_1_slave2, True, False)
    # Destination tablets would have query service disabled for reasons other than the migration,
# so check the shard record instead of the tablets directly
utils.check_shard_query_services(self, destination_shards,
tablet.Tablet.tablet_type_value['REPLICA'],
False)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n' +
'Partitions(rdonly): -80 80-c0 c0-\n' +
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
auto_log=True)
# After a forwards migration, queryservice should be disabled on source and enabled on destinations
utils.check_tablet_query_service(self, shard_1_slave2, False, True)
    # Destination tablets would have query service disabled for reasons other than the migration,
# so check the shard record instead of the tablets directly
utils.check_shard_query_services(self, destination_shards,
tablet.Tablet.tablet_type_value['REPLICA'],
True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n' +
'Partitions(rdonly): -80 80-c0 c0-\n' +
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=keyspace_id_type)
# reparent shard_2 to shard_2_replica1, then insert more data and
# see it flow through still
utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/80-c0',
shard_2_replica1.tablet_alias])
logging.debug("Inserting lots of data on source shard after reparenting")
self._insert_lots(3000, base=2000)
logging.debug("Checking 80 percent of data was sent fairly quickly")
self._check_lots_timeout(3000, 80, 10, base=2000)
# use vtworker to compare the data again
logging.debug("Running vtworker SplitDiff")
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', '--exclude_tables',
'unrelated', 'test_keyspace/c0-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
# going to migrate the master now, check the delays
monitor_thread_1.done = True
monitor_thread_2.done = True
insert_thread_1.done = True
insert_thread_2.done = True
logging.debug("DELAY 1: %s max_lag=%d avg_lag=%d",
monitor_thread_1.object_name,
monitor_thread_1.max_lag,
monitor_thread_1.lag_sum / monitor_thread_1.sample_count)
logging.debug("DELAY 2: %s max_lag=%d avg_lag=%d",
monitor_thread_2.object_name,
monitor_thread_2.max_lag,
monitor_thread_2.lag_sum / monitor_thread_2.sample_count)
# mock with the SourceShard records to test 'vtctl SourceShardDelete'
# and 'vtctl SourceShardAdd'
utils.run_vtctl(['SourceShardDelete', 'test_keyspace/c0-', '0'],
auto_log=True)
utils.run_vtctl(['SourceShardAdd', '--key_range=80-',
'test_keyspace/c0-', '0', 'test_keyspace/80-'],
auto_log=True)
# then serve master from the split shards, make sure the source master's
# query service is now turned off
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-c0 c0-\n' +
'Partitions(rdonly): -80 80-c0 c0-\n' +
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=keyspace_id_type)
utils.check_tablet_query_service(self, shard_1_master, False, True)
# check the binlog players are gone now
shard_2_master.wait_for_binlog_player_count(0)
shard_3_master.wait_for_binlog_player_count(0)
# get status for a destination master tablet, make sure it's good
shard_2_master_status = shard_2_master.get_status()
self.assertIn('No binlog player is running', shard_2_master_status)
self.assertIn('</html>', shard_2_master_status)
# scrap the original tablets in the original shard
for t in [shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
utils.run_vtctl(['ScrapTablet', t.tablet_alias], auto_log=True)
tablet.kill_tablets([shard_1_master, shard_1_slave1, shard_1_slave2,
shard_1_ny_rdonly, shard_1_rdonly1])
for t in [shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
    # rebuild the serving graph, all mentions of the old shards should be gone
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# test RemoveShardCell
utils.run_vtctl(['RemoveShardCell', 'test_keyspace/-80', 'test_nj'], auto_log=True, expect_fail=True)
utils.run_vtctl(['RemoveShardCell', 'test_keyspace/80-', 'test_nj'], auto_log=True)
utils.run_vtctl(['RemoveShardCell', 'test_keyspace/80-', 'test_ny'], auto_log=True)
shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/80-'])
self.assertNotIn('cells', shard)
# delete the original shard
utils.run_vtctl(['DeleteShard', 'test_keyspace/80-'], auto_log=True)
# kill everything
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_2_master, shard_2_replica1, shard_2_replica2,
shard_3_master, shard_3_replica, shard_3_rdonly1])
if __name__ == '__main__':
utils.main()
| |
#! /usr/bin/env python
####################################################################################################
# SET UP
#
#
# TODO: Probabilistic Guess Function
# TODO: Account for likelihood that ships are not adjacent
# TODO: Minimize Entropy Model
import socket
import time
import util
from copy import copy
ascii_board = 'A00000000B\nA00000000B\nA00000000B\nA00000000B\nA000000000\n0000000000\n0000000000\n0000000000\nP000000000\nP00DDD0SSS\n'
TEST_BOARD = [['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','P','?','?','?','?','?'],
['?','?','?','?','P','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?']]
BOARD = [['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?'],
['?','?','?','?','?','?','?','?','?','?']]
NON_HUMAN_OPPONENTS = ['players/hunter_parity.py',
'players/hunter.py',
'players/randguess.py',
'players/tile.py']
SHIPS = ['A', 'B', 'D', 'P', 'S']
SHIP_SIZE = {'A':5, 'B':4, 'D':3, 'P':2, 'S':3}
####################################################################################################
# UTILITY FUNCTIONS
#
#
def read_board(ascii_board):
"""
    Reads a board from its ASCII string representation into a list-of-lists board format
"""
board = []
row = []
col_num = 0
for char in ascii_board:
if(col_num < 10):
row.append(char)
col_num += 1
else:
board.append(row)
row = []
col_num = 0
return board
def print_board(board):
"""
Prints a board neatly
"""
for row in board:
for index in range(len(row)):
if(isinstance(row[index], int)):
if(row[index] < 10):
row[index] = ' ' + str(row[index])
else:
row[index] = str(row[index])
print(row)
def is_valid(row, col):
"""
    Returns a boolean based on whether or not a (row, col) pair is a valid board coordinate
"""
return ((row >= 0) and (row <= 9) and (col >= 0) and (col <= 9))
def copy_of(board):
"""
Returns a copy of a board
"""
copy = []
for row_num in range(10):
row = []
for col_num in range(10):
row.append(board[row_num][col_num])
copy.append(row)
return copy
def generate_question_mark_board():
"""
    Returns a board filled with question marks
"""
board = []
    for _ in range(10):
row = []
for col in range(10):
row.append('?')
board.append(row)
return board
def generate_scoring_board():
"""
Generates a board of values used to score your arrangement of ships
"""
scoring_board = board_possibility_counter(BOARD)
max_possibilites = scoring_board[4][4]
for row in range(10):
for col in range(10):
scoring_board[row][col] = max_possibilites - scoring_board[row][col]
return scoring_board
def generate_playing_board(duration):
"""
Generates a random playing board
"""
timeout = time.time() + duration
random_board_string = util.gen_random_board_str()
max_score = score(read_board(random_board_string))
while time.time() < timeout:
new_random_board_string = util.gen_random_board_str()
new_score = score(read_board(new_random_board_string))
if new_score > max_score:
random_board_string = new_random_board_string
max_score = new_score
print max_score
return random_board_string
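# Illustrative sketch, not called anywhere: generate_playing_board is a timed
# random-restart search, so a longer duration simply means more random candidate
# layouts are scored before the best one is kept.
def _example_generate_playing_board():
    quick_layout = generate_playing_board(0.25)  # ~0.25s of random candidates
    return read_board(quick_layout)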
def x_in_board(board):
"""
Determines whether or not there is a hit but unsunk ship in board
"""
for row in range(10):
for col in range(10):
if(board[row][col] == 'X'):
return True
return False
def smallest_ship_size(ships):
"""
Returns the size of the smallest ship in a given list of ships
"""
if len(ships) != 0:
ship_sizes = []
for ship in ships:
ship_sizes.append(SHIP_SIZE[ship])
return min(ship_sizes)
else:
return max(SHIP_SIZE.values())
def unsunk_ships(board):
"""
Returns a list of the ships that have not yet been sunk
"""
    ships = list(SHIPS)  # work on a copy so the module-level SHIPS list is never mutated
for row in range(10):
for col in range(10):
if(board[row][col] in ships):
ships.remove(board[row][col])
return ships
def surrounding_unsunk_hits(board, row, col):
"""
Return the coordinates of all surrounding X's
"""
unsunk_hits = []
if(is_valid(row - 1, col)):
if(board[row - 1][col] == 'X'):
unsunk_hits.append((row - 1, col))
if(is_valid(row + 1, col)):
if(board[row + 1][col] == 'X'):
unsunk_hits.append((row + 1, col))
if(is_valid(row, col - 1)):
if(board[row][col - 1] == 'X'):
unsunk_hits.append((row, col - 1))
if(is_valid(row, col + 1)):
if(board[row][col + 1] == 'X'):
unsunk_hits.append((row, col + 1))
return unsunk_hits
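# Illustrative sketch, not called anywhere: with a single unsunk hit at (4, 4),
# probing its right-hand neighbour (4, 5) reports that one adjacent 'X'.
def _example_surrounding_unsunk_hits():
    board = generate_question_mark_board()
    board[4][4] = 'X'
    return surrounding_unsunk_hits(board, 4, 5)  # [(4, 4)]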
def count_unknown_spaces(board, row, col, direction):
"""
    Counts the number of consecutive '?' spaces in a given direction ('up', 'down', 'left', 'right')
    starting from (but not including) the (row, col) space on the board
"""
unknown = 0
shift = 1
if(direction == 'left'):
while(is_valid(row, col - shift)):
if(board[row][col - shift] != '?'):
break
unknown += 1
shift += 1
elif(direction == 'right'):
while(is_valid(row, col + shift)):
if(board[row][col + shift] != '?'):
break
unknown += 1
shift += 1
elif(direction == 'up'):
while(is_valid(row - shift, col)):
if(board[row - shift][col] != '?'):
break
unknown += 1
shift += 1
elif(direction == 'down'):
while(is_valid(row + shift, col)):
if(board[row + shift][col] != '?'):
break
unknown += 1
shift += 1
return unknown
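# Illustrative sketch, not called anywhere: on TEST_BOARD the patrol boat occupies
# (4, 4) and (5, 4), so counting upwards from (4, 4) finds four '?' cells before
# the edge of the board, while counting downwards stops at the known 'P' right away.
def _example_count_unknown_spaces():
    up = count_unknown_spaces(TEST_BOARD, 4, 4, 'up')      # == 4
    down = count_unknown_spaces(TEST_BOARD, 4, 4, 'down')  # == 0
    return up, down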
def sunken_ship_update(board, row, col, ship):
"""
Returns the direction from (row, col) that the specified newly sunken ship is
"""
updated_board = copy_of(board)
# Check Left
flag = True
for shift in range(SHIP_SIZE[ship]):
if(is_valid(row, col - shift) == False):
flag = False
break
elif(board[row][col - shift] != 'X'):
flag = False
break
else:
updated_board[row][col - shift] = ship
if(flag == True):
return updated_board
else:
updated_board = copy_of(board)
# Check Right
flag = True
for shift in range(SHIP_SIZE[ship]):
if(is_valid(row, col + shift) == False):
flag = False
break
elif(board[row][col + shift] != 'X'):
flag = False
break
else:
updated_board[row][col + shift] = ship
if(flag == True):
return updated_board
else:
updated_board = copy_of(board)
# Check Up
flag = True
for shift in range(SHIP_SIZE[ship]):
if(is_valid(row - shift, col) == False):
flag = False
break
elif(board[row - shift][col] != 'X'):
flag = False
break
else:
updated_board[row - shift][col] = ship
if(flag == True):
return updated_board
else:
updated_board = copy_of(board)
# Check Down
flag = True
for shift in range(SHIP_SIZE[ship]):
if(is_valid(row + shift, col) == False):
flag = False
break
elif(board[row + shift][col] != 'X'):
flag = False
break
else:
updated_board[row + shift][col] = ship
if(flag == True):
return updated_board
else:
updated_board = copy_of(board)
return updated_board
####################################################################################################
# CALCULATION FUNCTIONS
#
#
def score(board):
"""
Returns the score of a board according to the scoring board
"""
scoring_board = generate_scoring_board()
score = 0
for row in range(10):
for col in range(10):
if board[row][col] != '0':
score += scoring_board[row][col]
return score
def line_possibility_counter(spaces1, spaces2, given_spaces, ships):
"""
    Counts the number of possible ways to place any of the still-unsunk ships along a line,
    given that there are spaces1 unknown spaces on one side of a run of given_spaces cells,
    spaces2 unknown spaces on the other side, and the list of ships currently still unsunk
"""
count = 0
for ship in ships:
count += single_ship_line_possibility_counter(spaces1, spaces2, given_spaces, ship)
return count
def single_ship_line_possibility_counter(spaces1, spaces2, given_spaces, ship):
"""
    Counts the number of possible ways to place a single ship along a line,
    given that there are spaces1 unknown spaces on one side of a run of given_spaces cells and spaces2 on the other
"""
ship_size = SHIP_SIZE[ship]
free_spaces = ship_size - given_spaces
if(spaces1 > free_spaces):
spaces1 = free_spaces
if(spaces2 > free_spaces):
spaces2 = free_spaces
if(spaces1 + spaces2 - free_spaces + 1 > 0):
return spaces1 + spaces2 - free_spaces + 1
else:
return 0
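# Illustrative sketch, not called anywhere: with one given cell and two unknown
# cells on its left only, a destroyer ('D', size 3) fits exactly one way along that
# line, while a patrol boat ('P', size 2) with one unknown cell on each side fits
# two ways (extending to either side of the given cell).
def _example_line_possibilities():
    one_way = single_ship_line_possibility_counter(2, 0, 1, 'D')   # == 1
    two_ways = single_ship_line_possibility_counter(1, 1, 1, 'P')  # == 2
    return one_way, two_ways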
def board_possibility_counter(board):
"""
Counts the number of ways ships can be placed in each spot in a given board
"""
counts_board = []
for row in range(10):
counts_row = []
for col in range(10):
count = 0
if(board[row][col] == '?'):
ships = unsunk_ships(board)
left_unknown = count_unknown_spaces(board, row, col, 'left')
right_unknown = count_unknown_spaces(board, row, col, 'right')
up_unknown = count_unknown_spaces(board, row, col, 'up')
down_unknown = count_unknown_spaces(board, row, col, 'down')
count += line_possibility_counter(left_unknown, right_unknown, 1, ships)
count += line_possibility_counter(up_unknown, down_unknown, 1, ships)
counts_row.append(count)
counts_board.append(counts_row)
return counts_board
####################################################################################################
# MOVE GUESSING FUNCTIONS
#
#
def guess(their_board):
"""
Returns a calculated guess based on the opponent's board
"""
board = board_possibility_counter(their_board)
    ships_remaining = unsunk_ships(their_board)  # read sunk ships from the actual board, not the counts grid
min_size = smallest_ship_size(ships_remaining)
max_coords = (0, 0)
for row in range(10):
for col in range(10):
if(((row + col) % min_size) == 0):
if(board[row][col] > board[max_coords[0]][max_coords[1]]):
max_coords = (row, col)
return max_coords
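# Illustrative sketch, not called anywhere: guess() only scores cells where
# (row + col) % min_ship_size == 0 (a parity-hunting pattern), since a size-2
# patrol boat must cover at least one cell with (row + col) % 2 == 0.
def _example_guess_parity():
    fresh_board = generate_question_mark_board()
    row, col = guess(fresh_board)
    return (row + col) % 2 == 0  # True while the patrol boat is still afloat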
def target_helper(board, line_type, num, ships):
"""
Returns a targeted guess for when there have been hits but no sink
Only searches a single row (line_type = 'row') or column (line_type = 'col')
"""
possibilities_map = {}
if(line_type == 'row'):
left_hit_col = -1
right_hit_col = -1
for col in range(10):
if(board[num][col] == 'X'):
# Set start col of hits in row (if applicable)
if(is_valid(num, col - 1)):
if(board[num][col - 1] != 'X'):
left_hit_col = col - 1
# Set end col of hits in row (if applicable)
if(is_valid(num, col + 1)):
if(board[num][col + 1] != 'X'):
right_hit_col = col + 1
left_hit_ways = 0
right_hit_ways = 0
given_spaces = right_hit_col - left_hit_col
in_a_row_bonus = 1
if(given_spaces >= 3):
in_a_row_bonus = 100
if(is_valid(num, left_hit_col)):
if(board[num][left_hit_col] == '?'):
left_unknown = count_unknown_spaces(board, num, left_hit_col, 'left')
right_unknown = count_unknown_spaces(board, num, right_hit_col - 1, 'right')
left_hit_ways = line_possibility_counter(left_unknown, right_unknown, given_spaces, ships)
possibilities_map[(num, left_hit_col)] = in_a_row_bonus + left_hit_ways
if(is_valid(num, right_hit_col)):
if(board[num][right_hit_col] == '?'):
left_unknown = count_unknown_spaces(board, num, left_hit_col + 1, 'left')
right_unknown = count_unknown_spaces(board, num, right_hit_col, 'right')
right_hit_ways = line_possibility_counter(left_unknown, right_unknown, given_spaces, ships)
possibilities_map[(num, right_hit_col)] = in_a_row_bonus + right_hit_ways
elif(line_type == 'col'):
up_hit_row = -1
down_hit_row = -1
for row in range(10):
if(board[row][num] == 'X'):
# Set start col of hits in row (if applicable)
if(is_valid(row - 1, num)):
if(board[row - 1][num] != 'X'):
up_hit_row = row - 1
# Set end col of hits in row (if applicable)
if(is_valid(row + 1, num)):
if(board[row + 1][num] != 'X'):
down_hit_row = row + 1
up_hit_ways = 0
down_hit_ways = 0
given_spaces = down_hit_row - up_hit_row
in_a_row_bonus = 1
if(given_spaces >= 3):
in_a_row_bonus = 100
if(is_valid(up_hit_row, num)):
if(board[up_hit_row][num] == '?'):
up_unknown = count_unknown_spaces(board, up_hit_row, num, 'up')
down_unknown = count_unknown_spaces(board, down_hit_row - 1, num, 'down')
up_hit_ways = line_possibility_counter(up_unknown, down_unknown, given_spaces, ships)
possibilities_map[(up_hit_row, num)] = in_a_row_bonus + up_hit_ways
if(is_valid(down_hit_row, num)):
if(board[down_hit_row][num] == '?'):
up_unknown = count_unknown_spaces(board, up_hit_row + 1, num, 'up')
down_unknown = count_unknown_spaces(board, down_hit_row, num, 'down')
down_hit_ways = line_possibility_counter(up_unknown, down_unknown, given_spaces, ships)
possibilities_map[(down_hit_row, num)] = in_a_row_bonus + down_hit_ways
return possibilities_map
def target(board):
"""
Returns a targeted guess given a board with hit but unsunk ships
"""
# TODO: Have targeted guesses prefer a specific parity and/or squares with high guess index
possibilities_map = {}
ships = unsunk_ships(board)
for row in range(10):
row_map = target_helper(board, 'row', row, ships)
for coords in row_map:
if(coords in possibilities_map):
possibilities_map[coords] += row_map[coords]
else:
possibilities_map[coords] = row_map[coords]
for col in range(10):
col_map = target_helper(board, 'col', col, ships)
for coords in col_map:
if(coords in possibilities_map):
possibilities_map[coords] += col_map[coords]
else:
possibilities_map[coords] = col_map[coords]
target_coords = max(possibilities_map, key=possibilities_map.get)
# return possibilities_map
return target_coords
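# Illustrative sketch, not called anywhere: after a lone hit, target() only assigns
# scores to the four neighbouring cells of that hit, weighting each by how many
# unsunk ships could still run through it, so the returned guess is always adjacent.
def _example_target_single_hit():
    board = generate_question_mark_board()
    board[4][4] = 'X'  # one hit that has not sunk anything yet
    row, col = target(board)
    return abs(row - 4) + abs(col - 4) == 1  # adjacent to the hit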
def fire(board):
"""
Returns the move for the turn
"""
if(x_in_board(board)):
return target(board)
else:
return guess(board)
def update_board(board, row, col, last_fire_result):
"""
Returns an updated game board based on the last_fire_result
"""
updated_board = copy_of(board)
if(last_fire_result == 'M'):
updated_board[row][col] = '0'
elif(last_fire_result == 'H'):
updated_board[row][col] = 'X'
elif last_fire_result.startswith('S'):
updated_board[row][col] = 'X'
updated_board = sunken_ship_update(updated_board, row, col, last_fire_result[1])
return updated_board
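# Illustrative sketch, not called anywhere: update_board expects 'M' for a miss,
# 'H' for a hit and 'S' plus the ship letter (e.g. 'SP') for a sinking shot; once
# the patrol boat is reported sunk, both of its hit cells get relabelled 'P'.
def _example_update_board():
    board = generate_question_mark_board()
    board = update_board(board, 4, 4, 'H')   # first hit -> 'X'
    board = update_board(board, 5, 4, 'SP')  # sinking shot on the patrol boat
    return board[4][4], board[5][4]          # ('P', 'P')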
def play_game():
"""
Plays a full game of battleship against an opponent
"""
board = copy_of(BOARD)
comm = util.Communication()
initstring = comm.readline()
turn, opponent = initstring.split(",")
opponent = opponent.strip()
# Generate and send my board
if opponent in NON_HUMAN_OPPONENTS:
genboard = ascii_board
else:
genboard = generate_playing_board(1.95)
for line in genboard.splitlines():
comm.sendline(line)
if turn == "0":
myturn = True
else:
myturn = False
guesses = set()
while True:
try:
if myturn:
# Send a guess
guess = fire(board)
guessx, guessy = guess
guesses.add(guess)
comm.sendline("{},{}".format(guessx, guessy))
# Read what happened
data = comm.readline().strip()
board = update_board(board, guessx, guessy, data)
myturn = False
else:
# Read opponent's guess
data = comm.readline()
myturn = True
except socket.error:
# Game is over, we either won or lost
print "Game Finished"
play_game()
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Reader ops from io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import threading
import tensorflow as tf
class IdentityReaderTest(tf.test.TestCase):
def _ExpectRead(self, sess, key, value, expected):
k, v = sess.run([key, value])
self.assertAllEqual(expected, k)
self.assertAllEqual(expected, v)
def testOneEpoch(self):
with self.test_session() as sess:
reader = tf.IdentityReader("test_reader")
work_completed = reader.num_work_units_completed()
produced = reader.num_records_produced()
queue = tf.FIFOQueue(99, [tf.string], shapes=())
queued_length = queue.size()
key, value = reader.read(queue)
self.assertAllEqual(0, work_completed.eval())
self.assertAllEqual(0, produced.eval())
self.assertAllEqual(0, queued_length.eval())
queue.enqueue_many([["A", "B", "C"]]).run()
queue.close().run()
self.assertAllEqual(3, queued_length.eval())
self._ExpectRead(sess, key, value, b"A")
self.assertAllEqual(1, produced.eval())
self._ExpectRead(sess, key, value, b"B")
self._ExpectRead(sess, key, value, b"C")
self.assertAllEqual(3, produced.eval())
self.assertAllEqual(0, queued_length.eval())
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
self.assertAllEqual(3, work_completed.eval())
self.assertAllEqual(3, produced.eval())
self.assertAllEqual(0, queued_length.eval())
def testMultipleEpochs(self):
with self.test_session() as sess:
reader = tf.IdentityReader("test_reader")
queue = tf.FIFOQueue(99, [tf.string], shapes=())
enqueue = queue.enqueue_many([["DD", "EE"]])
key, value = reader.read(queue)
enqueue.run()
self._ExpectRead(sess, key, value, b"DD")
self._ExpectRead(sess, key, value, b"EE")
enqueue.run()
self._ExpectRead(sess, key, value, b"DD")
self._ExpectRead(sess, key, value, b"EE")
enqueue.run()
self._ExpectRead(sess, key, value, b"DD")
self._ExpectRead(sess, key, value, b"EE")
queue.close().run()
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
def testSerializeRestore(self):
with self.test_session() as sess:
reader = tf.IdentityReader("test_reader")
produced = reader.num_records_produced()
queue = tf.FIFOQueue(99, [tf.string], shapes=())
queue.enqueue_many([["X", "Y", "Z"]]).run()
key, value = reader.read(queue)
self._ExpectRead(sess, key, value, b"X")
self.assertAllEqual(1, produced.eval())
state = reader.serialize_state().eval()
self._ExpectRead(sess, key, value, b"Y")
self._ExpectRead(sess, key, value, b"Z")
self.assertAllEqual(3, produced.eval())
queue.enqueue_many([["Y", "Z"]]).run()
queue.close().run()
reader.restore_state(state).run()
self.assertAllEqual(1, produced.eval())
self._ExpectRead(sess, key, value, b"Y")
self._ExpectRead(sess, key, value, b"Z")
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
self.assertAllEqual(3, produced.eval())
self.assertEqual(bytes, type(state))
with self.assertRaises(ValueError):
reader.restore_state([])
with self.assertRaises(ValueError):
reader.restore_state([state, state])
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(state[1:]).run()
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(state[:-1]).run()
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(state + b"ExtraJunk").run()
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(b"PREFIX" + state).run()
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(b"BOGUS" + state[5:]).run()
def testReset(self):
with self.test_session() as sess:
reader = tf.IdentityReader("test_reader")
work_completed = reader.num_work_units_completed()
produced = reader.num_records_produced()
queue = tf.FIFOQueue(99, [tf.string], shapes=())
queued_length = queue.size()
key, value = reader.read(queue)
queue.enqueue_many([["X", "Y", "Z"]]).run()
self._ExpectRead(sess, key, value, b"X")
self.assertLess(0, queued_length.eval())
self.assertAllEqual(1, produced.eval())
self._ExpectRead(sess, key, value, b"Y")
self.assertLess(0, work_completed.eval())
self.assertAllEqual(2, produced.eval())
reader.reset().run()
self.assertAllEqual(0, work_completed.eval())
self.assertAllEqual(0, produced.eval())
self.assertAllEqual(1, queued_length.eval())
self._ExpectRead(sess, key, value, b"Z")
queue.enqueue_many([["K", "L"]]).run()
self._ExpectRead(sess, key, value, b"K")
class WholeFileReaderTest(tf.test.TestCase):
def setUp(self):
super(WholeFileReaderTest, self).setUp()
self._filenames = [os.path.join(self.get_temp_dir(), "whole_file.%d.txt" % i)
for i in range(3)]
self._content = [b"One\na\nb\n", b"Two\nC\nD", b"Three x, y, z"]
for fn, c in zip(self._filenames, self._content):
open(fn, "wb").write(c)
def tearDown(self):
super(WholeFileReaderTest, self).tearDown()
for fn in self._filenames:
os.remove(fn)
def _ExpectRead(self, sess, key, value, index):
k, v = sess.run([key, value])
self.assertAllEqual(tf.compat.as_bytes(self._filenames[index]), k)
self.assertAllEqual(self._content[index], v)
def testOneEpoch(self):
with self.test_session() as sess:
reader = tf.WholeFileReader("test_reader")
queue = tf.FIFOQueue(99, [tf.string], shapes=())
queue.enqueue_many([self._filenames]).run()
queue.close().run()
key, value = reader.read(queue)
self._ExpectRead(sess, key, value, 0)
self._ExpectRead(sess, key, value, 1)
self._ExpectRead(sess, key, value, 2)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
def testInfiniteEpochs(self):
with self.test_session() as sess:
reader = tf.WholeFileReader("test_reader")
queue = tf.FIFOQueue(99, [tf.string], shapes=())
enqueue = queue.enqueue_many([self._filenames])
key, value = reader.read(queue)
enqueue.run()
self._ExpectRead(sess, key, value, 0)
self._ExpectRead(sess, key, value, 1)
enqueue.run()
self._ExpectRead(sess, key, value, 2)
self._ExpectRead(sess, key, value, 0)
self._ExpectRead(sess, key, value, 1)
enqueue.run()
self._ExpectRead(sess, key, value, 2)
self._ExpectRead(sess, key, value, 0)
class TextLineReaderTest(tf.test.TestCase):
def setUp(self):
super(TextLineReaderTest, self).setUp()
self._num_files = 2
self._num_lines = 5
def _LineText(self, f, l):
return tf.compat.as_bytes("%d: %d" % (f, l))
def _CreateFiles(self, crlf=False):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
f = open(fn, "wb")
for j in range(self._num_lines):
f.write(self._LineText(i, j))
# Always include a newline after the record unless it is
# at the end of the file, in which case we include it sometimes.
if j + 1 != self._num_lines or i == 0:
f.write(b"\r\n" if crlf else b"\n")
return filenames
def _testOneEpoch(self, files):
with self.test_session() as sess:
reader = tf.TextLineReader(name="test_reader")
queue = tf.FIFOQueue(99, [tf.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_lines):
k, v = sess.run([key, value])
self.assertAllEqual("%s:%d" % (files[i], j + 1), tf.compat.as_text(k))
self.assertAllEqual(self._LineText(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
def testOneEpochLF(self):
self._testOneEpoch(self._CreateFiles(crlf=False))
def testOneEpochCRLF(self):
self._testOneEpoch(self._CreateFiles(crlf=True))
def testSkipHeaderLines(self):
files = self._CreateFiles()
with self.test_session() as sess:
reader = tf.TextLineReader(skip_header_lines=1, name="test_reader")
queue = tf.FIFOQueue(99, [tf.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_lines - 1):
k, v = sess.run([key, value])
self.assertAllEqual("%s:%d" % (files[i], j + 2), tf.compat.as_text(k))
self.assertAllEqual(self._LineText(i, j + 1), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
class FixedLengthRecordReaderTest(tf.test.TestCase):
def setUp(self):
super(FixedLengthRecordReaderTest, self).setUp()
self._num_files = 2
self._num_records = 7
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
def _Record(self, f, r):
return tf.compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _CreateFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
f = open(fn, "wb")
f.write(b"H" * self._header_bytes)
for j in range(self._num_records):
f.write(self._Record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
def testOneEpoch(self):
files = self._CreateFiles()
with self.test_session() as sess:
reader = tf.FixedLengthRecordReader(
header_bytes=self._header_bytes,
record_bytes=self._record_bytes,
footer_bytes=self._footer_bytes,
name="test_reader")
queue = tf.FIFOQueue(99, [tf.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
self.assertAllEqual("%s:%d" % (files[i], j), tf.compat.as_text(k))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
class TFRecordReaderTest(tf.test.TestCase):
def setUp(self):
super(TFRecordReaderTest, self).setUp()
self._num_files = 2
self._num_records = 7
def _Record(self, f, r):
return tf.compat.as_bytes("Record %d of file %d" % (r, f))
def _CreateFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = tf.python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._Record(i, j))
return filenames
def testOneEpoch(self):
files = self._CreateFiles()
with self.test_session() as sess:
reader = tf.TFRecordReader(name="test_reader")
queue = tf.FIFOQueue(99, [tf.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
self.assertTrue(tf.compat.as_text(k).startswith("%s:" % files[i]))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
class AsyncReaderTest(tf.test.TestCase):
def testNoDeadlockFromQueue(self):
"""Tests that reading does not block main execution threads."""
config = tf.ConfigProto(inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
with self.test_session(config=config) as sess:
thread_data_t = collections.namedtuple("thread_data_t",
["thread", "queue", "output"])
thread_data = []
# Create different readers, each with its own queue.
for i in range(3):
queue = tf.FIFOQueue(99, [tf.string], shapes=())
reader = tf.TextLineReader()
_, line = reader.read(queue)
output = []
t = threading.Thread(target=AsyncReaderTest._RunSessionAndSave,
args=(sess, [line], output))
thread_data.append(thread_data_t(t, queue, output))
# Start all readers. They are all blocked waiting for queue entries.
sess.run(tf.initialize_all_variables())
for d in thread_data:
d.thread.start()
# Unblock the readers.
for i, d in enumerate(reversed(thread_data)):
fname = os.path.join(self.get_temp_dir(), "deadlock.%s.txt" % i)
with open(fname, "wb") as f:
f.write(("file-%s" % i).encode())
d.queue.enqueue_many([[fname]]).run()
d.thread.join()
self.assertEqual([[("file-%s" % i).encode()]], d.output)
@staticmethod
def _RunSessionAndSave(sess, args, output):
output.append(sess.run(args))
if __name__ == "__main__":
tf.test.main()
| |
from decimal import Decimal
import os.path
import string
import subprocess
import sys
try:
"""
If used in a package, package logging functions are used instead of stderr.
"""
from . import debug, info, warning, error, fatal
except:
def error(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
debug = info = warning = fatal = error
from . import ConverterBase, SplitterException, progress_bar
errors = [ 'PROCESSING FAILED',
'At least one file cannot be processed',
'exists but it is too short' ]
class AsfBinException(SplitterException):
pass
'''
Application name is asfbin
ASFBIN - version asfbin 1.8.1.892. Copyright 2001-2010 by RadioActive.
Non-commercial version.
Visit www.radioactivepages.com for latest updates.
-
This tool cuts one or more fragments from ASF file(s) (*.WVM also) and writes it to a specified output file. It treats a list of input files as a one continuous WM file, thus it can be also used for joining ASF files together.
Because AsfBin doesn't need to index input file(s), it can skip damaged part of the file. In the other words it can repair damaged ASF files.
Use this tool only for evaluation purposes.
USAGE:
asfbin [INPUT MEDIA FILES] -o <out_file> [SWITCHES]
[INPUT MEDIA FILES] can be specified by:
-i <in_file> - input windows media file, can be repeated many times,
-l <in_file_list> - file containing list of files to join.
[SWITCHES] are as follow:
-sep - write each segment to a separate file. Output file name will be treated like a name template where all occurences of {000} or { } are replaced by the segment number. If {0} is not present, a number will be inserted right before the file name extension.
-s <segments_list> - file containing list of segments to extract,
-a <attrib_list> - file containing list of attributes to set.
-m <marker_list> - file containing list of markers to set.
-k <script_list> - file containing list of scripts to set.
-start <time> - start copying from specified time,
-dur <time> - copy segment of specified time, these two switches can be repeated many times, each pair defining a new segment to extract
-stop <time> - stop copying at specified time,
-invert - invert selection. Specified segments will be removed,
-repeat <n> - repeat the entire resulting file <n> times.
-istart - don't wait for key frame. Files are joined without any advanced fitting. Can be used for files previously cut. By default copying starts after finding a key frame.
-cvb - always copy very beginning of input file discarding even finding key frame when joining too or more files,
-brkaud - Audio streams junctions will be marked as gap.
-brkvid - Video streams junctions will be marked as gap. This option may be useful when joining two files encoded by slightly different versions of codec what may cause artefacts appear on segment junctions.
-ebkf - streams will end before nearest past key frame,
-u - makes resulting files unique by changing original ASF file identificators into unique ones,
-act - adjust creation time of the file to the time of creation of the original file plus start time.
-nots - leaves sample times and packet times unchanged.
-noindex - don't index output file,
-forceindex - force writting advanced index,
-sionly - simple index only,
-nomarkers - don't copy markers,
-noscripts - don't copy script command,
-nostr <numbers> - don't copy selected streams. <numbers> are stream numbers separated by space or comma. This switch can be used many times.
-q - quiet mode - only few information are presented,
-v - verbose mode - turned on by default,
-details - stronger verbose mode - shows many details about copying process, among other things key frames,
-debug - strongest verbose mode - debug mode,
-y - overwrite without asking,
-bw <milliseconds> - forces setting of the initial play delay. This value has direct impact on the internal bucket size. Selecting too small value may cause sample losing.
-ps <bytes> - forces size of data packets.
-optps - sets optimal size of data packets.
-adelay [-]<time> - audio delay. Can be negative value,
-sdelay <number> <time> - stream delay. Can be negative value,
-info - just show information on input sources.
-infokf - just show information on input sources and locations of key frames in selected time range.
-infoidx - show detailed information on indices appended to a processed file. Add -details switch to get additional information on any eventual errors.
-infohdr - show detailed information on file header.
-h - show this help screen.
<time> in general is given in seconds, but it accepts following formats as well:
1:59:45.35 = 1 h, 59 min, 45s, 35 hundredths, 3:30 = 3 min, 30 sec., 1023.101 = 1023 sec. and 101 thousandths, etc.
<in_file_list> format: - each line contains next file to read/join.
<segments_list> format: - each line contains one segment description:
<start_time><separators><duration> e.g.: 14:45, 3:00
<attrib_list> format: - each line consists of: <attribute_name>=<value>
The format is similar to the format of *.INI files. Following attributes are available to set:
Title, Author, Description, Rating, Copyright.
<marker_list> format: - each line consists of: <time> <marker_name>
<script_list> format: - each line consists of:
<time> <command_type> <command_string>, where command type is "URL" or "FILENAME". Custom types are also allowed.
(*) - those options does not guarantee correct results. While it is highly likely that WMV3, WVC1, WMVA, MP42 and MP43 video formats will be correctly handled, all other formats may not be recognized.
'''
def probe(*args, commands=['-info', '-infohdr']):
a = AsfBinConverter()
for filename in args:
proc = subprocess.Popen([a.executable, '-i', filename]+commands,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE)
r, _ = a.parse_output(proc.communicate(), returncode=proc.returncode)
if not r:
return False
return True
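# Illustrative sketch, not part of the module API: probe() shells out to asfbin with
# '-info'/'-infohdr' for each file and returns False as soon as one of them fails to
# parse, so it can serve as a cheap "can asfbin read this?" check. 'MOVIE.WMV' below
# is a hypothetical filename.
def _example_probe():
    return probe('MOVIE.WMV')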
class AsfBinConverter(ConverterBase):
@staticmethod
def match_filenames(*args):
r = []
y = r.append
for arg in args:
_, ext = os.path.splitext(arg)
if ext in ( '.ASF', '.WMV' ):
y(ext)
return r
def __init__(self, **kwargs):
self.dry_run = kwargs.pop('dry_run', None)
if sys.platform.startswith('win'):
self.executable = 'ASFBIN.EXE'
else:
self.executable = 'asfbin'
self.extra_options = kwargs
def get_commands(self, input_filename,
output_filename='{filepart}_.WMV',
segments_filename='{basename}.AsfBin.segments',
**kwargs):
options = kwargs
dirname, basename = os.path.split(input_filename)
filepart, ext = os.path.splitext(basename)
try:
output_filename = output_filename.format(**locals())
except:
warning( "output_filename={}, which is probably not what you intended".format(output_filename) )
try:
segments_filename = segments_filename.format(**locals())
except:
warning( "segments_filename={}, which is probably not what you intended".format(segments_filename) )
commands = options.pop('commands', [ '-sep' ])+['-o', output_filename ]
if 'title' in options or 'attributes' in options:
a = options.pop('attributes', {})
if 'title' in options:
a['Title'] = options.pop('title')
attributes_filename = basename+'.AsfBin.attributes'
with open(attributes_filename, 'w') as ofo:
                for k, v in a.items():
ofo.write('{}={}\n'.format(k.title(), v))
commands += [ '-a', attributes_filename ]
if 'splits' in options:
'''AsfBin takes (timestamp, duration) instead of (timestamp, timestamp)'''
my_pairs = [ (b, Decimal(e)-Decimal(b)) for (b, e) in options.pop('splits') ]
with open(segments_filename, 'w') as ofo:
for b, d in my_pairs:
ofo.write('{}, {}\n'.format(b, d))
commands += [ '-s', segments_filename ]
if 'frames' in options:
raise NotImplementedError()
if 'chapters' in options: # these are pairs
markers_filename = basename+'.AsfBin.markers'
with open(markers_filename, 'w') as ofo:
                for t, n in options.pop('chapters'):
                    ofo.write('{} {}\n'.format(t, n))
commands += [ '-m', markers_filename ]
for k, v in options.items():
debug("Extra parameter unused: {}={}".format(k, v))
# AsfBin is picky about this order:
        return [ [ self.executable, '-i', input_filename ]+commands ]
def parse_output(self, streams, **kwargs):
stdout_contents, _ = streams
debug( "{:,}B of stdout".format(len(stdout_contents)) )
for b in stdout_contents.split(b'\n'):
parse_line(b)
return kwargs.pop('returncode', 0) == 0, []
def parse_line(b,
prefix='STDOUT',
progress=print if sys.stdout.isatty() else (lambda x: None),
encoding='ASCII'):
line = b.decode(encoding).rstrip()
if line.startswith('0-100%:'): # progress
#return line
progress(line)
return
for text in errors:
if text in line:
raise AsfBinException(line)
break
else:
debug(prefix+' '+line)
if sys.platform.startswith('win'):
asfbin_executable = 'ASFBIN.EXE'
else:
asfbin_executable = 'asfbin'
debug("AsfBin is {}".format(asfbin_executable))
def AsfBin_command(input_filename, output_filename='', segments_filename='', **kwargs):
dirname, basename = os.path.split(input_filename)
filepart, ext = os.path.splitext(basename)
if not segments_filename:
segments_filename = basename+'.AsfBin.segments'
commands = kwargs.pop('commands', [ '-sep' ])
if not output_filename:
#output_filename = filepart+'_{000}'+'.WMV' # buggy?
output_filename = filepart+'_'+'.WMV'
commands += ['-o', output_filename ]
if 'title' in kwargs or 'attributes' in kwargs:
a = kwargs.pop('attributes', {})
if 'title' in kwargs:
a['Title'] = kwargs.pop('title')
attributes_filename = basename+'.AsfBin.attributes'
with open(attributes_filename, 'w') as ofo:
            for k, v in a.items():
ofo.write('{}={}\n'.format(k.title(), v))
commands += [ '-a', attributes_filename ]
if 'splits' in kwargs:
'''AsfBin takes (timestamp, duration) instead of (timestamp, timestamp)'''
my_pairs = [ (b, Decimal(e)-Decimal(b)) for (b, e) in kwargs.pop('splits') ]
with open(segments_filename, 'w') as ofo:
for b, d in my_pairs:
ofo.write('{}, {}\n'.format(b, d))
commands += [ '-s', segments_filename ]
if 'frames' in kwargs: # TODO
raise NotImplementedError()
if 'chapters' in kwargs: # these are pairs
markers_filename = basename+'.AsfBin.markers'
with open(markers_filename, 'w') as ofo:
            for t, n in kwargs.pop('chapters'):
                ofo.write('{} {}\n'.format(t, n))
commands += [ '-m', markers_filename ]
for k, v in kwargs.items():
debug("Extra parameter unused: {}={}".format(k, v))
# AsfBin is picky about this order:
return [ asfbin_executable, '-i', input_filename ]+commands
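# Illustrative sketch, not called anywhere: for a single cut from 0s to 60s of a
# hypothetical 'MOVIE.WMV', AsfBin_command writes the (start, duration) pair to
# 'MOVIE.WMV.AsfBin.segments' and returns a command line along the lines of
#   [asfbin_executable, '-i', 'MOVIE.WMV', '-sep', '-o', 'MOVIE_.WMV',
#    '-s', 'MOVIE.WMV.AsfBin.segments']
def _example_asfbin_command():
    return AsfBin_command('MOVIE.WMV', splits=[('0', '60')])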
def parse_output(out, err='', returncode=None):
errors = [ 'PROCESSING FAILED', 'At least one file cannot be processed', 'exists but it is too short' ]
def parse_line(b, prefix='STDOUT', encoding='ASCII'):
line = b.decode(encoding).rstrip()
for text in errors:
if text in line:
raise AsfBinException(line)
break
if line.startswith('0-100%:'): # progress
return line
else:
debug(prefix+' '+line)
#for b in err.splitlines(): # AsfBin doesn't use stderr
# parse_line(b, prefix='STDERR')
for b in out.splitlines():
parse_line(b)
return returncode or 0
### EOF
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateDnsZoneGroupsOperations(object):
"""PrivateDnsZoneGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
private_endpoint_name, # type: str
private_dns_zone_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
'privateDnsZoneGroupName': self._serialize.url("private_dns_zone_group_name", private_dns_zone_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}/privateDnsZoneGroups/{privateDnsZoneGroupName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
private_endpoint_name, # type: str
private_dns_zone_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified private dns zone group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param private_endpoint_name: The name of the private endpoint.
:type private_endpoint_name: str
:param private_dns_zone_group_name: The name of the private dns zone group.
:type private_dns_zone_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
'privateDnsZoneGroupName': self._serialize.url("private_dns_zone_group_name", private_dns_zone_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}/privateDnsZoneGroups/{privateDnsZoneGroupName}'} # type: ignore
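# Usage sketch (illustrative comment, not part of the generated code):
# begin_delete returns an LROPoller; callers usually block on .result().
# It assumes a management client that exposes this operation group as
# `private_dns_zone_groups` (the attribute name is an assumption):
#
#     poller = client.private_dns_zone_groups.begin_delete(
#         resource_group_name="my-rg",
#         private_endpoint_name="my-endpoint",
#         private_dns_zone_group_name="my-zone-group",
#     )
#     poller.result()  # waits for the long-running delete to finish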
def get(
self,
resource_group_name, # type: str
private_endpoint_name, # type: str
private_dns_zone_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PrivateDnsZoneGroup"
"""Gets the private dns zone group resource by specified private dns zone group name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param private_endpoint_name: The name of the private endpoint.
:type private_endpoint_name: str
:param private_dns_zone_group_name: The name of the private dns zone group.
:type private_dns_zone_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateDnsZoneGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.PrivateDnsZoneGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateDnsZoneGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
'privateDnsZoneGroupName': self._serialize.url("private_dns_zone_group_name", private_dns_zone_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateDnsZoneGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}/privateDnsZoneGroups/{privateDnsZoneGroupName}'} # type: ignore
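# Usage sketch (illustrative comment, not part of the generated code):
# get() is a plain request and returns the deserialized PrivateDnsZoneGroup
# directly, so no poller is involved (client/attribute names are assumptions):
#
#     group = client.private_dns_zone_groups.get("my-rg", "my-endpoint", "my-zone-group")
#     print(group.name)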
def _create_or_update_initial(
self,
resource_group_name, # type: str
private_endpoint_name, # type: str
private_dns_zone_group_name, # type: str
parameters, # type: "_models.PrivateDnsZoneGroup"
**kwargs # type: Any
):
# type: (...) -> "_models.PrivateDnsZoneGroup"
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateDnsZoneGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
'privateDnsZoneGroupName': self._serialize.url("private_dns_zone_group_name", private_dns_zone_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PrivateDnsZoneGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PrivateDnsZoneGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PrivateDnsZoneGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}/privateDnsZoneGroups/{privateDnsZoneGroupName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
private_endpoint_name, # type: str
private_dns_zone_group_name, # type: str
parameters, # type: "_models.PrivateDnsZoneGroup"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PrivateDnsZoneGroup"]
"""Creates or updates a private dns zone group in the specified private endpoint.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param private_endpoint_name: The name of the private endpoint.
:type private_endpoint_name: str
:param private_dns_zone_group_name: The name of the private dns zone group.
:type private_dns_zone_group_name: str
:param parameters: Parameters supplied to the create or update private dns zone group
operation.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.PrivateDnsZoneGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateDnsZoneGroup or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.PrivateDnsZoneGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateDnsZoneGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateDnsZoneGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
'privateDnsZoneGroupName': self._serialize.url("private_dns_zone_group_name", private_dns_zone_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}/privateDnsZoneGroups/{privateDnsZoneGroupName}'} # type: ignore
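# Usage sketch (illustrative comment; the model and attribute names below are
# assumptions based on the azure-mgmt-network models referenced above):
#
#     from azure.mgmt.network.v2020_11_01 import models
#     group = models.PrivateDnsZoneGroup(
#         private_dns_zone_configs=[
#             models.PrivateDnsZoneConfig(
#                 name="blob-zone",
#                 private_dns_zone_id="/subscriptions/.../privateDnsZones/privatelink.blob.core.windows.net",
#             )
#         ]
#     )
#     result = client.private_dns_zone_groups.begin_create_or_update(
#         "my-rg", "my-endpoint", "my-zone-group", group
#     ).result()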
def list(
self,
private_endpoint_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PrivateDnsZoneGroupListResult"]
"""Gets all private dns zone groups in a private endpoint.
:param private_endpoint_name: The name of the private endpoint.
:type private_endpoint_name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateDnsZoneGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.PrivateDnsZoneGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateDnsZoneGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateDnsZoneGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}/privateDnsZoneGroups'} # type: ignore
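# ---------------------------------------------------------------------------
# Added end-to-end sketch (not part of the generated file). It shows how the
# four operations above are typically combined through a management client.
# The client wiring and the `private_dns_zone_groups` attribute name are
# assumptions about the surrounding SDK, not guarantees.
# ---------------------------------------------------------------------------
def _example_private_dns_zone_group_roundtrip(client, resource_group, endpoint, group_name, zone_id):
    """Create, read, list and delete a private DNS zone group (illustrative only)."""
    group = _models.PrivateDnsZoneGroup(
        private_dns_zone_configs=[
            _models.PrivateDnsZoneConfig(name="default", private_dns_zone_id=zone_id),
        ]
    )
    # Long-running create: begin_* returns an LROPoller, .result() blocks until done.
    created = client.private_dns_zone_groups.begin_create_or_update(
        resource_group, endpoint, group_name, group
    ).result()
    # Plain GET returns the deserialized model directly.
    fetched = client.private_dns_zone_groups.get(resource_group, endpoint, group_name)
    # list() returns an ItemPaged iterator that transparently follows next_link.
    names = [g.name for g in client.private_dns_zone_groups.list(endpoint, resource_group)]
    # Delete is also long-running.
    client.private_dns_zone_groups.begin_delete(resource_group, endpoint, group_name).result()
    return created, fetched, names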
| |
import bisect
import copy
import inspect
from collections import defaultdict
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db import connections
from django.db.models import AutoField, Manager, OrderWrt, UniqueConstraint
from django.db.models.query_utils import PathInfo
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.functional import cached_property
from django.utils.text import camel_case_to_spaces, format_lazy
from django.utils.translation import override
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = ()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this list "
"for your own use, make a copy first."
)
DEFAULT_NAMES = (
'verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by', 'order_with_respect_to',
'app_label', 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable',
'auto_created', 'index_together', 'apps', 'default_permissions',
'select_on_save', 'default_related_name', 'required_db_features',
'required_db_vendor', 'base_manager_name', 'default_manager_name',
'indexes', 'constraints',
)
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = option_together[0]
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
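# Worked examples (added comment):
#   normalize_together(('foo', 'bar'))           -> (('foo', 'bar'),)
#   normalize_together([('foo', 'bar'), ['a']])  -> (('foo', 'bar'), ('a',))
#   normalize_together(None)                     -> ()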
def make_immutable_fields_list(name, data):
return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
class Options:
FORWARD_PROPERTIES = {
'fields', 'many_to_many', 'concrete_fields', 'local_concrete_fields',
'_forward_fields_map', 'managers', 'managers_map', 'base_manager',
'default_manager',
}
REVERSE_PROPERTIES = {'related_objects', 'fields_map', '_relation_tree'}
default_apps = apps
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.local_fields = []
self.local_many_to_many = []
self.private_fields = []
self.local_managers = []
self.base_manager_name = None
self.default_manager_name = None
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self._ordering_clash = False
self.indexes = []
self.constraints = []
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.default_permissions = ('add', 'change', 'delete', 'view')
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.required_db_features = []
self.required_db_vendor = None
self.meta = meta
self.pk = None
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
# in the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = {}
self.auto_created = False
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = self.default_apps
self.default_related_name = None
@property
def label(self):
return '%s.%s' % (self.app_label, self.object_name)
@property
def label_lower(self):
return '%s.%s' % (self.app_label, self.model_name)
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
@property
def installed(self):
return self.app_config is not None
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
self.unique_together = normalize_together(self.unique_together)
self.index_together = normalize_together(self.index_together)
# App label/class name interpolation for names of constraints and
# indexes.
if not getattr(cls._meta, 'abstract', False):
for attr_name in {'constraints', 'indexes'}:
objs = getattr(self, attr_name, [])
setattr(self, attr_name, self._format_names_with_class(cls, objs))
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
# order_with_respect_to and ordering are mutually exclusive.
self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs))
else:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _format_names_with_class(self, cls, objs):
"""App label/class name interpolation for object names."""
new_objs = []
for obj in objs:
obj = obj.clone()
obj.name = obj.name % {
'app_label': cls._meta.app_label.lower(),
'class': cls.__name__.lower(),
}
new_objs.append(obj)
return new_objs
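# Worked example (added comment): for a model `Book` in an app labelled
# 'shop', a constraint named '%(app_label)s_%(class)s_unique_title' is
# rewritten to 'shop_book_unique_title' by the interpolation above.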
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, query))
self.ordering = ('_order',)
if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(iter(self.parents.values()))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
else:
auto = AutoField(verbose_name='ID', primary_key=True, auto_created=True)
model.add_to_class('id', auto)
def add_manager(self, manager):
self.local_managers.append(manager)
self._expire_cache()
def add_field(self, field, private=False):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if private:
self.private_fields.append(field)
elif field.is_relation and field.many_to_many:
bisect.insort(self.local_many_to_many, field)
else:
bisect.insort(self.local_fields, field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However, related_model
# is a cached property, and all the models haven't been loaded yet, so
# we need to make sure we don't cache a string reference.
if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
try:
field.remote_field.model._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Do the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return self.label_lower
def can_migrate(self, connection):
"""
Return True if the model can/should be migrated on the `connection`.
`connection` can be either a real connection or a connection alias.
"""
if self.proxy or self.swapped or not self.managed:
return False
if isinstance(connection, str):
connection = connections[connection]
if self.required_db_vendor:
return self.required_db_vendor == connection.vendor
if self.required_db_features:
return all(getattr(connection.features, feat, False)
for feat in self.required_db_features)
return True
@property
def verbose_name_raw(self):
"""Return the untranslated verbose name."""
with override(None):
return str(self.verbose_name)
@property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower:
return swapped_for
return None
@cached_property
def managers(self):
managers = []
seen_managers = set()
bases = (b for b in self.model.mro() if hasattr(b, '_meta'))
for depth, base in enumerate(bases):
for manager in base._meta.local_managers:
if manager.name in seen_managers:
continue
manager = copy.copy(manager)
manager.model = self.model
seen_managers.add(manager.name)
managers.append((depth, manager.creation_counter, manager))
return make_immutable_fields_list(
"managers",
(m[2] for m in sorted(managers)),
)
@cached_property
def managers_map(self):
return {manager.name: manager for manager in self.managers}
@cached_property
def base_manager(self):
base_manager_name = self.base_manager_name
if not base_manager_name:
# Get the first parent's base_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
if parent._base_manager.name != '_base_manager':
base_manager_name = parent._base_manager.name
break
if base_manager_name:
try:
return self.managers_map[base_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
base_manager_name,
)
)
manager = Manager()
manager.name = '_base_manager'
manager.model = self.model
manager.auto_created = True
return manager
@cached_property
def default_manager(self):
default_manager_name = self.default_manager_name
if not default_manager_name and not self.local_managers:
# Get the first parent's default_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
default_manager_name = parent._meta.default_manager_name
break
if default_manager_name:
try:
return self.managers_map[default_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
default_manager_name,
)
)
if self.managers:
return self.managers[0]
@cached_property
def fields(self):
"""
Return a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
# For legacy reasons, the fields property should only contain forward
# fields that are neither private nor many-to-many. Therefore we
# pass these three filters as filters to the generator.
# The third filter is a longwinded way of checking f.related_model - we don't
# use that property directly because related_model is a cached property,
# and all the models may not have been loaded yet; we don't want to cache
# the string reference to the related_model.
def is_not_an_m2m_field(f):
return not (f.is_relation and f.many_to_many)
def is_not_a_generic_relation(f):
return not (f.is_relation and f.one_to_many)
def is_not_a_generic_foreign_key(f):
return not (
f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
)
return make_immutable_fields_list(
"fields",
(f for f in self._get_fields(reverse=False)
if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f))
)
@cached_property
def concrete_fields(self):
"""
Return a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Return a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@cached_property
def many_to_many(self):
"""
Return a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many)
)
@cached_property
def related_objects(self):
"""
Return all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return make_immutable_fields_list(
"related_objects",
(obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many)
)
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name):
"""
Return a field instance given the name of a forward or reverse field.
"""
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
return self._forward_fields_map[field_name]
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named '%s'. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, field_name))
def get_base_chain(self, model):
"""
Return a list of parent classes leading to `model` (ordered from
closest to most distant ancestor). This has to handle the case where
`model` is a grandparent or even more distant relation.
"""
if not self.parents:
return []
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return []
def get_parent_list(self):
"""
Return all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
def get_ancestor_link(self, ancestor):
"""
Return the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Return None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
# In the case of a proxied model, the first link in the
# chain to the ancestor is the parent's own link.
return self.parents[parent] or parent_link
def get_path_to_parent(self, parent):
"""
Return a list of PathInfos containing the path from the current
model to the parent model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
# Skip the chain of proxy to the concrete proxied model.
proxied_model = self.concrete_model
path = []
opts = self
for int_model in self.get_base_chain(parent):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.remote_field.get_related_field(),)
opts = int_model._meta
path.append(PathInfo(
from_opts=final_field.model._meta,
to_opts=opts,
target_fields=targets,
join_field=final_field,
m2m=False,
direct=True,
filtered_relation=None,
))
return path
def get_path_from_parent(self, parent):
"""
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent)
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for i, ancestor in enumerate(chain[:-1]):
child = chain[i + 1]
link = child._meta.get_ancestor_link(ancestor)
path.extend(link.get_reverse_path_info())
return path
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and then
is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
opts = model._meta
# Abstract model's fields are copied to child models, hence we will
# see the fields from the child models.
if opts.abstract:
continue
fields_with_relations = (
f for f in opts._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.remote_field.model, str):
remote_label = f.remote_field.model._meta.concrete_model._meta.label
related_objects_graph[remote_label].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# __dict__ takes precedence over a data descriptor (such as
# @cached_property). This means that the _meta._relation_tree is
# only called if related_objects is not in __dict__.
related_objects = related_objects_graph[model._meta.concrete_model._meta.label]
model._meta.__dict__['_relation_tree'] = related_objects
# It seems it is possible that self is not in all_models, so guard
# against that with default for get().
return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
if forward:
for cache_key in self.FORWARD_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
if reverse and not self.abstract:
for cache_key in self.REVERSE_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
Return a list of fields associated to the model. By default, include
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
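# Usage note (added comment, for a hypothetical model `Book`):
#   Book._meta.get_fields()                      -> forward and reverse fields, parents included
#   Book._meta.get_fields(include_parents=False) -> excludes fields inherited from (non-proxy) parents
#   Book._meta.get_fields(include_hidden=True)   -> also relations whose related_name starts with '+'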
def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
seen_models=None):
"""
Internal helper function to return fields of the model.
* If forward=True, then fields defined on this model are returned.
* If reverse=True, then relations pointing to this model are returned.
* If include_hidden=True, then fields with is_hidden=True are returned.
* The include_parents argument toggles if fields from parent models
should be included. It has three values: True, False, and
PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
fields defined for the current model or any of its parents in the
parent chain to the model's concrete model.
"""
if include_parents not in (True, False, PROXY_PARENTS):
raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
# This helper function is used to allow recursion in ``get_fields()``
# implementation and to provide a fast way for Django's internals to
# access specific subsets of fields.
# We must keep track of which models we have already seen. Otherwise we
# could include the same field multiple times from different models.
topmost_call = seen_models is None
if topmost_call:
seen_models = set()
seen_models.add(self.model)
# Creates a cache key composed of all arguments
cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
try:
# To avoid external list manipulation, always return a shallow
# copy of the results.
return self._get_fields_cache[cache_key]
except KeyError:
pass
fields = []
# Recursively call _get_fields() on each parent, with the same
# options provided in this call.
if include_parents is not False:
for parent in self.parents:
# In diamond inheritance it is possible that we see the same
# model from two different routes. In that case, avoid adding
# fields from the same parent again.
if parent in seen_models:
continue
if (parent._meta.concrete_model != self.concrete_model and
include_parents == PROXY_PARENTS):
continue
for obj in parent._meta._get_fields(
forward=forward, reverse=reverse, include_parents=include_parents,
include_hidden=include_hidden, seen_models=seen_models):
if not getattr(obj, 'parent_link', False) or obj.model == self.concrete_model:
fields.append(obj)
if reverse and not self.proxy:
# Tree is computed once and cached until the app cache is expired.
# It is composed of a list of fields pointing to the current model
# from other models.
all_fields = self._relation_tree
for field in all_fields:
# If hidden fields should be included or the relation is not
# intentionally hidden, add to the fields dict.
if include_hidden or not field.remote_field.hidden:
fields.append(field.remote_field)
if forward:
fields += self.local_fields
fields += self.local_many_to_many
# Private fields are recopied to each child model, and they get a
# different model as field.model in each child. Hence we have to
# add the private fields separately from the topmost call. If we
# did this recursively similar to local_fields, we would get field
# instances with field.model != self.model.
if topmost_call:
fields += self.private_fields
# To avoid external list manipulation, always
# return a shallow copy of the results.
fields = make_immutable_fields_list("get_fields()", fields)
# Store result into cache for later access
self._get_fields_cache[cache_key] = fields
return fields
@cached_property
def total_unique_constraints(self):
"""
Return a list of total unique constraints. Useful for determining set
of fields guaranteed to be unique for all rows.
"""
return [
constraint
for constraint in self.constraints
if isinstance(constraint, UniqueConstraint) and constraint.condition is None
]
@cached_property
def _property_names(self):
"""Return a set of the names of the properties defined on the model."""
names = []
for name in dir(self.model):
attr = inspect.getattr_static(self.model, name)
if isinstance(attr, property):
names.append(name)
return frozenset(names)
@cached_property
def db_returning_fields(self):
"""
Private API intended only to be used by Django itself.
Fields to be returned after a database insert.
"""
return [
field for field in self._get_fields(forward=True, reverse=False, include_parents=PROXY_PARENTS)
if getattr(field, 'db_returning', False)
]
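# ---------------------------------------------------------------------------
# Added illustrative sketch (not part of django/db/models/options.py): a model
# whose ``class Meta`` exercises several of the DEFAULT_NAMES handled above.
# It assumes a configured Django project with an app labelled 'shop'; the
# model and constraint names are purely hypothetical, so the sketch is kept
# commented out.
# ---------------------------------------------------------------------------
# from django.db import models
#
# class Book(models.Model):
#     title = models.CharField(max_length=200)
#     isbn = models.CharField(max_length=13)
#
#     class Meta:
#         app_label = 'shop'
#         ordering = ['title']
#         constraints = [
#             # '%(app_label)s'/'%(class)s' are interpolated by
#             # Options._format_names_with_class() to 'shop_book_unique_isbn'.
#             models.UniqueConstraint(fields=['isbn'], name='%(app_label)s_%(class)s_unique_isbn'),
#         ]
#
# # Book._meta is the Options instance built by contribute_to_class():
# # Book._meta.db_table == 'shop_book' and Book._meta.verbose_name == 'book'.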
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AzureFirewallsOperations:
"""AzureFirewallsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
azure_firewall_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
azure_firewall_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
azure_firewall_name=azure_firewall_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
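# Usage sketch (illustrative comment; the `azure_firewalls` attribute name on
# the async client is an assumption):
#
#     poller = await client.azure_firewalls.begin_delete("my-rg", "my-firewall")
#     await poller.result()  # waits for the long-running delete to finish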
async def get(
self,
resource_group_name: str,
azure_firewall_name: str,
**kwargs: Any
) -> "_models.AzureFirewall":
"""Gets the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureFirewall, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.AzureFirewall
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
azure_firewall_name: str,
parameters: "_models.AzureFirewall",
**kwargs: Any
) -> "_models.AzureFirewall":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AzureFirewall')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
azure_firewall_name: str,
parameters: "_models.AzureFirewall",
**kwargs: Any
) -> AsyncLROPoller["_models.AzureFirewall"]:
"""Creates or updates the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:param parameters: Parameters supplied to the create or update Azure Firewall operation.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.AzureFirewall
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AzureFirewall or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.AzureFirewall]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
azure_firewall_name=azure_firewall_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
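# Illustrative LRO sketch (same placeholder `client` as above): the coroutine
# returns an AsyncLROPoller, so the finished AzureFirewall is obtained by
# awaiting the poller's result():
#
#   poller = await client.azure_firewalls.begin_create_or_update(
#       resource_group_name, azure_firewall_name, firewall_parameters)
#   firewall = await poller.result()  # waits (asynchronously) until provisioning completes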
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AzureFirewallListResult"]:
"""Lists all Azure Firewalls in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.AzureFirewallListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.AzureFirewallListResult"]:
"""Gets all the Azure Firewalls in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.AzureFirewallListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/azureFirewalls'} # type: ignore
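# Illustrative paging sketch (same placeholder `client` as above): list() and
# list_all() return an AsyncItemPaged that follows next_link transparently and
# is consumed with `async for`:
#
#   async for firewall in client.azure_firewalls.list_all():
#       print(firewall.id)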
| |
# -*- coding: utf-8 -*-
""" PolymorphicQuerySet support functions
Please see README.rst or DOCS.rst or http://chrisglass.github.com/django_polymorphic/
"""
from __future__ import absolute_import
import copy
import django
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q, FieldDoesNotExist
from django.db.utils import DEFAULT_DB_ALIAS
from django.db.models.fields.related import RelatedField
if django.VERSION < (1, 6):
# There was no common base class in Django 1.5, mention all variants here.
from django.db.models.fields.related import RelatedObject, ManyToOneRel, ManyToManyRel
REL_FIELD_CLASSES = (RelatedField, RelatedObject, ManyToOneRel, ManyToManyRel) # Leaving GenericRel out.
elif django.VERSION < (1, 8):
# As of Django 1.6 there is a ForeignObjectRel.
from django.db.models.fields.related import ForeignObjectRel, RelatedObject
REL_FIELD_CLASSES = (RelatedField, ForeignObjectRel, RelatedObject)
else:
# As of Django 1.8 the base class serves everything. RelatedObject is gone.
from django.db.models.fields.related import ForeignObjectRel
REL_FIELD_CLASSES = (RelatedField, ForeignObjectRel)
from functools import reduce
###################################################################################
# PolymorphicQuerySet support functions
# These functions implement the additional filter- and Q-object functionality.
# They form a kind of small framework for easily adding more
# functionality to filters and Q objects.
# Probably a more general queryset enhancement class could be made out of them.
def translate_polymorphic_filter_definitions_in_kwargs(queryset_model, kwargs, using=DEFAULT_DB_ALIAS):
"""
Translate the keyword argument list for PolymorphicQuerySet.filter()
Any kwargs with special polymorphic functionality are replaced in the kwargs
dict with their vanilla django equivalents.
For some kwargs a direct replacement is not possible, as a Q object is needed
instead to implement the required functionality. In these cases the kwarg is
deleted from the kwargs dict and a Q object is added to the return list.
Modifies: kwargs dict
Returns: a list of non-keyword-arguments (Q objects) to be added to the filter() query.
"""
additional_args = []
for field_path, val in kwargs.copy().items(): # Python 3 needs copy
new_expr = _translate_polymorphic_filter_definition(queryset_model, field_path, val, using=using)
if type(new_expr) == tuple:
# replace kwargs element
del(kwargs[field_path])
kwargs[new_expr[0]] = new_expr[1]
elif isinstance(new_expr, models.Q):
del(kwargs[field_path])
additional_args.append(new_expr)
return additional_args
def translate_polymorphic_Q_object(queryset_model, potential_q_object, using=DEFAULT_DB_ALIAS):
def tree_node_correct_field_specs(my_model, node):
" process all children of this Q node "
for i in range(len(node.children)):
child = node.children[i]
if type(child) == tuple:
# this Q object child is a tuple => a kwarg like Q( instance_of=ModelB )
key, val = child
new_expr = _translate_polymorphic_filter_definition(my_model, key, val, using=using)
if new_expr:
node.children[i] = new_expr
else:
# this Q object child is another Q object, recursively process this as well
tree_node_correct_field_specs(my_model, child)
if isinstance(potential_q_object, models.Q):
tree_node_correct_field_specs(queryset_model, potential_q_object)
return potential_q_object
def translate_polymorphic_filter_definitions_in_args(queryset_model, args, using=DEFAULT_DB_ALIAS):
"""
Translate the non-keyword argument list for PolymorphicQuerySet.filter()
In the args list, we return all kwargs to Q-objects that contain special
polymorphic functionality with their vanilla django equivalents.
We traverse the Q object tree for this (which is simple).
Returns: modified Q objects
"""
if django.VERSION >= (1, 10):
q_objects = [copy.deepcopy(q) for q in args]
elif django.VERSION >= (1, 6):
q_objects = [q.clone() for q in args]
else:
q_objects = args # NOTE: edits existing objects in place.
return [translate_polymorphic_Q_object(queryset_model, q, using=using) for q in q_objects]
def _translate_polymorphic_filter_definition(queryset_model, field_path, field_val, using=DEFAULT_DB_ALIAS):
"""
Translate a keyword argument (field_path=field_val), as used for
PolymorphicQuerySet.filter()-like functions (and Q objects).
A kwarg with special polymorphic functionality is translated into
its vanilla django equivalent, which is returned, either as tuple
(field_path, field_val) or as Q object.
Returns: kwarg tuple or Q object or None (if no change is required)
"""
# handle instance_of expressions or alternatively,
# if this is a normal Django filter expression, return None
if field_path == 'instance_of':
return _create_model_filter_Q(field_val, using=using)
elif field_path == 'not_instance_of':
return _create_model_filter_Q(field_val, not_instance_of=True, using=using)
elif not '___' in field_path:
return None # no change
# filter expression contains '___' (i.e. filter for polymorphic field)
# => get the model class specified in the filter expression
newpath = translate_polymorphic_field_path(queryset_model, field_path)
return (newpath, field_val)
def translate_polymorphic_field_path(queryset_model, field_path):
"""
Translate a field path from a keyword argument, as used for
PolymorphicQuerySet.filter()-like functions (and Q objects).
Supports leading '-' (for order_by args).
E.g.: if queryset_model is ModelA, then "ModelC___field3" is translated
into modelb__modelc__field3.
Returns: translated path (unchanged, if no translation needed)
"""
classname, sep, pure_field_path = field_path.partition('___')
if not sep:
return field_path
assert classname, 'PolymorphicModel: %s: bad field specification' % field_path
negated = False
if classname[0] == '-':
negated = True
classname = classname.lstrip('-')
if '__' in classname:
# the user has app label prepended to class name via __ => use Django's get_model function
appname, sep, classname = classname.partition('__')
model = models.get_model(appname, classname)
assert model, 'PolymorphicModel: model %s (in app %s) not found!' % (classname, appname)
if not issubclass(model, queryset_model):
e = 'PolymorphicModel: queryset filter error: "' + model.__name__ + '" is not derived from "' + queryset_model.__name__ + '"'
raise AssertionError(e)
else:
# the user has only given us the class name via ___
# => select the model from the sub models of the queryset base model
# Test whether it's actually a regular relation__ _fieldname (the field starting with an _),
# i.e. no triple ClassName___field was intended.
try:
if django.VERSION >= (1, 8):
# This also retrieves M2M relations now (including reverse foreign key relations)
field = queryset_model._meta.get_field(classname)
else:
field = queryset_model._meta.get_field_by_name(classname)[0]
if isinstance(field, REL_FIELD_CLASSES):
# Can also test whether the field exists in the related object to avoid ambiguity between
# class names and field names, but that never happens when your class names are in CamelCase.
return field_path # No exception raised, field does exist.
except FieldDoesNotExist:
pass
# function to collect all sub-models, this should be optimized (cached)
def add_all_sub_models(model, result):
if issubclass(model, models.Model) and model != models.Model:
# model name is occurring twice in submodel inheritance tree => Error
if model.__name__ in result and model != result[model.__name__]:
e = 'PolymorphicModel: model name alone is ambiguous: %s.%s and %s.%s!\n'
e += 'In this case, please use the syntax: applabel__ModelName___field'
raise AssertionError(e % (
    model._meta.app_label, model.__name__,
    result[model.__name__]._meta.app_label, result[model.__name__].__name__))
result[model.__name__] = model
for b in model.__subclasses__():
add_all_sub_models(b, result)
submodels = {}
add_all_sub_models(queryset_model, submodels)
model = submodels.get(classname, None)
assert model, 'PolymorphicModel: model %s not found (not a subclass of %s)!' % (classname, queryset_model.__name__)
# create new field path for expressions, e.g. for baseclass=ModelA, myclass=ModelC
# 'modelb__modelc' is returned
def _create_base_path(baseclass, myclass):
bases = myclass.__bases__
for b in bases:
if b == baseclass:
return myclass.__name__.lower()
path = _create_base_path(baseclass, b)
if path:
return path + '__' + myclass.__name__.lower()
return ''
basepath = _create_base_path(queryset_model, model)
if negated:
newpath = '-'
else:
newpath = ''
newpath += basepath
if basepath:
newpath += '__'
newpath += pure_field_path
return newpath
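# Illustrative sketch of the translation above, assuming hypothetical
# polymorphic models ModelC -> ModelB -> ModelA:
#
#   >>> translate_polymorphic_field_path(ModelA, 'ModelC___field3')
#   'modelb__modelc__field3'
#   >>> translate_polymorphic_field_path(ModelA, '-ModelC___field3')  # order_by arg
#   '-modelb__modelc__field3'
#   >>> translate_polymorphic_field_path(ModelA, 'field1')            # no '___' => unchanged
#   'field1'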
def _create_model_filter_Q(modellist, not_instance_of=False, using=DEFAULT_DB_ALIAS):
"""
Helper function for instance_of / not_instance_of
Creates and returns a Q object that filters for the models in modellist,
including all subclasses of these models (as we want to do the same
as Python's isinstance()).
We recursively collect all __subclasses__(), create a Q filter for each,
and or-combine these Q objects. This could be done much more
efficiently however (regarding the resulting sql), should an optimization
be needed.
"""
if not modellist:
return None
from .models import PolymorphicModel
if type(modellist) != list and type(modellist) != tuple:
if issubclass(modellist, PolymorphicModel):
modellist = [modellist]
else:
assert False, 'PolymorphicModel: instance_of expects a list of (polymorphic) models or a single (polymorphic) model'
def q_class_with_subclasses(model):
q = Q(polymorphic_ctype=ContentType.objects.db_manager(using).get_for_model(model, for_concrete_model=False))
for subclass in model.__subclasses__():
q = q | q_class_with_subclasses(subclass)
return q
qlist = [q_class_with_subclasses(m) for m in modellist]
q_ored = reduce(lambda a, b: a | b, qlist)
if not_instance_of:
q_ored = ~q_ored
return q_ored
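# Illustrative sketch of how these helpers surface in the public queryset API
# (ModelA/ModelB are hypothetical polymorphic models):
#
#   ModelA.objects.filter(instance_of=ModelB)        # ModelB and all its subclasses
#   ModelA.objects.filter(not_instance_of=ModelB)    # everything except ModelB & subclasses
#   ModelA.objects.filter(Q(instance_of=ModelB) | Q(field1='x'))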
| |
# -*- coding: utf-8 -*-
"""
Survey builder functionality.
"""
import copy
import os
import re
from pyxform import file_utils, utils
from pyxform.errors import PyXFormError
from pyxform.external_instance import ExternalInstance
from pyxform.question import (
InputQuestion,
MultipleChoiceQuestion,
OsmUploadQuestion,
Question,
RangeQuestion,
TriggerQuestion,
UploadQuestion,
)
from pyxform.question_type_dictionary import QUESTION_TYPE_DICT
from pyxform.section import GroupedSection, RepeatingSection
from pyxform.survey import Survey
from pyxform.xls2json import SurveyReader
def copy_json_dict(json_dict):
"""
Returns a deep copy of the input json_dict
"""
json_dict_copy = None
items = None
if type(json_dict) is list:
json_dict_copy = [None] * len(json_dict)
items = enumerate(json_dict)
elif type(json_dict) is dict:
json_dict_copy = {}
items = json_dict.items()
for key, value in items:
if type(value) is dict or type(value) is list:
json_dict_copy[key] = copy_json_dict(value)
else:
json_dict_copy[key] = value
return json_dict_copy
class SurveyElementBuilder:
# we use this CLASSES dict to create questions from dictionaries
QUESTION_CLASSES = {
"": Question,
"action": Question,
"input": InputQuestion,
"odk:rank": MultipleChoiceQuestion,
"osm": OsmUploadQuestion,
"range": RangeQuestion,
"select": MultipleChoiceQuestion,
"select1": MultipleChoiceQuestion,
"trigger": TriggerQuestion,
"upload": UploadQuestion,
}
SECTION_CLASSES = {
"group": GroupedSection,
"repeat": RepeatingSection,
"survey": Survey,
}
def __init__(self, **kwargs):
# I don't know why we would need an explicit none option for
# select alls
self._add_none_option = False
self.set_sections(kwargs.get("sections", {}))
# dictionary of setvalue target and value tuple indexed by triggering element
self.setvalues_by_triggering_ref = {}
def set_sections(self, sections):
"""
sections is a dict of python objects, a key in this dict is
the name of the section and the value is a dict that can be
used to create a whole survey.
"""
assert type(sections) == dict
self._sections = sections
def create_survey_element_from_dict(self, d):
"""
Convert from a nested python dictionary/array structure (a json dict I
call it because it corresponds directly with a json object)
to a survey object
"""
if "add_none_option" in d:
self._add_none_option = d["add_none_option"]
if d["type"] in self.SECTION_CLASSES:
section = self._create_section_from_dict(d)
if d["type"] == "survey":
section.setvalues_by_triggering_ref = self.setvalues_by_triggering_ref
return section
elif d["type"] == "loop":
return self._create_loop_from_dict(d)
elif d["type"] == "include":
section_name = d["name"]
if section_name not in self._sections:
raise PyXFormError(
"This section has not been included.",
section_name,
self._sections.keys(),
)
d = self._sections[section_name]
full_survey = self.create_survey_element_from_dict(d)
return full_survey.children
elif d["type"] in ["xml-external", "csv-external"]:
return ExternalInstance(**d)
else:
self._save_trigger_as_setvalue_and_remove_calculate(d)
return self._create_question_from_dict(
d, copy_json_dict(QUESTION_TYPE_DICT), self._add_none_option
)
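# Illustrative sketch (hypothetical minimal dict): a plain question dict is
# routed by the method above to the matching Question subclass, e.g.
#
#   builder = SurveyElementBuilder()
#   q = builder.create_survey_element_from_dict(
#       {"type": "text", "name": "first_name", "label": "First name"})
#   # q should be an InputQuestion instance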
def _save_trigger_as_setvalue_and_remove_calculate(self, d):
if "trigger" in d:
triggering_ref = re.sub(r"\s+", "", d["trigger"])
value = ""
if "bind" in d and "calculate" in d["bind"]:
value = d["bind"]["calculate"]
if triggering_ref in self.setvalues_by_triggering_ref:
self.setvalues_by_triggering_ref[triggering_ref].append(
(d["name"], value)
)
else:
self.setvalues_by_triggering_ref[triggering_ref] = [(d["name"], value)]
@staticmethod
def _create_question_from_dict(d, question_type_dictionary, add_none_option=False):
question_type_str = d["type"]
d_copy = d.copy()
# TODO: Keep add none option?
if add_none_option and question_type_str.startswith("select all that apply"):
SurveyElementBuilder._add_none_option_to_select_all_that_apply(d_copy)
# Handle or_other on select type questions
or_other_str = " or specify other"
if question_type_str.endswith(or_other_str):
question_type_str = question_type_str[
: len(question_type_str) - len(or_other_str)
]
d_copy["type"] = question_type_str
SurveyElementBuilder._add_other_option_to_multiple_choice_question(d_copy)
return [
SurveyElementBuilder._create_question_from_dict(
d_copy, question_type_dictionary, add_none_option
),
SurveyElementBuilder._create_specify_other_question_from_dict(d_copy),
]
question_class = SurveyElementBuilder._get_question_class(
question_type_str, question_type_dictionary
)
# todo: clean up this spaghetti code
d_copy["question_type_dictionary"] = question_type_dictionary
if question_class:
return question_class(**d_copy)
return []
@staticmethod
def _add_other_option_to_multiple_choice_question(d):
# ideally, we'd just be pulling from children
choice_list = d.get("choices", d.get("children", []))
if len(choice_list) <= 0:
raise PyXFormError("There should be choices for this question.")
other_choice = {"name": "other", "label": "Other"}
if other_choice not in choice_list:
choice_list.append(other_choice)
@staticmethod
def _add_none_option_to_select_all_that_apply(d_copy):
choice_list = d_copy.get("choices", d_copy.get("children", []))
if len(choice_list) <= 0:
raise PyXFormError("There should be choices for this question.")
none_choice = {"name": "none", "label": "None"}
if none_choice not in choice_list:
choice_list.append(none_choice)
none_constraint = "(.='none' or not(selected(., 'none')))"
if "bind" not in d_copy:
d_copy["bind"] = {}
if "constraint" in d_copy["bind"]:
d_copy["bind"]["constraint"] += " and " + none_constraint
else:
d_copy["bind"]["constraint"] = none_constraint
@staticmethod
def _get_question_class(question_type_str, question_type_dictionary):
"""
Read the type string from the json format,
and find what class it maps to going through
type_dictionary -> QUESTION_CLASSES
"""
question_type = question_type_dictionary.get(question_type_str, {})
control_dict = question_type.get("control", {})
control_tag = control_dict.get("tag", "")
if control_tag == "upload" and control_dict.get("mediatype") == "osm/*":
control_tag = "osm"
return SurveyElementBuilder.QUESTION_CLASSES[control_tag]
@staticmethod
def _create_specify_other_question_from_dict(d):
kwargs = {
"type": "text",
"name": "%s_other" % d["name"],
"label": "Specify other.",
"bind": {"relevant": "selected(../%s, 'other')" % d["name"]},
}
return InputQuestion(**kwargs)
def _create_section_from_dict(self, d):
d_copy = d.copy()
children = d_copy.pop("children", [])
section_class = self.SECTION_CLASSES[d_copy["type"]]
if d["type"] == "survey" and "title" not in d:
d_copy["title"] = d["name"]
result = section_class(**d_copy)
for child in children:
# Deep copying the child is a hacky solution to the or_other bug.
# I don't know why it works.
# And I hope it doesn't break something else.
# I think the good solution would be to rewrite this class.
survey_element = self.create_survey_element_from_dict(copy.deepcopy(child))
if survey_element:
result.add_children(survey_element)
return result
def _create_loop_from_dict(self, d):
"""
Takes a json_dict of "loop" type
Returns a GroupedSection
"""
d_copy = d.copy()
children = d_copy.pop("children", [])
columns = d_copy.pop("columns", [])
result = GroupedSection(**d_copy)
# columns is a left over from when this was
# create_table_from_dict, I will need to clean this up
for column_dict in columns:
# If this is a none option for a select all that apply
# question then we should skip adding it to the result
if column_dict["name"] == "none":
continue
column = GroupedSection(**column_dict)
for child in children:
question_dict = self._name_and_label_substitutions(child, column_dict)
question = self.create_survey_element_from_dict(question_dict)
column.add_child(question)
result.add_child(column)
if result.name != "":
return result
# TODO: Verify that nothing breaks if this returns a list
return result.children
def _name_and_label_substitutions(self, question_template, column_headers):
# if the label in column_headers has multiple languages setup a
# dictionary by language to do substitutions.
info_by_lang = {}
if type(column_headers["label"]) == dict:
info_by_lang = dict(
[
(
lang,
{
"name": column_headers["name"],
"label": column_headers["label"][lang],
},
)
for lang in column_headers["label"].keys()
]
)
result = question_template.copy()
for key in result.keys():
if type(result[key]) == str:
result[key] %= column_headers
elif type(result[key]) == dict:
result[key] = result[key].copy()
for key2 in result[key].keys():
if type(column_headers["label"]) == dict:
result[key][key2] %= info_by_lang.get(key2, column_headers)
else:
result[key][key2] %= column_headers
return result
def create_survey_element_from_json(self, str_or_path):
d = utils.get_pyobj_from_json(str_or_path)
return self.create_survey_element_from_dict(d)
def create_survey_element_from_dict(d, sections=None):
"""
Creates a Survey from a dictionary in the format provided by SurveyReader
"""
if sections is None:
sections = {}
builder = SurveyElementBuilder()
builder.set_sections(sections)
return builder.create_survey_element_from_dict(d)
def create_survey_element_from_json(str_or_path):
d = utils.get_pyobj_from_json(str_or_path)
return create_survey_element_from_dict(d)
def create_survey_from_xls(path_or_file, default_name=None):
excel_reader = SurveyReader(path_or_file, default_name=default_name)
d = excel_reader.to_json_dict()
survey = create_survey_element_from_dict(d)
if not survey.id_string:
survey.id_string = excel_reader._name
return survey
def create_survey(
name_of_main_section=None,
sections=None,
main_section=None,
id_string=None,
title=None,
default_language=None,
):
"""
name_of_main_section -- a string key used to find the main section in the
sections dict if it is not supplied in the
main_section arg
main_section -- a json dict that represents a survey
sections -- a dictionary of sections that can be drawn from to build the
survey
This function uses the builder class to create and return a survey.
"""
if sections is None:
sections = {}
if main_section is None:
main_section = sections[name_of_main_section]
builder = SurveyElementBuilder()
builder.set_sections(sections)
# assert name_of_main_section in sections, name_of_main_section
if "id_string" not in main_section:
main_section["id_string"] = (
name_of_main_section if id_string is None else id_string
)
survey = builder.create_survey_element_from_dict(main_section)
# not sure where to do this without repeating ourselves,
# but it's needed to pass xls2xform tests
# TODO: I would assume that the json-dict is valid
# (i.e. that it has a id string), then throw an error here.
# We can set the id to whatever we want in xls2json.
# Although to be totally modular, maybe we do need to repeat a lot
# of the validation and setting default value stuff from xls2json
if id_string is not None:
survey.id_string = id_string
if title is not None:
survey.title = title
survey.def_lang = default_language
return survey
def create_survey_from_path(path, include_directory=False):
"""
include_directory -- Switch to indicate that all the survey forms in the
same directory as the specified file should be read
so they can be included through include types.
@see: create_survey
"""
directory, file_name = os.path.split(path)
if include_directory:
main_section_name = file_utils._section_name(file_name)
sections = file_utils.collect_compatible_files_in_directory(directory)
else:
main_section_name, section = file_utils.load_file_to_dict(path)
sections = {main_section_name: section}
pkg = {"name_of_main_section": main_section_name, "sections": sections}
return create_survey(**pkg)
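# Illustrative module-level usage sketch (the XLSForm path is a placeholder):
#
#   survey = create_survey_from_xls("my_form.xlsx", default_name="my_form")
#   xml = survey.to_xml()  # render the whole survey as an ODK XForm
#
# create_survey_from_path() works the same way, and with include_directory=True
# it also loads sibling files so they can be pulled in via "include" sections.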
| |
from gii.core import signals
from gii.qt.helpers import restrainWidgetToScreen
from gii.qt.IconCache import getIcon
from PyQt4 import QtCore, QtGui, uic
from PyQt4.QtCore import Qt
from ToolWindowManager import ToolWindowManager
def getWindowScreenId(window):
desktop=QtGui.QApplication.desktop()
return desktop.screenNumber(window)
def moveWindowToCenter(window):
desktop=QtGui.QApplication.desktop()
geom=desktop.availableGeometry(window)
x=(geom.width()-window.width())/2 +geom.x()
y=(geom.height()-window.height())/2+geom.y()
window.move(x,y)
##----------------------------------------------------------------##
class MainWindow(QtGui.QMainWindow):
"""docstring for MainWindow"""
def __init__(self, parent):
super(MainWindow, self).__init__(parent)
# self.setDocumentMode(True)
self.defaultToolBarIconSize = 16
self.setUnifiedTitleAndToolBarOnMac( False )
self.setDockOptions(
QtGui.QMainWindow.AllowNestedDocks | QtGui.QMainWindow.AllowTabbedDocks )
# self.setTabPosition( Qt.AllDockWidgetAreas, QtGui.QTabWidget.North)
font=QtGui.QFont()
font.setPointSize(11)
self.setFont(font)
self.setIconSize( QtCore.QSize( 16, 16 ) )
self.setFocusPolicy( Qt.WheelFocus )
self.centerTabWidget = QtGui.QTabWidget( self )
self.setCentralWidget( self.centerTabWidget )
self.centerTabWidget.currentChanged.connect( self.onDocumentTabChanged )
# self.centerTabWidget.setDocumentMode(True)
self.centerTabWidget.setMovable(True)
self.centerTabWidget.setTabsClosable(True)
self.centerTabWidget.tabCloseRequested.connect( self.onTabCloseRequested )
# self.toolWindowMgr = ToolWindowManager( self )
# self.setCentralWidget( self.toolWindowMgr )
self.resetCorners()
def resetCorners( self ):
self.setCorner( Qt.TopLeftCorner, Qt.LeftDockWidgetArea )
self.setCorner( Qt.BottomLeftCorner, Qt.BottomDockWidgetArea )
self.setCorner( Qt.TopRightCorner, Qt.RightDockWidgetArea )
self.setCorner( Qt.BottomRightCorner, Qt.RightDockWidgetArea )
def moveToCenter(self):
moveWindowToCenter(self)
def ensureVisible(self):
restrainWidgetToScreen(self)
def startTimer(self, fps, trigger):
assert(hasattr(trigger,'__call__'))
interval = 1000/fps
timer = QtCore.QTimer(self)
timer.timeout.connect(trigger)
timer.start(interval)
return timer
def requestSubWindow(self, id, **windowOption ):
title = windowOption.get('title',id)
window = SubWindow(self)
window.setWindowTitle(title)
window.windowMode = 'sub'
window.titleBase = title
minSize=windowOption.get('minSize',None)
if minSize:
window.setMinimumSize(*minSize)
# else:
# window.setMinimumSize(20,20)
maxSize=windowOption.get('maxSize',None)
if maxSize:
window.setMaximumSize( *maxSize )
size=windowOption.get('size',None)
if size:
window.resize(*size)
return window
def requestDocumentWindow(self, id, **windowOption ):
title = windowOption.get('title',id)
window = DocumentWindow( self.centerTabWidget )
window.setWindowOptions( windowOption )
# window = DocumentWindow( self.toolWindowMgr )
# self.toolWindowMgr.addToolWindow( window, ToolWindowManager.EmptySpace )
window.parentWindow = self
window.setWindowTitle( title )
# self.centerTabWidget.addTab( window, title )
window.windowMode = 'tab'
window.titleBase = title
minSize = windowOption.get('minSize',None)
if minSize:
window.setMinimumSize(*minSize)
else:
window.setMinimumSize(20,20)
size = windowOption.get('size',None)
if size:
window.resize(*size)
return window
def requestDockWindow(self, id, **dockOptions ):
title=dockOptions.get( 'title', id )
dockArea=dockOptions.get('dock','left')
if dockArea=='left':
dockArea=Qt.LeftDockWidgetArea
elif dockArea=='right':
dockArea=Qt.RightDockWidgetArea
elif dockArea=='top':
dockArea=Qt.TopDockWidgetArea
elif dockArea=='bottom':
dockArea=Qt.BottomDockWidgetArea
elif dockArea=='main':
dockArea='center'
elif dockArea=='float':
dockArea = False
elif dockArea:
raise Exception('unsupported dock area:%s'%dockArea)
window=DockWindow(self)
if title:
window.setWindowTitle(title)
window.setObjectName('_dock_'+id)
window.windowMode = 'dock'
window.titleBase = title
if dockOptions.get( 'allowDock', True ):
window.setAllowedAreas( Qt.AllDockWidgetAreas )
else:
window.setAllowedAreas( Qt.NoDockWidgetArea )
dockArea = None
if dockArea and dockArea!='center':
self.addDockWidget(dockArea, window)
elif dockArea=='center':
self.setCentralWidget(window)
window.setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures)
window.hideTitleBar()
else:
window.setFloating(True)
window.setupCustomTitleBar()
minSize=dockOptions.get('minSize',None)
if minSize:
window.setMinimumSize(*minSize)
else:
window.setMinimumSize(20,20)
size=dockOptions.get('size',None)
if size:
window.resize(*size)
if not dockOptions.get('autohide',False):
window._useWindowFlags()
window.dockOptions=dockOptions
return window
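# Illustrative usage sketch (option values are hypothetical): a module would
# typically request a dock panel from the main window like this:
#
#   dock = mainWindow.requestDockWindow('SceneGraph',
#       title='Scene Graph', dock='left', minSize=(200, 200))
#   dock.addWidgetFromFile('SceneGraph.ui')
#   dock.show()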
def requestToolWindow(self, id, **option ):
pass
def onTabCloseRequested( self, idx ):
subwindow = self.centerTabWidget.widget( idx )
if subwindow.close():
self.centerTabWidget.removeTab( idx )
def requestToolBar( self, name, **options ):
toolbar = QtGui.QToolBar()
toolbar.setFloatable( options.get( 'floatable', False ) )
toolbar.setMovable( options.get( 'movable', True ) )
toolbar.setObjectName( 'toolbar-%s' % name )
iconSize = options.get('icon_size', self.defaultToolBarIconSize )
self.addToolBar( toolbar )
toolbar.setIconSize( QtCore.QSize( iconSize, iconSize ) )
toolbar._icon_size = iconSize
return toolbar
def onDocumentTabChanged( self, idx ):
w = self.centerTabWidget.currentWidget()
if w: w.setFocus()
##----------------------------------------------------------------##
class SubWindowMixin:
def setWindowOptions( self, options ):
self.windowOptions = options
def getWindowOption( self, key, default = None ):
if hasattr( self, 'windowOptions' ):
return self.windowOptions.get( key, default )
else:
return None
def setDocumentName( self, name ):
self.documentName = name
if name:
title = '%s - %s' % ( self.documentName, self.titleBase )
self.setWindowTitle( title )
else:
self.setWindowTitle( self.titleBase )
def setCallbackOnClose( self, callback ):
self.callbackOnClose = callback
def setupUi(self):
self.callbackOnClose = None
self.container = self.createContainer()
self.mainLayout = QtGui.QVBoxLayout(self.container)
self.mainLayout.setSpacing(0)
self.mainLayout.setMargin(0)
self.mainLayout.setObjectName('MainLayout')
def createContainer(self):
container = QtGui.QWidget(self)
self.setWidget(container)
return container
def addWidget(self, widget, **layoutOption):
# widget.setParent(self)
if layoutOption.get('fixed', False):
widget.setSizePolicy(
QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Fixed
)
elif layoutOption.get('expanding', True):
widget.setSizePolicy(
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding
)
self.mainLayout.addWidget(widget)
return widget
def addWidgetFromFile(self, uiFile, **layoutOption):
form = uic.loadUi(uiFile)
return self.addWidget(form, **layoutOption)
def moveToCenter(self):
moveWindowToCenter(self)
def ensureVisible(self):
restrainWidgetToScreen(self)
def onClose( self ):
if self.callbackOnClose:
return self.callbackOnClose()
return True
##----------------------------------------------------------------##
class SubWindow(QtGui.QMainWindow, SubWindowMixin):
def __init__(self, parent):
super(SubWindow, self).__init__(parent)
self.setupUi()
self.stayOnTop = False
self.setFocusPolicy( Qt.WheelFocus )
def hideTitleBar(self):
pass
# emptyTitle=QtGui.QWidget()
# self.setTitleBarWidget(emptyTitle)
def createContainer(self):
container=QtGui.QWidget(self)
self.setCentralWidget(container)
return container
def startTimer(self, fps, trigger):
assert(hasattr(trigger,'__call__'))
interval = 1000/fps
timer=QtCore.QTimer(self)
timer.timeout.connect(trigger)
timer.start(interval)
return timer
def focusOutEvent(self, event):
pass
def focusInEvent(self, event):
pass
def closeEvent( self, event ):
if self.onClose():
return super( SubWindow, self ).closeEvent( event )
else:
event.ignore()
##----------------------------------------------------------------##
class DocumentWindow( SubWindow ):
def show( self, *args ):
tab = self.parentWindow.centerTabWidget
idx = tab.indexOf( self )
if idx < 0:
idx = tab.addTab( self, self.windowTitle() )
iconPath = self.getWindowOption( 'icon' )
if iconPath:
tab.tabBar().setTabIcon( idx, getIcon( iconPath ) )
super( DocumentWindow, self ).show( *args )
tab.setCurrentIndex( idx )
def setWindowTitle( self, title ):
super( DocumentWindow, self ).setWindowTitle( title )
tabParent = self.parentWindow.centerTabWidget
idx = tabParent.indexOf( self )
tabParent.setTabText( idx, title )
def addToolBar(self):
return self.addWidget( QtGui.QToolBar(), expanding = False )
##----------------------------------------------------------------##
class DockWindowTitleBar( QtGui.QWidget ):
"""docstring for DockWindowTitleBar"""
def __init__(self, *args):
super(DockWindowTitleBar, self).__init__(*args)
self.setWindowFlags( Qt.Dialog )
def sizeHint(self):
return QtCore.QSize(20,15)
def minimumSizeHint(self):
return QtCore.QSize(20,15)
##----------------------------------------------------------------##
class DockWindow(QtGui.QDockWidget, SubWindowMixin):
"""docstring for DockWindow"""
def __init__(self, parent):
super(DockWindow, self).__init__(parent)
self.setupUi()
self.setupCustomTitleBar()
self.topLevelChanged.connect( self.onTopLevelChanged )
font = QtGui.QFont()
font.setPointSize(11)
self.setFont(font)
self.topLevel = False
self.stayOnTop = False
def setupCustomTitleBar(self):
self.originTitleBar = self.titleBarWidget()
self.customTitleBar = DockWindowTitleBar( self )
self.customTitleBar = self.originTitleBar # note: keeps the stock title bar; the DockWindowTitleBar created above is left unused
self.setTitleBarWidget( self.customTitleBar )
pass
def _useWindowFlags(self):
pass
def setStayOnTop( self, stayOnTop ):
self.stayOnTop = stayOnTop
if stayOnTop and self.topLevel:
self.setWindowFlags( Qt.Window | Qt.WindowStaysOnTopHint )
def hideTitleBar(self):
emptyTitle = QtGui.QWidget()
self.setTitleBarWidget(emptyTitle)
def startTimer(self, fps, trigger):
assert(hasattr(trigger,'__call__'))
interval = 1000/fps
timer=QtCore.QTimer(self)
timer.timeout.connect(trigger)
timer.start(interval)
return timer
def onTopLevelChanged(self, toplevel):
self.topLevel = toplevel
if toplevel:
self.setTitleBarWidget( self.originTitleBar )
flag = Qt.Window
if self.stayOnTop:
flag |= Qt.WindowStaysOnTopHint
self.setWindowFlags( flag )
self.show()
else:
self.setTitleBarWidget( self.customTitleBar )
pass
def addToolBar(self):
return self.addWidget( QtGui.QToolBar(), expanding = False )
def closeEvent( self, event ):
if self.onClose():
return super( DockWindow, self ).closeEvent( event )
else:
event.ignore()
| |
import json
import datetime
import requests
import pytz
import re
from datetime import timedelta
from functools import wraps
from flask import make_response, request, current_app
from functools import update_wrapper
from alerta.app import app, db
from alerta.app.metrics import Timer
from alerta.plugins import load_plugins, RejectException
LOG = app.logger
plugins = load_plugins()
duplicate_timer = Timer('alerts', 'duplicate', 'Duplicate alerts', 'Total time to process number of duplicate alerts')
correlate_timer = Timer('alerts', 'correlate', 'Correlated alerts', 'Total time to process number of correlated alerts')
create_timer = Timer('alerts', 'create', 'Newly created alerts', 'Total time to process number of new alerts')
class DateEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (datetime.date, datetime.datetime)):
return obj.replace(microsecond=0).strftime('%Y-%m-%dT%H:%M:%S') + ".%03dZ" % (obj.microsecond // 1000)
else:
return json.JSONEncoder.default(self, obj)
# Over-ride jsonify to support Date Encoding
def jsonify(*args, **kwargs):
return current_app.response_class(json.dumps(dict(*args, **kwargs), cls=DateEncoder,
indent=None if request.is_xhr else 2), mimetype='application/json')
def jsonp(func):
"""Wraps JSONified output for JSONP requests."""
@wraps(func)
def decorated(*args, **kwargs):
callback = request.args.get('callback', False)
if callback:
data = str(func(*args, **kwargs).data)
content = str(callback) + '(' + data + ')'
mimetype = 'application/javascript'
return current_app.response_class(content, mimetype=mimetype)
else:
return func(*args, **kwargs)
return decorated
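# Illustrative usage sketch (route and view name are hypothetical): the
# decorator wraps the JSON payload in the requested callback when a
# ?callback=... parameter is present, and is a no-op otherwise:
#
#   @app.route('/example')
#   @jsonp
#   def example():
#       return jsonify(status='ok')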
PARAMS_EXCLUDE = [
'_',
'callback',
'token',
'api-key'
]
def parse_fields(r):
query_time = datetime.datetime.utcnow()
params = r.args.copy()
for s in PARAMS_EXCLUDE:
if s in params:
del params[s]
if params.get('q', None):
query = json.loads(params['q'])
del params['q']
else:
query = dict()
page = params.get('page', 1)
if 'page' in params:
del params['page']
page = int(page)
if params.get('from-date', None):
try:
from_date = datetime.datetime.strptime(params['from-date'], '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError as e:
LOG.warning('Could not parse from-date query parameter: %s', e)
raise
from_date = from_date.replace(tzinfo=pytz.utc)
del params['from-date']
else:
from_date = None
if params.get('to-date', None):
try:
to_date = datetime.datetime.strptime(params['to-date'], '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError as e:
LOG.warning('Could not parse to-date query parameter: %s', e)
raise
to_date = to_date.replace(tzinfo=pytz.utc)
del params['to-date']
else:
to_date = query_time
to_date = to_date.replace(tzinfo=pytz.utc)
if from_date and to_date:
query['lastReceiveTime'] = {'$gt': from_date, '$lte': to_date}
elif to_date:
query['lastReceiveTime'] = {'$lte': to_date}
if params.get('duplicateCount', None):
query['duplicateCount'] = int(params.get('duplicateCount'))
del params['duplicateCount']
if params.get('repeat', None):
query['repeat'] = True if params.get('repeat', 'true') == 'true' else False
del params['repeat']
sort = list()
direction = 1
if params.get('reverse', None):
direction = -1
del params['reverse']
if params.get('sort-by', None):
for sort_by in params.getlist('sort-by'):
if sort_by in ['createTime', 'receiveTime', 'lastReceiveTime']:
sort.append((sort_by, -direction)) # reverse chronological
else:
sort.append((sort_by, direction))
del params['sort-by']
else:
sort.append(('lastReceiveTime', -direction))
group = list()
if 'group-by' in params:
group = params.get('group-by')
del params['group-by']
if 'limit' in params:
limit = params.get('limit')
del params['limit']
else:
limit = app.config['QUERY_LIMIT']
limit = int(limit)
ids = params.getlist('id')
if len(ids) == 1:
query['$or'] = [{'_id': {'$regex': '^' + ids[0]}}, {'lastReceiveId': {'$regex': '^' + ids[0]}}]
del params['id']
elif ids:
query['$or'] = [{'_id': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}}, {'lastReceiveId': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}}]
del params['id']
for field in params:
value = params.getlist(field)
if len(value) == 1:
value = value[0]
if field.endswith('!'):
if value.startswith('~'):
query[field[:-1]] = dict()
query[field[:-1]]['$not'] = re.compile(value[1:], re.IGNORECASE)
else:
query[field[:-1]] = dict()
query[field[:-1]]['$ne'] = value
else:
if value.startswith('~'):
query[field] = dict()
query[field]['$regex'] = re.compile(value[1:], re.IGNORECASE)
else:
query[field] = value
else:
if field.endswith('!'):
if '~' in [v[0] for v in value]:
value = '|'.join([v.lstrip('~') for v in value])
query[field[:-1]] = dict()
query[field[:-1]]['$not'] = re.compile(value, re.IGNORECASE)
else:
query[field[:-1]] = dict()
query[field[:-1]]['$nin'] = value
else:
if '~' in [v[0] for v in value]:
value = '|'.join([v.lstrip('~') for v in value])
query[field] = dict()
query[field]['$regex'] = re.compile(value, re.IGNORECASE)
else:
query[field] = dict()
query[field]['$in'] = value
return query, sort, group, page, limit, query_time
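# Illustrative sketch of the mapping performed above (values are hypothetical):
# GET /alerts?status=open&severity=~major|critical&sort-by=severity would
# roughly produce
#
#   query = {'status': 'open',
#            'severity': {'$regex': re.compile('major|critical', re.IGNORECASE)},
#            'lastReceiveTime': {'$lte': <query_time>}}
#   sort  = [('severity', 1)]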
def process_alert(incomingAlert):
for plugin in plugins:
try:
incomingAlert = plugin.pre_receive(incomingAlert)
except RejectException:
raise
except Exception as e:
raise RuntimeError('Error while running pre-receive plug-in: %s' % str(e))
if not incomingAlert:
raise SyntaxError('Plug-in pre-receive hook did not return modified alert')
if db.is_blackout_period(incomingAlert):
raise RuntimeWarning('Suppressed during blackout period')
try:
if db.is_duplicate(incomingAlert):
started = duplicate_timer.start_timer()
alert = db.save_duplicate(incomingAlert)
duplicate_timer.stop_timer(started)
elif db.is_correlated(incomingAlert):
started = correlate_timer.start_timer()
alert = db.save_correlated(incomingAlert)
correlate_timer.stop_timer(started)
else:
started = create_timer.start_timer()
alert = db.create_alert(incomingAlert)
create_timer.stop_timer(started)
except Exception as e:
raise RuntimeError(e)
for plugin in plugins:
try:
plugin.post_receive(alert)
except Exception as e:
raise RuntimeError('Error while running post-receive plug-in: %s' % str(e))
return alert
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import itertools
import operator
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
import neutron.agent.common.config
import neutron.agent.common.ovs_lib
import neutron.agent.dhcp.config
import neutron.agent.l2.extensions.manager
import neutron.agent.l3.config
import neutron.agent.l3.ha
import neutron.agent.linux.interface
import neutron.agent.linux.pd
import neutron.agent.linux.ra
import neutron.agent.metadata.config
import neutron.agent.ovsdb.api
import neutron.agent.securitygroups_rpc
import neutron.common.config
import neutron.db.agents_db
import neutron.db.agentschedulers_db
import neutron.db.dvr_mac_db
import neutron.db.extraroute_db
import neutron.db.l3_agentschedulers_db
import neutron.db.l3_dvr_db
import neutron.db.l3_gwmode_db
import neutron.db.l3_hamode_db
import neutron.db.migration.cli
import neutron.extensions.allowedaddresspairs
import neutron.extensions.l3
import neutron.extensions.securitygroup
import neutron.openstack.common.cache.cache
import neutron.plugins.ml2.config
import neutron.plugins.ml2.drivers.agent.config
import neutron.plugins.ml2.drivers.linuxbridge.agent.common.config
import neutron.plugins.ml2.drivers.macvtap.agent.config
import neutron.plugins.ml2.drivers.mech_sriov.agent.common.config
import neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver
import neutron.plugins.ml2.drivers.openvswitch.agent.common.config
import neutron.plugins.ml2.drivers.type_flat
import neutron.plugins.ml2.drivers.type_geneve
import neutron.plugins.ml2.drivers.type_gre
import neutron.plugins.ml2.drivers.type_vlan
import neutron.plugins.ml2.drivers.type_vxlan
import neutron.quota
import neutron.service
import neutron.services.metering.agents.metering_agent
import neutron.services.qos.notification_drivers.manager
import neutron.wsgi
NOVA_GROUP = 'nova'
CONF = cfg.CONF
deprecations = {'nova.cafile': [cfg.DeprecatedOpt('ca_certificates_file',
group=NOVA_GROUP)],
'nova.insecure': [cfg.DeprecatedOpt('api_insecure',
group=NOVA_GROUP)],
'nova.timeout': [cfg.DeprecatedOpt('url_timeout',
group=NOVA_GROUP)]}
_nova_options = ks_loading.register_session_conf_options(
CONF, NOVA_GROUP, deprecated_opts=deprecations)
def list_agent_opts():
return [
('agent',
itertools.chain(
neutron.agent.common.config.ROOT_HELPER_OPTS,
neutron.agent.common.config.AGENT_STATE_OPTS,
neutron.agent.common.config.IPTABLES_OPTS,
neutron.agent.common.config.PROCESS_MONITOR_OPTS,
neutron.agent.common.config.AVAILABILITY_ZONE_OPTS)
),
('DEFAULT',
itertools.chain(
neutron.agent.common.config.INTERFACE_DRIVER_OPTS,
neutron.agent.metadata.config.SHARED_OPTS,
neutron.agent.metadata.config.DRIVER_OPTS)
)
]
def list_extension_opts():
return [
('DEFAULT',
neutron.extensions.allowedaddresspairs.allowed_address_pair_opts),
('quotas',
itertools.chain(
neutron.extensions.l3.l3_quota_opts,
neutron.extensions.securitygroup.security_group_quota_opts)
)
]
def list_db_opts():
return [
('DEFAULT',
itertools.chain(
neutron.db.agents_db.AGENT_OPTS,
neutron.db.extraroute_db.extra_route_opts,
neutron.db.l3_gwmode_db.OPTS,
neutron.db.agentschedulers_db.AGENTS_SCHEDULER_OPTS,
neutron.db.dvr_mac_db.dvr_mac_address_opts,
neutron.db.l3_dvr_db.router_distributed_opts,
neutron.db.l3_agentschedulers_db.L3_AGENTS_SCHEDULER_OPTS,
neutron.db.l3_hamode_db.L3_HA_OPTS)
),
('database',
neutron.db.migration.cli.get_engine_config())
]
def list_opts():
return [
('DEFAULT',
itertools.chain(
neutron.common.config.core_cli_opts,
neutron.common.config.core_opts,
neutron.wsgi.socket_opts,
neutron.service.service_opts)
),
(neutron.common.config.NOVA_CONF_SECTION,
itertools.chain(
neutron.common.config.nova_opts)
),
('quotas', neutron.quota.quota_opts)
]
def list_qos_opts():
return [
('DEFAULT',
neutron.services.qos.notification_drivers.manager.QOS_PLUGIN_OPTS)
]
def list_base_agent_opts():
return [
('DEFAULT',
itertools.chain(
neutron.agent.linux.interface.OPTS,
neutron.agent.common.config.INTERFACE_DRIVER_OPTS,
neutron.agent.common.ovs_lib.OPTS)
),
('AGENT', neutron.agent.common.config.AGENT_STATE_OPTS)
]
def list_dhcp_agent_opts():
return [
('DEFAULT',
itertools.chain(
neutron.agent.dhcp.config.DHCP_AGENT_OPTS,
neutron.agent.dhcp.config.DHCP_OPTS,
neutron.agent.dhcp.config.DNSMASQ_OPTS)
)
]
def list_linux_bridge_opts():
return [
('linux_bridge',
neutron.plugins.ml2.drivers.linuxbridge.agent.common.config.
bridge_opts),
('vxlan',
neutron.plugins.ml2.drivers.linuxbridge.agent.common.config.
vxlan_opts),
('agent',
neutron.plugins.ml2.drivers.agent.config.agent_opts),
('securitygroup',
neutron.agent.securitygroups_rpc.security_group_opts)
]
def list_l3_agent_opts():
return [
('DEFAULT',
itertools.chain(
neutron.agent.l3.config.OPTS,
neutron.service.service_opts,
neutron.agent.l3.ha.OPTS,
neutron.agent.linux.pd.OPTS,
neutron.agent.linux.ra.OPTS)
)
]
def list_macvtap_opts():
return [
('macvtap',
neutron.plugins.ml2.drivers.macvtap.agent.config.macvtap_opts),
('agent',
neutron.plugins.ml2.drivers.agent.config.agent_opts),
('securitygroup',
neutron.agent.securitygroups_rpc.security_group_opts)
]
def list_metadata_agent_opts():
return [
('DEFAULT',
itertools.chain(
neutron.agent.metadata.config.SHARED_OPTS,
neutron.agent.metadata.config.METADATA_PROXY_HANDLER_OPTS,
neutron.agent.metadata.config.UNIX_DOMAIN_METADATA_PROXY_OPTS,
neutron.openstack.common.cache.cache._get_oslo_configs())
),
('AGENT', neutron.agent.common.config.AGENT_STATE_OPTS)
]
def list_metering_agent_opts():
return [
('DEFAULT',
itertools.chain(
neutron.services.metering.agents.metering_agent.MeteringAgent.
Opts,
neutron.agent.common.config.INTERFACE_DRIVER_OPTS)
)
]
def list_ml2_conf_opts():
return [
('ml2',
neutron.plugins.ml2.config.ml2_opts),
('ml2_type_flat',
neutron.plugins.ml2.drivers.type_flat.flat_opts),
('ml2_type_vlan',
neutron.plugins.ml2.drivers.type_vlan.vlan_opts),
('ml2_type_gre',
neutron.plugins.ml2.drivers.type_gre.gre_opts),
('ml2_type_vxlan',
neutron.plugins.ml2.drivers.type_vxlan.vxlan_opts),
('ml2_type_geneve',
neutron.plugins.ml2.drivers.type_geneve.geneve_opts),
('securitygroup',
neutron.agent.securitygroups_rpc.security_group_opts)
]
def list_ml2_conf_sriov_opts():
return [
('ml2_sriov',
neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver.
sriov_opts)
]
def list_ovs_opts():
return [
('ovs',
itertools.chain(
neutron.plugins.ml2.drivers.openvswitch.agent.common.config.
ovs_opts,
neutron.agent.ovsdb.api.OPTS)
),
('agent',
neutron.plugins.ml2.drivers.openvswitch.agent.common.config.
agent_opts),
('securitygroup',
neutron.agent.securitygroups_rpc.security_group_opts)
]
def list_sriov_agent_opts():
return [
('ml2_sriov',
neutron.plugins.ml2.drivers.mech_sriov.agent.common.config.
sriov_nic_opts),
('agent',
neutron.agent.l2.extensions.manager.L2_AGENT_EXT_MANAGER_OPTS)
]
def list_auth_opts():
opt_list = copy.deepcopy(_nova_options)
opt_list.insert(0, ks_loading.get_auth_common_conf_options()[0])
# NOTE(mhickey): There are a lot of auth plugins, we just generate
# the config options for a few common ones
plugins = ['password', 'v2password', 'v3password']
for name in plugins:
for plugin_option in ks_loading.get_plugin_loader(name).get_options():
if all(option.name != plugin_option.name for option in opt_list):
opt_list.append(plugin_option)
opt_list.sort(key=operator.attrgetter('name'))
return [(NOVA_GROUP, opt_list)]
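# Illustrative sketch (not part of the module): each list_*_opts() function is
# exposed to oslo-config-generator via an entry point; the returned
# (group, options) pairs can also be inspected directly, e.g.
#
#   for group, opts in list_agent_opts():
#       print(group, [opt.name for opt in opts])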
| |
# -*- coding: utf-8 -*-
""""
Test Vector Layers
------------------
"""
import json
from folium import Map
from folium.utilities import get_bounds, normalize
from folium.vector_layers import Circle, CircleMarker, PolyLine, Polygon, Rectangle
def test_circle():
m = Map()
radius = 10000
popup = 'I am {} meters'.format(radius)
location = [-27.551667, -48.478889]
circle = Circle(
location=location,
radius=radius,
color='black',
weight=2,
fill_opacity=0.6,
opacity=1,
fill=True,
popup=popup,
)
circle.add_to(m)
expected_options = {
'bubblingMouseEvents': True,
'color': 'black',
'dashArray': None,
'dashOffset': None,
'fill': True,
'fillColor': 'black',
'fillOpacity': 0.6,
'fillRule': 'evenodd',
'lineCap': 'round',
'lineJoin': 'round',
'opacity': 1,
'radius': radius,
'stroke': True,
'weight': 2,
}
m._repr_html_()
expected_rendered = """
var {name} = L.circle(
{location},
{{
"bubblingMouseEvents": true,
"color": "black",
"dashArray": null,
"dashOffset": null,
"fill": true,
"fillColor": "black",
"fillOpacity": 0.6,
"fillRule": "evenodd",
"lineCap": "round",
"lineJoin": "round",
"opacity": 1,
"radius": {radius},
"stroke": true,
"weight": 2
}}
)
.addTo({map});
""".format(name=circle.get_name(), location=location, radius=radius, map=m.get_name()) # noqa
rendered = circle._template.module.script(circle)
assert normalize(rendered) == normalize(expected_rendered)
assert circle.get_bounds() == [location, location]
assert json.dumps(circle.to_dict()) == circle.to_json()
assert circle.location == [-27.551667, -48.478889]
assert circle.options == expected_options
def test_circle_marker():
m = Map()
radius = 50
popup = 'I am {} pixels'.format(radius)
location = [-27.55, -48.8]
circle_marker = CircleMarker(
location=location,
radius=radius,
color='black',
weight=2,
fill_opacity=0.6,
opacity=1,
fill=True,
popup=popup,
)
circle_marker.add_to(m)
options = {
'bubblingMouseEvents': True,
'color': 'black',
'dashArray': None,
'dashOffset': None,
'fill': True,
'fillColor': 'black',
'fillOpacity': 0.6,
'fillRule': 'evenodd',
'lineCap': 'round',
'lineJoin': 'round',
'opacity': 1,
'radius': radius,
'stroke': True,
'weight': 2,
}
m._repr_html_()
expected_bounds = [location, location]
expected_rendered = """
var {name} = L.circleMarker(
{location},
{{
"bubblingMouseEvents": true,
"color": "black",
"dashArray": null,
"dashOffset": null,
"fill": true,
"fillColor": "black",
"fillOpacity": 0.6,
"fillRule": "evenodd",
"lineCap": "round",
"lineJoin": "round",
"opacity": 1,
"radius": {radius},
"stroke": true,
"weight": 2
}}
)
.addTo({map});
""".format(name=circle_marker.get_name(), location=location, radius=radius, map=m.get_name()) # noqa
rendered = circle_marker._template.module.script(circle_marker)
assert normalize(rendered) == normalize(expected_rendered)
assert circle_marker.get_bounds() == expected_bounds
assert json.dumps(circle_marker.to_dict()) == circle_marker.to_json()
assert circle_marker.location == location
assert circle_marker.options == options
def test_rectangle():
m = Map()
location = [[45.6, -122.8], [45.61, -122.7]]
rectangle = Rectangle(
bounds=location,
popup='I am a rectangle',
color='black',
weight=2,
fill_opacity=0.6,
opacity=1,
fill=True,
)
rectangle.add_to(m)
expected_options = {
'bubblingMouseEvents': True,
'color': 'black',
'dashArray': None,
'dashOffset': None,
'fill': True,
'fillColor': 'black',
'fillOpacity': 0.6,
'fillRule': 'evenodd',
'lineCap': 'round',
'lineJoin': 'round',
'noClip': False,
'opacity': 1,
'smoothFactor': 1.0,
'stroke': True,
'weight': 2,
}
m._repr_html_()
expected_rendered = """
var {name} = L.rectangle(
{location},
{{
"bubblingMouseEvents": true,
"color": "black",
"dashArray": null,
"dashOffset": null,
"fill": true,
"fillColor": "black",
"fillOpacity": 0.6,
"fillRule": "evenodd",
"lineCap": "round",
"lineJoin": "round",
"noClip": false,
"opacity": 1,
"smoothFactor": 1.0,
"stroke": true,
"weight": 2
}}
)
.addTo({map});
""".format(name=rectangle.get_name(), location=location, map=m.get_name())
rendered = rectangle._template.module.script(rectangle)
assert normalize(rendered) == normalize(expected_rendered)
assert rectangle.get_bounds() == location
assert json.dumps(rectangle.to_dict()) == rectangle.to_json()
assert rectangle.options == expected_options
def test_polygon_marker():
m = Map()
locations = [[35.6636, 139.7634],
[35.6629, 139.7664],
[35.6663, 139.7706],
[35.6725, 139.7632],
[35.6728, 139.7627],
[35.6720, 139.7606],
[35.6682, 139.7588],
[35.6663, 139.7627]]
polygon = Polygon(locations=locations, popup='I am a polygon')
polygon.add_to(m)
expected_options = {
'bubblingMouseEvents': True,
'color': '#3388ff',
'dashArray': None,
'dashOffset': None,
'fill': False,
'fillColor': '#3388ff',
'fillOpacity': 0.2,
'fillRule': 'evenodd',
'lineCap': 'round',
'lineJoin': 'round',
'noClip': False,
'opacity': 1.0,
'smoothFactor': 1.0,
'stroke': True,
'weight': 3,
}
m._repr_html_()
expected_rendered = """
var {name} = L.polygon(
{locations},
{{
"bubblingMouseEvents": true,
"color": "#3388ff",
"dashArray": null,
"dashOffset": null,
"fill": false,
"fillColor": "#3388ff",
"fillOpacity": 0.2,
"fillRule": "evenodd",
"lineCap": "round",
"lineJoin": "round",
"noClip": false,
"opacity": 1.0,
"smoothFactor": 1.0,
"stroke": true,
"weight": 3
}}
)
.addTo({map});
""".format(locations=locations, name=polygon.get_name(), map=m.get_name())
rendered = polygon._template.module.script(polygon)
assert normalize(rendered) == normalize(expected_rendered)
assert polygon.get_bounds() == get_bounds(locations)
assert json.dumps(polygon.to_dict()) == polygon.to_json()
assert polygon.options == expected_options
def test_polyline():
m = Map()
locations = [[40.0, -80.0], [45.0, -80.0]]
polyline = PolyLine(locations=locations, popup='I am PolyLine')
polyline.add_to(m)
expected_options = {
'smoothFactor': 1.0,
'noClip': False,
'bubblingMouseEvents': True,
'color': '#3388ff',
'dashArray': None,
'dashOffset': None,
'fill': False,
'fillColor': '#3388ff',
'fillOpacity': 0.2,
'fillRule': 'evenodd',
'lineCap': 'round',
'lineJoin': 'round',
'opacity': 1.0,
'stroke': True,
'weight': 3,
}
m._repr_html_()
expected_rendered = """
var {name} = L.polyline(
{locations},
{{
"bubblingMouseEvents": true,
"color": "#3388ff",
"dashArray": null,
"dashOffset": null,
"fill": false,
"fillColor": "#3388ff",
"fillOpacity": 0.2,
"fillRule": "evenodd",
"lineCap": "round",
"lineJoin": "round",
"noClip": false,
"opacity": 1.0,
"smoothFactor": 1.0,
"stroke": true,
"weight": 3
}}
)
.addTo({map});
""".format(locations=locations, name=polyline.get_name(), map=m.get_name())
rendered = polyline._template.module.script(polyline)
assert normalize(rendered) == normalize(expected_rendered)
assert polyline.get_bounds() == get_bounds(locations)
assert json.dumps(polyline.to_dict()) == polyline.to_json()
assert polyline.options == expected_options
def test_multipolyline():
m = Map()
locations = [[[45.51, -122.68], [37.77, -122.43], [34.04, -118.2]],
[[40.78, -73.91], [41.83, -87.62], [32.76, -96.72]]]
multipolyline = PolyLine(locations=locations, popup='MultiPolyLine')
multipolyline.add_to(m)
expected_options = {
'smoothFactor': 1.0,
'noClip': False,
'bubblingMouseEvents': True,
'color': '#3388ff',
'dashArray': None,
'dashOffset': None,
'fill': False,
'fillColor': '#3388ff',
'fillOpacity': 0.2,
'fillRule': 'evenodd',
'lineCap': 'round',
'lineJoin': 'round',
'opacity': 1.0,
'stroke': True,
'weight': 3,
}
m._repr_html_()
expected_rendered = """
var {name} = L.polyline(
{locations},
{{
"bubblingMouseEvents": true,
"color": "#3388ff",
"dashArray": null,
"dashOffset": null,
"fill": false,
"fillColor": "#3388ff",
"fillOpacity": 0.2,
"fillRule": "evenodd",
"lineCap": "round",
"lineJoin": "round",
"noClip": false,
"opacity": 1.0,
"smoothFactor": 1.0,
"stroke": true,
"weight": 3
}}
)
.addTo({map});
""".format(locations=locations, name=multipolyline.get_name(), map=m.get_name())
rendered = multipolyline._template.module.script(multipolyline)
assert normalize(rendered) == normalize(expected_rendered)
assert multipolyline.get_bounds() == get_bounds(locations)
assert json.dumps(multipolyline.to_dict()) == multipolyline.to_json()
assert multipolyline.options == expected_options
"""provides driver function for running the app."""
from __future__ import print_function
import os
import traceback
from datetime import datetime
from typing import Any, Dict, List, Optional
from flask import Flask, g
from flask_migrate import Migrate
from werkzeug.exceptions import Forbidden, InternalServerError, NotFound
from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.routing import BaseConverter, Map
from werkzeug.utils import ImportStringError
from config_parser import parse_config
from database import Base, create_session
from decorators import template_renderer
from exceptions import (IncompleteConfigException, MissingConfigError,
SecretKeyInstallationException)
from log_configuration import LogConfiguration
from mailer import Mailer
from mod_auth.controllers import mod_auth
from mod_ci.controllers import mod_ci
from mod_customized.controllers import mod_customized
from mod_deploy.controllers import mod_deploy
from mod_home.controllers import mod_home
from mod_regression.controllers import mod_regression
from mod_sample.controllers import mod_sample
from mod_test.controllers import mod_test
from mod_upload.controllers import mod_upload
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app) # type: ignore
# Load config
try:
config = parse_config('config')
except ImportStringError:
traceback.print_exc()
raise MissingConfigError()
app.config.from_mapping(config)
try:
app.config['DEBUG'] = os.environ['DEBUG']
except KeyError:
app.config['DEBUG'] = False
# embed flask-migrate in the app itself
try:
app.config['SQLALCHEMY_DATABASE_URI'] = app.config['DATABASE_URI']
Migrate(app, Base)
except KeyError:
traceback.print_exc()
raise IncompleteConfigException()
# Init logger
log_configuration = LogConfiguration(app.root_path,
'platform',
app.config['DEBUG'])
log = log_configuration.create_logger("Platform")
def load_secret_keys(application: Flask, secret_session: str = 'secret_key',
secret_csrf: str = 'secret_csrf') -> None:
"""
Configure the SECRET_KEY from a file in the instance directory.
If the file does not exist, print instructions to create it from a shell with a random key, then exit.
"""
do_exit = False
session_file_path = os.path.join(application.root_path, secret_session)
csrf_file_path = os.path.join(application.root_path, secret_csrf)
try:
with open(session_file_path, 'rb') as session_file:
application.config['SECRET_KEY'] = session_file.read()
except IOError:
traceback.print_exc()
print('Error: No secret key. Create it with:')
if not os.path.isdir(os.path.dirname(session_file_path)):
print(f'mkdir -p {os.path.dirname(session_file_path)}')
print(f'head -c 24 /dev/urandom > {session_file_path}')
do_exit = True
try:
with open(csrf_file_path, 'rb') as csrf_file:
application.config['CSRF_SESSION_KEY'] = csrf_file.read()
except IOError:
print('Error: No secret CSRF key. Create it with:')
if not os.path.isdir(os.path.dirname(csrf_file_path)):
print(f'mkdir -p {os.path.dirname(csrf_file_path)}')
print(f'head -c 24 /dev/urandom > {csrf_file_path}')
do_exit = True
if do_exit:
raise SecretKeyInstallationException()
if 'TESTING' not in os.environ or os.environ['TESTING'] == 'False':
load_secret_keys(app)
def sub_menu_open(menu_entries: List[Dict[str, str]], active_route: str) -> bool:
"""
Expose submenu method for jinja templates.
:param menu_entries: list of menu entries
:type menu_entries: List
:param active_route: current active flask route
:type active_route: str
:return: True if any menu entry's route matches the active route, False otherwise
:rtype: bool
"""
for menu_entry in menu_entries:
if 'route' in menu_entry and menu_entry['route'] == active_route:
return True
return False
app.jinja_env.globals.update(sub_menu_open=sub_menu_open)
app.jinja_env.add_extension('jinja2.ext.loopcontrols')
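# A minimal sketch of how a template might call the helper registered above.
# The "entries" variable and the markup are hypothetical and not taken from
# this project's templates:
#
#   {% if sub_menu_open(entries, request.endpoint) %}
#       <li class="active">...</li>
#   {% endif %}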
def date_time_format(value: datetime, fmt: str = '%Y-%m-%d %H:%M:%S') -> str:
"""
Add datetime format filter.
:param value: date
:type value: datetime
:param fmt: format for the returned string, defaults to '%Y-%m-%d %H:%M:%S'
:type fmt: str, optional
:return: string representing date-time in given format
:rtype: str
"""
return value.strftime(fmt)
def get_github_issue_link(issue_id: int) -> str:
"""
Get GitHub issue link from issue_id.
:param issue_id: id of the GitHub issue
:type issue_id: int
:return: URL to the GitHub issue
:rtype: str
"""
return f'https://www.github.com/{config.get("GITHUB_OWNER", "")}/' \
f'{config.get("GITHUB_REPOSITORY", "")}/issues/{issue_id}'
def filename(filepath: str) -> str:
"""
Get filename from full filepath.
:param filepath: full path of the file
:type filepath: str
:return: filename
:rtype: str
"""
return os.path.basename(filepath)
app.jinja_env.filters['date'] = date_time_format
app.jinja_env.filters['issue_link'] = get_github_issue_link
app.jinja_env.filters['filename'] = filename
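# Rough illustration of the filters registered above as they might appear in
# a Jinja template; the variable names here are hypothetical:
#
#   {{ test.created | date }}
#   {{ regression_test.github_issue_id | issue_link }}
#   {{ upload.file_location | filename }}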
class RegexConverter(BaseConverter):
"""Establish class to handle Regex routes."""
def __init__(self, url_map: Map, *items: Any) -> None:
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
# Allow regexps in routes
app.url_map.converters['regex'] = RegexConverter
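# A minimal sketch of a route using the converter; the endpoint name and the
# pattern are made up for illustration, and the real routes live in the
# blueprints registered below:
#
#   @app.route('/files/<regex("[A-Za-z0-9_-]+"):file_id>')
#   def serve_file(file_id):
#       ...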
@app.errorhandler(404)
@template_renderer('404.html', 404)
def not_found(error: NotFound):
"""Handle not found error in non-existing routes."""
return
@app.errorhandler(500)
@template_renderer('500.html', 500)
def internal_error(error: InternalServerError):
"""Handle internal server error."""
log.debug(f'500 error: {error}')
log.debug('Stacktrace:')
log.debug(traceback.format_exc())
return
@app.errorhandler(403)
@template_renderer('403.html', 403)
def forbidden(error: Forbidden) -> Dict[str, str]:
"""Handle unauthorized and forbidden access error."""
user_name = 'Guest' if g.user is None else g.user.name
user_role = 'Guest' if g.user is None else g.user.role.value
log.debug(f'{user_name} (role: {user_role}) tried to access {error.description}')
return {
'user_role': user_role,
'endpoint': error.description
}
@app.before_request
def before_request() -> None:
"""Set up app before first request to the app."""
g.menu_entries = {}
g.db = create_session(app.config['DATABASE_URI'])
g.mailer = Mailer(
app.config.get('EMAIL_DOMAIN', ''), app.config.get('EMAIL_API_KEY', ''), 'CCExtractor.org CI Platform'
)
g.version = "0.1"
g.log = log
g.github = get_github_config(app.config)
def get_github_config(config: Dict[str, str]) -> Dict[str, str]:
"""
Get configuration keys for GitHub API.
:param config: app config
:type config: Config Class
:return: key-value dictionary of required GitHub keys
:rtype: dict
"""
return {
'deploy_key': config.get('GITHUB_DEPLOY_KEY', ''),
'ci_key': config.get('GITHUB_CI_KEY', ''),
'bot_token': config.get('GITHUB_TOKEN', ''),
'bot_name': config.get('GITHUB_BOT', ''),
'repository_owner': config.get('GITHUB_OWNER', ''),
'repository': config.get('GITHUB_REPOSITORY', '')
}
@app.teardown_appcontext
def teardown(exception: Optional[Exception]):
"""Free database connection at app closing."""
db = g.get('db', None)
if db is not None:
db.remove()
# Register blueprints
app.register_blueprint(mod_auth, url_prefix='/account')
app.register_blueprint(mod_upload, url_prefix='/upload')
app.register_blueprint(mod_regression, url_prefix='/regression')
app.register_blueprint(mod_sample, url_prefix='/sample')
app.register_blueprint(mod_home)
app.register_blueprint(mod_deploy)
app.register_blueprint(mod_test, url_prefix="/test")
app.register_blueprint(mod_ci)
app.register_blueprint(mod_customized, url_prefix='/custom')
import random
import unittest
from datetime import datetime, timedelta
from django import template
from django.conf import settings
from django.contrib import comments
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ViewDoesNotExist
from django.core.urlresolvers import reverse
from django.db import models
from django.template import Template
from django.template.defaultfilters import slugify
from django.test import TestCase
from django.test.client import Client
from panya.admin import ModelBaseAdmin
from panya.models import ModelBase
from panya.utils.tests import RequestFactory
from photologue.models import PhotoSize
from secretballot.models import Vote
class DummyRelationalModel1(models.Model):
pass
models.register_models('panya', DummyRelationalModel1)
class DummyRelationalModel2(models.Model):
pass
models.register_models('panya', DummyRelationalModel2)
class DummyModel(ModelBase):
test_editable_field = models.CharField(max_length=32)
test_non_editable_field = models.CharField(max_length=32, editable=False)
test_foreign_field = models.ForeignKey('DummyRelationalModel1', blank=True, null=True,)
test_many_field = models.ManyToManyField('DummyRelationalModel2')
test_member = True
models.register_models('panya', DummyModel)
class TrunkModel(ModelBase):
pass
models.register_models('panya', TrunkModel)
class BranchModel(TrunkModel):
pass
models.register_models('panya', BranchModel)
class LeafModel(BranchModel):
pass
models.register_models('panya', LeafModel)
class TestModel(ModelBase):
pass
models.register_models('panya', TestModel)
class UtilsTestCase(unittest.TestCase):
def test_generate_slug(self):
# on save a slug should be set
obj = ModelBase(title='utils test case title')
obj.save()
self.failIf(obj.slug=='')
# slug should become sluggified version of title
obj = ModelBase(title='utils test case title 1')
obj.save()
self.failUnless(obj.slug==slugify(obj.title))
# no two items should have the same slug
obj = ModelBase(title='utils test case title 1')
obj.save()
# in case an object title is updated, the slug should also be updated
obj.title = "updated title"
obj.save()
self.failUnless(obj.slug==slugify(obj.title))
# in case an object is updated, without the title being changed, the slug should remain unchanged
orig_slug = obj.slug
obj.save()
self.failUnless(obj.slug==orig_slug)
# make sure the slug is actually saved
obj = ModelBase.objects.get(id=obj.id)
self.failIf(obj.slug=='')
# Empty slugs might trip up regex query.
obj = ModelBase()
obj.save()
obj = ModelBase()
obj.save()
obj = ModelBase()
obj.save()
class ModelBaseTestCase(unittest.TestCase):
def test_save(self):
before_save = datetime.now()
# created field should be set on save
obj = ModelBase(title='title')
obj.save()
# created field should be set to current datetime on save
after_save = datetime.now()
self.failIf(obj.created > after_save or obj.created < before_save)
# if a user supplies a created date use that instead of the current datetime
test_datetime = datetime(2008, 10, 10, 12, 12)
obj = ModelBase(title='title', created=test_datetime)
obj.save()
self.failIf(obj.created != test_datetime)
# modified should be set to current datetime on each save
before_save = datetime.now()
obj = ModelBase(title='title')
obj.save()
after_save = datetime.now()
self.failIf(obj.modified > after_save or obj.modified < before_save)
# leaf class content type should be set on save
obj = DummyModel(title='title')
obj.save()
self.failUnless(obj.content_type == ContentType.objects.get_for_model(DummyModel))
# leaf class class name should be set on save
self.failUnless(obj.class_name == DummyModel.__name__)
# correct leaf class content type should be retained over base class' content type
base = obj.modelbase_ptr
base.save()
self.failUnless(base.content_type == ContentType.objects.get_for_model(DummyModel))
# correct leaf class class name should be retained over base class' class name
self.failUnless(base.class_name == DummyModel.__name__)
def test_as_leaf_class(self):
obj = LeafModel(title='title')
obj.save()
# always return the leaf class, no matter where we are in the hierarchy
self.failUnless(TrunkModel.objects.get(slug=obj.slug).as_leaf_class() == obj)
self.failUnless(BranchModel.objects.get(slug=obj.slug).as_leaf_class() == obj)
self.failUnless(LeafModel.objects.get(slug=obj.slug).as_leaf_class() == obj)
def test_vote_total(self):
# create object with some votes
obj = ModelBase(title='title')
obj.save()
obj.add_vote("token1", 1)
obj.add_vote("token2", -1)
obj.add_vote("token3", 1)
# vote_total should return an integer
result = obj.vote_total
self.failUnlessEqual(result.__class__, int)
# vote total is calculated as total_upvotes - total_downvotes
self.failUnlessEqual(result, 1)
def test_is_permitted(self):
# create website site item and set as current site
web_site = Site(domain="web.address.com")
web_site.save()
settings.SITE_ID = web_site.id
# create unpublished item
unpublished_obj = ModelBase(title='title', state='unpublished')
unpublished_obj.save()
unpublished_obj.sites.add(web_site)
unpublished_obj.save()
# create published item
published_obj = ModelBase(title='title', state='published')
published_obj.save()
published_obj.sites.add(web_site)
published_obj.save()
# create staging item
staging_obj = ModelBase(title='title', state='staging')
staging_obj.save()
staging_obj.sites.add(web_site)
staging_obj.save()
# is_permitted should be False for unpublished objects
self.failIf(unpublished_obj.is_permitted)
# is_permitted should be True for published objects
self.failUnless(published_obj.is_permitted)
# is_permitted should be True for staging objects only when settings.STAGING is True
settings.STAGING = False
self.failIf(staging_obj.is_permitted)
settings.STAGING = True
self.failUnless(staging_obj.is_permitted)
# is_permitted should be True only if the object is published for the current site
published_obj_web = ModelBase(state='published')
published_obj_web.save()
published_obj_web.sites.add(web_site)
published_obj_web.save()
self.failUnless(published_obj_web.is_permitted)
# is_permitted should be False if the object is not published for the current site
mobile_site = Site(domain="mobi.address.com")
mobile_site.save()
published_obj_mobile = ModelBase(state='published')
published_obj_mobile.save()
published_obj_mobile.sites.add(mobile_site)
published_obj_mobile.save()
self.failIf(published_obj_mobile.is_permitted)
def test_can_vote(self):
# create dummy request object
request = type('Request', (object,), {})
class User():
def is_authenticated(self):
return False
request.user = User()
request.secretballot_token = 'test_token'
# return false when liking is closed
obj = ModelBase(likes_enabled=True, likes_closed=True, anonymous_likes=True)
obj.save()
self.failIf(obj.can_vote(request)[0])
# return false when liking is disabled
obj = ModelBase(likes_enabled=False, likes_closed=False, anonymous_likes=True)
obj.save()
self.failIf(obj.can_vote(request)[0])
# return false if anonymous and anonymous liking is disabled
obj = ModelBase(likes_enabled=True, likes_closed=False, anonymous_likes=False)
obj.save()
self.failIf(obj.can_vote(request)[0])
# return true if anonymous and anonymous liking is enabled
obj = ModelBase(likes_enabled=True, likes_closed=False, anonymous_likes=True)
obj.save()
self.failUnless(obj.can_vote(request)[0])
# return false if vote already exist
content_type = ContentType.objects.get(app_label="panya", model="modelbase")
Vote.objects.create(object_id=obj.id, token='test_token', content_type=content_type, vote=1)
self.failIf(obj.can_vote(request)[0])
def test_comment_count(self):
comment_model = comments.get_model()
# Return 0 if no comments exist.
obj = ModelBase()
obj.save()
self.failUnless(obj.comment_count == 0)
# Return the number of comments if comments exist. Here it should be 1 since we've created 1 comment.
comment_obj = comment_model(content_object=obj, site_id=1)
comment_obj.save()
self.failUnless(obj.comment_count == 1)
# Return 0 if no comments exist.
dummy_obj = DummyModel()
dummy_obj.save()
self.failUnless(dummy_obj.comment_count == 0)
# Return the number of comments if comments exist on the ModelBase object.
# Here it should be 1 since we've created 1 comment on the ModelBase object.
comment_obj = comment_model(content_object=dummy_obj.modelbase_obj, site_id=1)
comment_obj.save()
self.failUnless(dummy_obj.modelbase_obj.comment_count == 1)
# If a comment was made on the ModelBase object it should still count for leaf class objects.
self.failUnless(dummy_obj.comment_count == 1)
# Add another comment on dummy object and make sure the count is 2 for both the dummy object and its modelbase object.
comment_obj = comment_model(content_object=dummy_obj, site_id=1)
comment_obj.save()
self.failUnless(dummy_obj.comment_count == 2)
self.failUnless(dummy_obj.modelbase_obj.comment_count == 2)
# There should now only be 3 comment objects.
self.failUnless(comment_model.objects.all().count() == 3)
def test_can_comment(self):
# create dummy request object
request = type('Request', (object,), {})
class User():
def is_authenticated(self):
return False
request.user = User()
request.secretballot_token = 'test_token'
# return false when commenting is closed
obj = ModelBase(comments_enabled=True, comments_closed=True, anonymous_comments=True)
obj.save()
self.failIf(obj.can_comment(request))
# return false when commenting is disabled
obj = ModelBase(comments_enabled=False, comments_closed=False, anonymous_comments=True)
obj.save()
self.failIf(obj.can_comment(request))
# return false if anonymous and anonymous commenting is disabled
obj = ModelBase(comments_enabled=True, comments_closed=False, anonymous_comments=False)
obj.save()
self.failIf(obj.can_comment(request))
# return true if anonymous and anonymous commenting is enabled
obj = ModelBase(comments_enabled=True, comments_closed=False, anonymous_comments=True)
obj.save()
self.failUnless(obj.can_comment(request))
class ModelBaseAdminTestCase(unittest.TestCase):
def setUp(self):
self.user, self.created = User.objects.get_or_create(username='test', email='test@test.com')
def test_field_hookup(self):
model_admin = ModelBaseAdmin(DummyModel, AdminSite())
# field additions should be added to first fieldsets' fields
self.failIf('test_editable_field' not in model_admin.fieldsets[0][1]['fields'])
self.failIf('test_foreign_field' not in model_admin.fieldsets[0][1]['fields'])
self.failIf('test_many_field' not in model_admin.fieldsets[0][1]['fields'])
# non editable field additions should not be added to fieldsets
self.failIf('test_non_editable_field' in model_admin.fieldsets[0][1]['fields'])
# non field class members should not be added to fieldsets
self.failIf('test_member' in model_admin.fieldsets[0][1]['fields'])
def test_save_model(self):
# setup mock objects
admin_obj = ModelBaseAdmin(ModelBase, 1)
request = RequestFactory()
request.user = self.user
# after admin save the object's owner should be the current user
obj = ModelBase()
admin_obj.save_model(request, obj, admin_obj.form, 1)
self.failUnless(obj.owner == self.user)
obj.save()
# TODO: if a different user is specified as owner set that user as owner
class PermittedManagerTestCase(unittest.TestCase):
def setUp(self):
# create website site item and set as current site
self.web_site = Site(domain="web.address.com")
self.web_site.save()
settings.SITE_ID = self.web_site.id
def test_get_query_set(self):
# create unpublished item
unpublished_obj = ModelBase(title='title', state='unpublished')
unpublished_obj.save()
unpublished_obj.sites.add(self.web_site)
unpublished_obj.save()
# create published item
published_obj = ModelBase(title='title', state='published')
published_obj.save()
published_obj.sites.add(self.web_site)
published_obj.save()
# create staging item
staging_obj = ModelBase(title='title', state='staging')
staging_obj.save()
staging_obj.sites.add(self.web_site)
staging_obj.save()
# unpublished objects should not be available in queryset
queryset = ModelBase.permitted.all()
self.failIf(unpublished_obj in queryset)
# published objects should always be available in queryset
self.failUnless(published_obj in queryset)
# staging objects should only be available on instances that define settings.STAGING = True
settings.STAGING = False
queryset = ModelBase.permitted.all()
self.failIf(staging_obj in queryset)
settings.STAGING = True
queryset = ModelBase.permitted.all()
self.failUnless(staging_obj in queryset)
# queryset should only contain items for the current site
published_obj_web = ModelBase(state='published')
published_obj_web.save()
published_obj_web.sites.add(self.web_site)
published_obj_web.save()
queryset = ModelBase.permitted.all()
self.failUnless(published_obj_web in queryset)
# queryset should not contain items for other sites
mobile_site = Site(domain="mobi.address.com")
mobile_site.save()
published_obj_mobile = ModelBase(state='published')
published_obj_mobile.save()
published_obj_mobile.sites.add(mobile_site)
published_obj_mobile.save()
queryset = ModelBase.permitted.all()
self.failIf(published_obj_mobile in queryset)
def test_content_type(self):
obj = BranchModel(title='title', state='published')
obj.save()
obj.sites.add(self.web_site)
obj.save()
# queryset should return objects of the same type as the queried model
queryset = BranchModel.permitted.all()
self.failUnless(obj in queryset)
queryset = ModelBase.permitted.all()
self.failIf(obj in queryset)
class InclusionTagsTestCase(unittest.TestCase):
def setUp(self):
obj = TestModel(title='title', state='published')
obj.save()
self.context = template.Context({'object': obj})
def test_render_tag(self):
# load correct template for provided object and type
t = Template("{% load panya_inclusion_tags %}{% render_object object 'block' %}")
result = t.render(self.context)
expected_result = u'Test string for testing purposes\n'
self.failUnlessEqual(result, expected_result)
# if template is not available for object, fall back to default content template
obj = BranchModel(title='title', state='published')
obj.save()
self.context = template.Context({'object': obj})
t = Template("{% load panya_inclusion_tags %}{% render_object object 'block' %}")
result = t.render(self.context)
self.failUnless(result)
# return the empty string if no template can be found for the given type for either obj or content.
t = Template("{% load panya_inclusion_tags %}{% render_object object 'foobar' %}")
result = t.render(self.context)
expected_result = u''
self.failUnlessEqual(result, expected_result)
class TemplateTagsTestCase(unittest.TestCase):
def setUp(self):
def url_callable(obj):
return 'Test URL method using object %s' % obj.__class__.__name__
obj = TestModel(title='title', state='published')
obj.save()
self.context = template.Context({'object': obj, 'url_callable': url_callable})
def test_smart_url(self):
# return method call with result based on object provided
t = Template("{% load panya_template_tags %}{% smart_url url_callable object %}")
result = t.render(self.context)
self.failUnlessEqual(result, 'Test URL method using object TestModel')
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various classes representing distributed values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import weakref
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import packed_distributed_variable as packed
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values_util
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import ops
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.saved_model import save_context
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.types import core
from tensorflow.python.util.tf_export import tf_export
def _on_write_update_replica(var, update_fn, value, **kwargs):
"""Updates variables with ON_WRITE synchronization in replica context."""
if var.aggregation == vs.VariableAggregation.NONE:
return update_fn(var._get_on_device_or_primary(), value, **kwargs) # pylint: disable=protected-access
def merge_fn(strategy, value, **kwargs):
"""Aggregate values and update all variables in cross replica context."""
# Don't allow MEAN with non float dtype, since it may cause unexpected
# precision loss. Python3 and NumPy automatically upcast integers to
# float in division, but we should always preserve the type.
#
# Note that to be backward compatible we allow the case when the value
# is *always* the same on each replica. I.E. value is not a
# PerReplica. Refer to regroup() to see how values are grouped.
if var.aggregation == vs.VariableAggregation.MEAN and (
not var.dtype.is_floating) and isinstance(value, PerReplica):
raise ValueError(
"Cannot update non-float variables with "
"tf.VariableAggregation.MEAN aggregation in replica context. "
"Either change the variable dtype to float or update it in "
"cross-replica context.")
assert strategy == var.distribute_strategy
v = values_util.apply_aggregation(strategy, value, var.aggregation, var)
return var._update_cross_replica(update_fn, v, **kwargs) # pylint: disable=protected-access
return ds_context.get_replica_context().merge_call(
merge_fn, args=(value,), kwargs=kwargs)
@tf_export("distribute.DistributedValues", v1=[])
class DistributedValues(object):
"""Base class for representing distributed values.
A subclass instance of `tf.distribute.DistributedValues` is created when
creating variables within a distribution strategy, iterating a
`tf.distribute.DistributedDataset` or through `tf.distribute.Strategy.run`.
This base class should never be instantiated directly.
`tf.distribute.DistributedValues` contains a value per replica. Depending on
the subclass, the values could either be synced on update, synced on demand,
or never synced.
`tf.distribute.DistributedValues` can be reduced to obtain a single value
across replicas, used as input into `tf.distribute.Strategy.run`, or have
their per-replica values inspected using
`tf.distribute.Strategy.experimental_local_results`.
Example usage:
1. Created from a `tf.distribute.DistributedDataset`:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)
>>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_values = next(dataset_iterator)
2. Returned by `run`:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> @tf.function
... def run():
... ctx = tf.distribute.get_replica_context()
... return ctx.replica_id_in_sync_group
>>> distributed_values = strategy.run(run)
3. As input into `run`:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)
>>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_values = next(dataset_iterator)
>>> @tf.function
... def run(input):
... return input + 1.0
>>> updated_value = strategy.run(run, args=(distributed_values,))
4. Reduce value:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)
>>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_values = next(dataset_iterator)
>>> reduced_value = strategy.reduce(tf.distribute.ReduceOp.SUM,
... distributed_values,
... axis = 0)
5. Inspect local replica values:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)
>>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> per_replica_values = strategy.experimental_local_results(
... distributed_values)
>>> per_replica_values
(<tf.Tensor: shape=(1,), dtype=float32, numpy=array([5.], dtype=float32)>,
<tf.Tensor: shape=(1,), dtype=float32, numpy=array([6.], dtype=float32)>)
"""
def __init__(self, values):
"""Should only be called by subclass __init__."""
self._values = tuple(values)
def _get(self):
"""Returns the value for the current device or raises a ValueError."""
replica_id = values_util.get_current_replica_id_as_int()
if replica_id is None:
return self._get_cross_replica()
else:
return self._values[replica_id]
def _get_cross_replica(self):
raise NotImplementedError(
"This method should be overridden by sub-classes which support cross-"
"replica accesses.")
def _get_on_device_or_primary(self):
"""Returns value in same replica or device if possible, else the _primary."""
replica_id = values_util.get_current_replica_id_as_int()
if replica_id is None:
# Try to find a value on the current device.
current_device = device_util.canonicalize(device_util.current())
for value in self._values:
if device_util.canonicalize(value.device) == current_device:
return value
return self._primary
else:
return self._values[replica_id]
@property
def _primary(self):
"""Returns a representative component."""
return self._values[0]
@property
def _devices(self):
return tuple(v.device for v in self._values)
def __str__(self):
debug_str = ",\n".join(
" %d: %s" % (i, v) for i, v in enumerate(self._values))
return "%s:{\n%s\n}" % (self.__class__.__name__, debug_str)
def __repr__(self):
debug_repr = ",\n".join(
" %d: %r" % (i, v) for i, v in enumerate(self._values))
return "%s:{\n%s\n}" % (self.__class__.__name__, debug_repr)
# NOTE(josh11b,apassos): It would be great if we could inspect the values this was
# initialized with and use that to generate the overloaded operators here.
# Unfortunately, Python's rules for special methods don't allow this, see
# https://docs.python.org/3/reference/datamodel.html#special-method-names
# "if a class defines a method named __getitem__(), and x is an instance of
# this class, then x[i] is roughly equivalent to type(x).__getitem__(x, i)."
# In particular, these special methods don't go through __getattr__, and
# it will only use those methods if they are defined in the class, not the
# object.
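# A rough, self-contained illustration of that constraint (plain Python, not
# part of this module's API); the Wrapper class below is hypothetical:
#
#   class Wrapper(object):
#       def __init__(self, value):
#           self._value = value
#       def __getattr__(self, name):
#           return getattr(self._value, name)
#
#   Wrapper(2) + 3          # TypeError: __add__ is never reached via __getattr__
#   Wrapper(2).__add__(3)   # 5, because the attribute is looked up by name
#
# This is why the operator overloads below are defined explicitly on the class
# instead of relying on __getattr__ delegation.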
class DistributedDelegate(DistributedValues):
"""A map from device to values; acts as the same type as the values."""
def __getattr__(self, name):
# The '_use_resource_variables' attribute and the attrs starting with '_self_'
# are used for restoring the saved_model proto, and '_attribute_sentinel' is
# used for Layer tracking. At the point these attrs are queried, the variable
# has not been initialized. Thus it should not query those of the underlying
# components.
if name.startswith("_self_") or name in ("_use_resource_variables",
"_attribute_sentinel",
"_distributed_container"):
return super(DistributedDelegate, self).__getattr__(name)
# This allows copy.copy(DistributedDelegate). When copying an object,
# copy.copy doesn't invoke its __init__ method, instead it makes a new
# empty object, then copies the attributes over. copy.copy looks for
# attributes like "__getstate__" in case the object implements its custom
# copying. Since DistributedDelegate doesn't have those attributes defined,
# __getattr__ will be invoked, which tries to access "_values" attributes,
# but that doesn't exist either because this is an empty object, and again
# __getattr__ is invoked, leading to an infinite recursion.
if name == "_values":
raise AttributeError()
# TODO(priyag): This needs to be made robust against pitfalls from mix use
# __getattr__ and @property. See b/120402273.
return getattr(self._get(), name)
@property
def values(self):
"""Returns the per replica values."""
return self._values
def _get_as_operand(self):
"""Returns the value for operations for the current device.
Some implementations, e.g. `TPUMirroredVariable`, are not able to return the
value type within a replica context. They can, however, return a value that
can be used by the operations below.
"""
return self._get()
# pylint: disable=multiple-statements
def __add__(self, o):
return self._get_as_operand() + o
def __radd__(self, o):
return o + self._get_as_operand()
def __sub__(self, o):
return self._get_as_operand() - o
def __rsub__(self, o):
return o - self._get_as_operand()
def __mul__(self, o):
return self._get_as_operand() * o
def __rmul__(self, o):
return o * self._get_as_operand()
def __truediv__(self, o):
return self._get_as_operand() / o
def __rtruediv__(self, o):
return o / self._get_as_operand()
def __floordiv__(self, o):
return self._get_as_operand() // o
def __rfloordiv__(self, o):
return o // self._get_as_operand()
def __mod__(self, o):
return self._get_as_operand() % o
def __rmod__(self, o):
return o % self._get_as_operand()
def __lt__(self, o):
return self._get_as_operand() < o
def __le__(self, o):
return self._get_as_operand() <= o
def __gt__(self, o):
return self._get_as_operand() > o
def __ge__(self, o):
return self._get_as_operand() >= o
def __and__(self, o):
return self._get_as_operand() & o
def __rand__(self, o):
return o & self._get_as_operand()
def __or__(self, o):
return self._get_as_operand() | o
def __ror__(self, o):
return o | self._get_as_operand()
def __xor__(self, o):
return self._get_as_operand() ^ o
def __rxor__(self, o):
return o ^ self._get_as_operand()
def __getitem__(self, o):
return self._get_as_operand()[o]
def __pow__(self, o, modulo=None):
return pow(self._get_as_operand(), o, modulo)
def __rpow__(self, o):
return pow(o, self._get_as_operand())
def __invert__(self):
return ~self._get_as_operand()
def __neg__(self):
return -self._get_as_operand()
def __abs__(self):
return abs(self._get_as_operand())
def __div__(self, o):
try:
return self._get_as_operand().__div__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rdiv__(self, o):
try:
return self._get_as_operand().__rdiv__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __matmul__(self, o):
try:
return self._get_as_operand().__matmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rmatmul__(self, o):
try:
return self._get_as_operand().__rmatmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
# TODO(josh11b): Even more operator overloads.
class PerReplica(DistributedValues, composite_tensor.CompositeTensor):
"""Holds a map from replica to unsynchronized values."""
@property
def _type_spec(self):
return PerReplicaSpec(
*(type_spec.type_spec_from_value(v) for v in self._values))
@property
def values(self):
"""Returns the per replica values."""
return self._values
class PerReplicaSpec(type_spec.TypeSpec):
"""Type specification for a `PerReplica`."""
__slots__ = ["_value_specs"]
value_type = property(lambda self: PerReplica)
def __init__(self, *value_specs):
self._value_specs = tuple(value_specs)
def _serialize(self):
return self._value_specs
@property
def _component_specs(self):
return self._value_specs
def _to_components(self, value):
replica_context = ds_context.get_replica_context()
if replica_context is not None and replica_context.num_replicas_in_sync > 1:
raise ValueError(
"Flattening a PerReplica to components is not supported in replica "
"context.")
return value._values # pylint: disable=protected-access
def _from_components(self, tensor_list):
return PerReplica(tensor_list)
# Note that unlike PerReplica, Mirrored values inherit from
# DistributedDelegate and so can be used directly in cross-replica mode.
# TODO(tomhennigan) Should this extend CompositeTensor?
class Mirrored(DistributedDelegate):
"""Holds a map from replica to values which are kept in sync."""
def _get_cross_replica(self):
return self._get_on_device_or_primary()
def _as_graph_element(self):
obj = self._get()
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return obj
class DistributedVarOp(object):
"""A class that looks like `tf.Operation`."""
def __init__(self, name, graph, traceback, typ):
self.name = name
self.graph = graph
self.traceback = traceback
self.type = typ
def __eq__(self, o):
if not isinstance(o, self.__class__):
raise NotImplementedError
return (self.name == o.name and self.graph == o.graph and
self.traceback == o.traceback and self.type == o.type)
def __hash__(self):
return hash((self.name, self.graph, tuple(self.traceback), self.type))
class DistributedVariable(DistributedDelegate, variables_lib.Variable,
core.Tensor):
"""Holds a map from replica to variables."""
def __init__(self, strategy, values, aggregation, var_policy=None):
self._distribute_strategy = strategy
self._aggregation = aggregation
super(DistributedVariable, self).__init__(values)
self._common_name = self._primary.name.split(":")[0]
# Use a weakref to make it easy to map from the contained values
# to the container without introducing a reference cycle.
for v in values:
v._distributed_container = weakref.ref(self) # pylint: disable=protected-access
# Packed variable is used to reduce the overhead of function execution.
# For a DistributedVariable, only one variable handle is captured into a
# function graph. It's only supported in eager mode.
if ops.executing_eagerly_outside_functions() and getattr(
strategy, "_enable_packed_variable_in_eager_mode", False):
name = "%s/packed/" % self._common_name
self._packed_var = packed.PackedDistributedVariable(values, name=name)
else:
self._packed_var = None
# tf.keras keeps track of variables initialized using this attribute. When
# tf.keras gets the default session, it initializes all uninitialized vars.
# We need to make _keras_initialized a member of DistributedVariable because
# without this it will use `__getattr__` which will delegate to a component
# variable.
self._keras_initialized = False
# Typically, a `DistributedVariable`'s initializer is composed of the
# initializers of the components variables. However, in some cases, such as
# when restoring from a checkpoint, we may set the _initializer_op
# property on the entire `DistributedVariable`.
self._initializer_op = None
# Set a VariablePolicy which decides how we replicate/aggregate the given
# variable.
self._policy = var_policy
def __deepcopy__(self, memo):
"""Perform a deepcopy of the `DistributedVariable`.
Unlike the deepcopy of a regular tf.Variable, this keeps the original
strategy and devices of the `DistributedVariable`. To avoid confusion
with the behavior of deepcopy on a regular `Variable` (which does
copy into new devices), we only allow a deepcopy of a `DistributedVariable`
within its originating strategy scope.
Args:
memo: The memoization object for `deepcopy`.
Returns:
A deep copy of the current `DistributedVariable`.
Raises:
RuntimeError: If trying to deepcopy into a different strategy.
"""
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
new_values = []
for value in self._values:
with ops.device(value.device):
new_values.append(copy.deepcopy(value, memo))
copied_variable = type(self)(
strategy=self._distribute_strategy,
values=new_values,
aggregation=self._aggregation,
var_policy=copy.deepcopy(self._policy, memo))
memo[id(self)] = copied_variable
return copied_variable
def _use_packed_variable(self):
# Don't use packed variable when under a SaveContext to avoid explicit
# device placement on variable consuming ops.
return self._packed_var is not None and not save_context.in_save_context()
def is_initialized(self, name=None):
"""Identifies if all the component variables are initialized.
Args:
name: Name of the final `logical_and` op.
Returns:
The op that evaluates to True or False depending on if all the
component variables are initialized.
"""
if values_util.is_saving_non_distributed():
return self._primary.is_initialized()
if self._use_packed_variable():
return self._packed_var.is_initialized()
result = self._primary.is_initialized()
# We iterate through the list of values except the last one to allow us to
# name the final `logical_and` op the same name that is passed by the user
# to the `is_initialized` op. For distributed variables, the
# `is_initialized` op is a `logical_and` op.
for v in self._values[1:-1]:
result = math_ops.logical_and(result, v.is_initialized())
result = math_ops.logical_and(
result, self._values[-1].is_initialized(), name=name)
return result
@property
def initializer(self):
if values_util.is_saving_non_distributed():
return self._primary.initializer
if self._initializer_op:
init_op = self._initializer_op
else:
# Return a grouped op of the initializers of all the component values of
# the mirrored variable.
init_op = control_flow_ops.group(
tuple(v.initializer for v in self._values))
return init_op
def initialized_value(self):
return self._get_on_device_or_primary().initialized_value()
@property
def initial_value(self):
return self._get_on_device_or_primary().initial_value
@property
def constraint(self):
return self._primary.constraint
@property
def graph(self):
return self._primary.graph
@property
def _shared_name(self):
return self._common_name
@property
def _unique_id(self):
return self._primary._unique_id # pylint: disable=protected-access
@property
def _graph_key(self):
"""Lets Optimizers know which graph this variable is from."""
return self._primary._graph_key # pylint: disable=protected-access
@property
def name(self):
return self._primary.name
@property
def dtype(self):
return self._primary.dtype
@property
def shape(self):
return self._primary.shape
@property
def synchronization(self):
return self._primary.synchronization
@property
def aggregation(self):
return self._aggregation
@property
def _packed_variable(self):
if self._use_packed_variable():
return self._packed_var
return None
@property
def handle(self):
if values_util.is_saving_non_distributed():
return self._primary.handle
replica_id = values_util.get_current_replica_id_as_int()
if replica_id is None:
raise ValueError("`handle` is not available outside the replica context"
" or a `tf.distribute.Strategy.update()` call.")
else:
if self._use_packed_variable():
return self._packed_var.handle
return self._values[replica_id].handle
def eval(self, session=None):
return self._get_on_device_or_primary().eval(session)
@property
def _save_slice_info(self):
return self._primary._save_slice_info # pylint: disable=protected-access
def _get_save_slice_info(self):
return self._primary._get_save_slice_info() # pylint: disable=protected-access
def _set_save_slice_info(self, save_slice_info):
for v in self._values:
v._set_save_slice_info(save_slice_info) # pylint: disable=protected-access
@property
def device(self):
return self._get_on_device_or_primary().device
@property
def trainable(self):
return self._primary.trainable
@property
def distribute_strategy(self):
return self._distribute_strategy
def get_shape(self):
return self._primary.get_shape()
def to_proto(self, export_scope=None):
return self._primary.to_proto(export_scope=export_scope)
@property
def op(self):
if values_util.is_saving_non_distributed():
return self._primary.op
# We want cross-replica code that does some var.op.X calls
# to work (even if the current device isn't in self._devices), but
# other uses of var.op in a cross-replica context to fail.
if ds_context.in_cross_replica_context():
return DistributedVarOp(self._primary.op.name, self._primary.op.graph,
self._primary.op.traceback, self._primary.op.type)
return self._get().op
@property
def _in_graph_mode(self):
return self._primary._in_graph_mode # pylint: disable=protected-access
def _get_replica(self, replica_id):
"""Returns the value on a device with the given replica_id."""
if self._use_packed_variable():
return self._packed_var.on_device(self._devices[replica_id])
return self._values[replica_id]
def _get(self):
"""Returns the value for the current device or raises a ValueError."""
if values_util.is_saving_non_distributed():
return self._primary
replica_id = values_util.get_current_replica_id_as_int()
if replica_id is None:
return self._get_cross_replica()
else:
return self._get_replica(replica_id)
def _get_on_device_or_primary(self):
"""Returns value in same replica or device if possible, else the _primary."""
if values_util.is_saving_non_distributed():
return self._primary
replica_id = values_util.get_current_replica_id_as_int()
if replica_id is None:
# Try to find a value on the current device.
current_device = device_util.canonicalize(device_util.current())
for i, value in enumerate(self._values):
if device_util.canonicalize(value.device) == current_device:
return self._get_replica(i)
return self._get_replica(0)
else:
return self._get_replica(replica_id)
def read_value(self):
if values_util.is_saving_non_distributed():
return self._primary.read_value()
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
return array_ops.identity(self._get())
def value(self):
if values_util.is_saving_non_distributed():
return self._primary.value()
if self._policy:
return self._policy.value(self)
return self._get_on_device_or_primary().value()
def numpy(self):
if context.executing_eagerly():
return self.read_value().numpy()
else:
raise NotImplementedError(
"numpy() is only available when eager execution is enabled.")
def assign_sub(self, value, use_locking=False, name=None, read_value=True):
if values_util.is_saving_non_distributed():
return self._primary.assign_sub(value, use_locking, name, read_value)
if self._policy:
return self._policy.assign_sub(
self,
value,
use_locking=use_locking,
name=name,
read_value=read_value)
return values_util.on_write_assign_sub(
self, value, use_locking=use_locking, name=name, read_value=read_value)
def assign_add(self, value, use_locking=False, name=None, read_value=True):
if values_util.is_saving_non_distributed():
return self._primary.assign_add(value, use_locking, name, read_value)
if self._policy:
return self._policy.assign_add(
self,
value,
use_locking=use_locking,
name=name,
read_value=read_value)
return values_util.on_write_assign_add(
self, value, use_locking=use_locking, name=name, read_value=read_value)
def assign(self, value, use_locking=False, name=None, read_value=True):
if values_util.is_saving_non_distributed():
return self._primary.assign(value, use_locking, name, read_value)
if self._policy:
return self._policy.assign(
self,
value,
use_locking=use_locking,
name=name,
read_value=read_value)
return values_util.on_write_assign(
self, value, use_locking=use_locking, name=name, read_value=read_value)
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_sub(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_sub(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_sub(
self, sparse_delta, use_locking=use_locking, name=name)
def scatter_add(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_add(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_add(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_add(
self, sparse_delta, use_locking=use_locking, name=name)
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_mul(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_mul(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_mul(
self, sparse_delta, use_locking=use_locking, name=name)
def scatter_div(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_div(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_div(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_div(
self, sparse_delta, use_locking=use_locking, name=name)
def scatter_min(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_min(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_min(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_min(
self, sparse_delta, use_locking=use_locking, name=name)
def scatter_max(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_max(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_max(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_max(
self, sparse_delta, use_locking=use_locking, name=name)
def scatter_update(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_update(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_update(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_update(
self, sparse_delta, use_locking=use_locking, name=name)
def _gather_saveables_for_checkpoint(self):
"""Overrides Trackable method.
This allows both name-based and object-based save and restore of
DistributedVariables.
Returns:
A dictionary mapping attribute names to `SaveableObject` factories.
"""
def _saveable_factory(name=self._common_name):
return _DistributedVariableSaveable(self, self._primary, name)
return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
def _as_graph_element(self):
if values_util.is_saving_non_distributed():
return self._primary._as_graph_element() # pylint: disable=protected-access
if self._policy:
return self._policy._as_graph_element(self) # pylint: disable=protected-access
raise NotImplementedError("No policy set for calling _as_graph_element.")
def _get_cross_replica(self):
if values_util.is_saving_non_distributed():
return self._primary
if self._policy:
return self._policy._get_cross_replica(self) # pylint: disable=protected-access
raise NotImplementedError(
"This method should be overridden by sub-classes which support cross-"
"replica accesses.")
def _update_cross_replica(self, update_fn, value, **kwargs):
"""Applies updates across replicas.
Args:
update_fn: A callable to pass to `strategy.extended.update` to update the
variable. It should have the same signature as `Variable.assign()`.
value: value to be passed to `update_fn`.
**kwargs: remaining arguments to `update_fn`.
Returns:
Updated variable or `tf.Operation`.
"""
return self.distribute_strategy.extended.update(
self, update_fn, args=(value,), kwargs=kwargs, group=True)
def _update_replica(self, update_fn, value, **kwargs):
"""Applies updates in one replica.
Args:
update_fn: A callable to update the variable. It should have the same
signature as `Variable.assign()`.
value: value to be passed to `update_fn`.
**kwargs: remaining arguments to `update_fn`.
Returns:
Updated variable or `tf.Operation`.
"""
if self._policy:
return self._policy._update_replica(self, update_fn, value, **kwargs) # pylint: disable=protected-access
raise NotImplementedError("should be implemented by subclass.")
def _update(self, update_fn, value, **kwargs):
"""Applies updates depending on the context.
The method calls `_update_replica` in replica context,
`_update_cross_replica` in cross replica context, and `update_fn` in update
context.
If `read_value` is True, the method returns the updated Variable. If
`read_value` is False, the method returns the update `tf.Operation`.
Args:
update_fn: A callable to pass to `strategy.extended.update` to update the
variable. It should have the same signature as `Variable.assign()`.
value: value to be passed to `update_fn`.
**kwargs: keyword arguments to `update_fn`.
Returns:
Updated variable or `tf.Operation`.
"""
if values_util.is_saving_non_distributed():
return update_fn(self._primary, value, **kwargs)
with ds_context.enter_or_assert_strategy(self.distribute_strategy):
if ds_context.in_cross_replica_context():
update_replica_id = distribute_lib.get_update_replica_id()
if update_replica_id is not None:
replica_value = self._get_replica(update_replica_id)
return update_fn(replica_value, value, **kwargs)
return self._update_cross_replica(update_fn, value, **kwargs)
else:
values_util.assert_replica_context(self.distribute_strategy)
return self._update_replica(update_fn, value, **kwargs)
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts a variable to a tensor."""
if values_util.is_saving_non_distributed():
return ops.convert_to_tensor(
self._primary, dtype=dtype, name=name, as_ref=as_ref)
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
return ops.convert_to_tensor(
self._get(), dtype=dtype, name=name, as_ref=as_ref)
def _map_resources(self, save_options):
"""For implementing `Trackable`."""
# Initialize for self._primary first, so that obj_map[self._primary] and
# resource_map[self._primary.handle] contain mapped values.
obj_map, resource_map = self._primary._map_resources(save_options) # pylint:disable=protected-access
for v in [v for v in self._values if v != self._primary]:
if (save_options.experimental_variable_policy # pylint:disable=protected-access
._expand_distributed_variables()):
v_obj_map, v_resource_map = v._map_resources(save_options) # pylint:disable=protected-access
obj_map.update(v_obj_map)
resource_map.update(v_resource_map)
else:
obj_map[v] = obj_map[self._primary]
resource_map[v.handle] = resource_map[self._primary.handle]
obj_map[self] = obj_map[self._primary]
resource_map[self] = resource_map[self._primary.handle]
if self._packed_var is not None:
resource_map[self._packed_var.packed_handle] = resource_map[
self._primary.handle]
return obj_map, resource_map
# We extend from `saveable_object.SaveableObject` instead of
# `saveable_object_util.ResourceVariableSaveable` since we need to read the
# value of ON_READ variables when saving. `SaveableObject` provides a way to
# specify the function to run to get the value of the variable or tensor at
# saving time. We can use this for both ON_READ and ON_WRITE variables.
# TODO(b/164586507): Consolidate ON_WRITE and ON_READ saving/restoring logic
# if possible.
class _DistributedVariableSaveable(saveable_object.SaveableObject):
"""Class for defining how to restore a DistributedVariable."""
def __init__(self, distributed_variable, primary_variable, name):
self._distributed_variable = distributed_variable
if not self._distributed_variable._policy:
raise ValueError("VariablePolicy has not been set for the distributed "
"variable.")
tensor, spec = distributed_variable._policy.get_saveable(
distributed_variable, primary_variable, name)
super(_DistributedVariableSaveable, self).__init__(tensor, spec, name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into all variables."""
tensor, = restored_tensors
return self._distributed_variable._policy.get_restore_ops( # pylint: disable=protected-access
self._distributed_variable, tensor)
class _MirroredSaveable(saveable_object.SaveableObject):
"""Class for defining how to restore a MirroredVariable."""
def __init__(self, mirrored_variable, primary_variable, name):
self._mirrored_variable = mirrored_variable
tensor, spec = values_util.get_on_write_saveable(self._mirrored_variable,
primary_variable,
name)
super(_MirroredSaveable, self).__init__(tensor, spec, name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into all variables."""
tensor, = restored_tensors
return values_util.get_on_write_restore_ops(self._mirrored_variable,
tensor)
class MirroredVariable(DistributedVariable, Mirrored):
"""Holds a map from replica to variables whose values are kept in sync."""
def _update_replica(self, update_fn, value, **kwargs):
return _on_write_update_replica(self, update_fn, value, **kwargs)
def scatter_min(self, *args, **kwargs):
if values_util.is_saving_non_distributed():
return self._primary.scatter_min(*args, **kwargs)
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError(values_util.scatter_error_msg.format(
op_name="scatter_min", aggregation=self._aggregation))
return super(MirroredVariable, self).scatter_min(*args, **kwargs)
def scatter_max(self, *args, **kwargs):
if values_util.is_saving_non_distributed():
return self._primary.scatter_max(*args, **kwargs)
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError(values_util.scatter_error_msg.format(
op_name="scatter_max", aggregation=self._aggregation))
return super(MirroredVariable, self).scatter_max(*args, **kwargs)
def scatter_update(self, *args, **kwargs):
if values_util.is_saving_non_distributed():
return self._primary.scatter_update(*args, **kwargs)
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError(values_util.scatter_error_msg.format(
op_name="scatter_update", aggregation=self._aggregation))
return super(MirroredVariable, self).scatter_update(*args, **kwargs)
def _get_cross_replica(self):
# Return identity, to avoid directly exposing the variable to the user and
# allowing it to be modified by mistake.
return array_ops.identity(Mirrored._get_cross_replica(self))
def _as_graph_element(self):
return self._get_on_device_or_primary()._as_graph_element() # pylint: disable=protected-access
def _gather_saveables_for_checkpoint(self):
"""Overrides Trackable method.
This allows both name-based and object-based save and restore of
MirroredVariables.
Returns:
A dictionary mapping attribute names to `SaveableObject` factories.
"""
def _saveable_factory(name=self._common_name):
return _MirroredSaveable(self, self._primary, name)
return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts a variable to a tensor."""
# TODO(b/154017756): Make _dense_var_to_tensor consistent between ON_READ
# and ON_WRITE.
# Try to avoid assignments to and other mutations of MirroredVariable
# state except through a DistributionStrategy.extended.update() or any of
# the `assign*` and `scatter*` calls.
if as_ref:
# A TF 1.x case where the variable is a boolean variable and used like:
# tf.cond(v, true_fn, false_fn).
raise ValueError(
"You may be using variable created under distribute strategy in TF "
"1.x control flows. Try explicitly converting the variable to Tensor "
"using variable.read_value(), or switch to TF 2.x.")
return ops.convert_to_tensor(
self._get(), dtype=dtype, name=name, as_ref=as_ref)
class _SyncOnReadSaveable(saveable_object.SaveableObject):
"""Class for defining how to restore a SyncOnReadVariable."""
def __init__(self, sync_on_read_variable, name):
self._sync_on_read_variable = sync_on_read_variable
tensor, spec = values_util.get_on_read_saveable(
sync_on_read_variable, sync_on_read_variable._primary, name)
super(_SyncOnReadSaveable, self).__init__(tensor, spec, name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into all variables."""
tensor, = restored_tensors
return values_util.get_on_read_restore_ops(
self._sync_on_read_variable, tensor,
self._sync_on_read_variable.aggregation)
class SyncOnReadVariable(DistributedVariable):
"""Holds a map from replica to variables whose values are reduced on save."""
def _update_replica(self, update_fn, value, **kwargs):
return update_fn(self._get_on_device_or_primary(), value, **kwargs)
# TODO(b/154017756): Make assign behavior in cross replica context consistent
# with MirroredVariable.
def assign_sub(self, value, use_locking=False, name=None, read_value=True):
if values_util.is_saving_non_distributed():
return self._primary.assign_sub(value, use_locking, name, read_value)
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if (ds_context.in_cross_replica_context() and
not values_util.in_replica_update_context()):
return values_util.on_read_assign_sub_cross_replica(
self, value, read_value=read_value)
else:
return super(SyncOnReadVariable,
self).assign_sub(value, use_locking, name, read_value)
def assign_add(self, value, use_locking=False, name=None, read_value=True):
if values_util.is_saving_non_distributed():
return self._primary.assign_add(value, use_locking, name, read_value)
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if (ds_context.in_cross_replica_context() and
not values_util.in_replica_update_context()):
return values_util.on_read_assign_add_cross_replica(
self, value, read_value=read_value)
else:
return super(SyncOnReadVariable,
self).assign_add(value, use_locking, name, read_value)
def assign(self, value, use_locking=False, name=None, read_value=True):
if values_util.is_saving_non_distributed():
return self._primary.assign(value, use_locking, name, read_value)
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if (ds_context.in_cross_replica_context() and
not values_util.in_replica_update_context()):
return values_util.on_read_assign_cross_replica(
self, value, read_value=read_value)
else:
return super(SyncOnReadVariable,
self).assign(value, use_locking, name, read_value)
def _scatter_not_implemented(self, method):
raise NotImplementedError(
"Variables with `synchronization=ON_READ` doesn't support `%s`" %
method)
def scatter_sub(self, *args, **kwargs):
if values_util.is_saving_non_distributed():
return self._primary.scatter_sub(*args, **kwargs)
self._scatter_not_implemented("scatter_sub")
def scatter_add(self, *args, **kwargs):
if values_util.is_saving_non_distributed():
return self._primary.scatter_add(*args, **kwargs)
self._scatter_not_implemented("scatter_add")
def scatter_mul(self, *args, **kwargs):
if values_util.is_saving_non_distributed():
return self._primary.scatter_mul(*args, **kwargs)
self._scatter_not_implemented("scatter_mul")
def scatter_div(self, *args, **kwargs):
if values_util.is_saving_non_distributed():
return self._primary.scatter_div(*args, **kwargs)
self._scatter_not_implemented("scatter_div")
def scatter_min(self, *args, **kwargs):
if values_util.is_saving_non_distributed():
return self._primary.scatter_min(*args, **kwargs)
self._scatter_not_implemented("scatter_min")
def scatter_max(self, *args, **kwargs):
if values_util.is_saving_non_distributed():
return self._primary.scatter_max(*args, **kwargs)
self._scatter_not_implemented("scatter_max")
def scatter_update(self, *args, **kwargs):
if values_util.is_saving_non_distributed():
return self._primary.scatter_update(*args, **kwargs)
self._scatter_not_implemented("scatter_update")
def value(self):
if values_util.is_saving_non_distributed():
return self._primary.value()
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if (ds_context.in_cross_replica_context() and
not values_util.in_replica_update_context()):
if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
return self._get_replica(0).value()
return self._get_cross_replica()
else:
# _get_on_device_or_primary() returns a Variable.
return self._get_on_device_or_primary().value()
def _get_cross_replica(self):
if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
# Consider returning a tensor value here to make the return value of
# _get_cross_replica consistent.
return self._get_replica(0)
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
return self._distribute_strategy.reduce(
reduce_util.ReduceOp.from_variable_aggregation(self._aggregation),
self,
axis=None)
def _as_graph_element(self):
if values_util.is_saving_non_distributed():
return self._primary._as_graph_element() # pylint: disable=protected-access
# pylint: disable=protected-access
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if ds_context.in_cross_replica_context():
return ops.convert_to_tensor(self._get_cross_replica())
return self._get()._as_graph_element()
def _gather_saveables_for_checkpoint(self):
"""Overrides Trackable method.
This allows both name-based and object-based save and restore of
`SyncOnReadVariable`s.
Returns:
A dictionary mapping attribute names to `SaveableObject` factories.
"""
def _saveable_factory(name=self._common_name):
return _SyncOnReadSaveable(self, name)
return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
# Register conversion functions which read the value of the variable,
# allowing instances of these classes to be used as tensors.
# DistributedVariable
def _tensor_conversion_distributed_var(var, dtype=None, name=None,
as_ref=False):
return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(DistributedVariable,
_tensor_conversion_distributed_var)
# MirroredVariables
def _tensor_conversion_mirrored(var, dtype=None, name=None, as_ref=False):
return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(MirroredVariable,
_tensor_conversion_mirrored)
# Mirrored Values
def _tensor_conversion_mirrored_val(value, dtype=None, name=None, as_ref=False):
return ops.convert_to_tensor(
value._get(), dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(Mirrored,
_tensor_conversion_mirrored_val)
# SyncOnReadVariables
def _tensor_conversion_sync_on_read(var, dtype=None, name=None, as_ref=False):
return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(SyncOnReadVariable,
_tensor_conversion_sync_on_read)
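# Illustrative sketch (not used elsewhere in this module): the registrations
# above are what allow these wrapper objects to be passed anywhere a `Tensor`
# is expected, because TensorFlow consults the registered conversion function
# whenever a non-Tensor value is handed to an op. `_DemoWrapper` below is
# hypothetical and exists only to show the mechanism; nothing in this file
# calls `_tensor_conversion_sketch`.
def _tensor_conversion_sketch():
  class _DemoWrapper(object):
    """Hypothetical wrapper holding a single tensor."""
    def __init__(self, tensor):
      self._tensor = tensor
  def _convert(value, dtype=None, name=None, as_ref=False):
    del as_ref  # Reference semantics are not supported by the demo wrapper.
    return ops.convert_to_tensor(value._tensor, dtype=dtype, name=name)  # pylint: disable=protected-access
  ops.register_tensor_conversion_function(_DemoWrapper, _convert)
  # Once registered, instances convert implicitly wherever a tensor is needed.
  return ops.convert_to_tensor(_DemoWrapper(ops.convert_to_tensor(1.0)))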
class VariablePolicy(object):
"""Policy defining synchronization and aggregation of a distributed variable.
Given `synchronization` and `aggregation` parameters set on a `tf.Variable`
during variable creation within `tf.distribute` scope, `tf.distribute` creates
an appropriate policy object and assigns it to the distributed variable. All
variable operations are delegated to the respective policy object.
"""
def __init__(self, aggregation):
self._aggregation = aggregation
def value(self):
raise NotImplementedError(
"This method should be overridden by sub-classes.")
def _is_mirrored(self):
raise NotImplementedError(
"This method should be overridden by sub-classes.")
def _as_graph_element(self, _):
raise NotImplementedError(
"This method should be overridden by sub-classes.")
def _get_cross_replica(self, var):
raise NotImplementedError(
"This method should be overridden by sub-classes.")
def _update_replica(self, var, update_fn, value, **kwargs):
raise NotImplementedError(
"This method should be overridden by sub-classes.")
class OnReadPolicy(VariablePolicy):
"""Policy defined for `tf.VariableSynchronization.ON_READ` synchronization.
This policy is created when `synchronization` is set to
`tf.VariableSynchronization.ON_READ` and `aggregation` is set to any of the
values allowed by the `tf.VariableAggregation` enum such as `NONE`, `SUM`,
`MEAN` or `ONLY_FIRST_REPLICA` when creating a `tf.Variable` in `tf.distribute`
scope.
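For example, a variable created as below will be synchronized on read (a
minimal sketch; whether the variable actually carries an `OnReadPolicy`
depends on the strategy's variable-policy settings):
  strategy = tf.distribute.MirroredStrategy()
  with strategy.scope():
    v = tf.Variable(
        0.,
        synchronization=tf.VariableSynchronization.ON_READ,
        aggregation=tf.VariableAggregation.SUM)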
"""
def _is_mirrored(self):
return False
def value(self, var):
with ds_context.enter_or_assert_strategy(var.distribute_strategy):
if (ds_context.in_cross_replica_context() and
not values_util.in_replica_update_context()):
if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
return var._get_replica(0).value() # pylint: disable=protected-access
return var._get_cross_replica() # pylint: disable=protected-access
else:
return var._get_on_device_or_primary().value() # pylint: disable=protected-access
def _as_graph_element(self, var):
with ds_context.enter_or_assert_strategy(var.distribute_strategy):
if ds_context.in_cross_replica_context():
return ops.convert_to_tensor(var._get_cross_replica()) # pylint: disable=protected-access
return var._get()._as_graph_element() # pylint: disable=protected-access
def _get_cross_replica(self, var):
if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
return var._get_replica(0) # pylint: disable=protected-access
with ds_context.enter_or_assert_strategy(var.distribute_strategy):
return var.distribute_strategy.reduce(
reduce_util.ReduceOp.from_variable_aggregation(self._aggregation),
var,
axis=None)
def _update_replica(self, var, update_fn, value, **kwargs):
return update_fn(var._get_on_device_or_primary(), value, **kwargs) # pylint: disable=protected-access
def _scatter_not_implemented(self, method):
raise NotImplementedError(
"ON_READ variables doesn't support `%s` in cross replica context" %
method)
def assign_sub(self, var, value, use_locking=False, name=None,
read_value=True):
"""Subtracts a value from this variable."""
with ds_context.enter_or_assert_strategy(var.distribute_strategy):
if (ds_context.in_cross_replica_context() and
not values_util.in_replica_update_context()):
return values_util.on_read_assign_sub_cross_replica(
var, value, read_value=read_value)
else:
return values_util.on_write_assign_sub(
var, value, use_locking=use_locking, name=name,
read_value=read_value)
def assign_add(self, var, value, use_locking=False, name=None,
read_value=True):
"""Adds a value to this variable."""
with ds_context.enter_or_assert_strategy(var.distribute_strategy):
if (ds_context.in_cross_replica_context() and
not values_util.in_replica_update_context()):
return values_util.on_read_assign_add_cross_replica(
var, value, read_value=read_value)
else:
return values_util.on_write_assign_add(
var, value, use_locking=use_locking, name=name,
read_value=read_value)
def assign(self, var, value, use_locking=False, name=None, read_value=True):
with ds_context.enter_or_assert_strategy(var.distribute_strategy):
if (ds_context.in_cross_replica_context() and
not values_util.in_replica_update_context()):
return values_util.on_read_assign_cross_replica(var, value,
read_value=read_value)
else:
return values_util.on_write_assign(var, value,
use_locking=use_locking,
name=name,
read_value=read_value)
def scatter_sub(self, *args, **kwargs):
del args, kwargs
self._scatter_not_implemented("scatter_sub")
def scatter_add(self, *args, **kwargs):
del args, kwargs
self._scatter_not_implemented("scatter_add")
def scatter_mul(self, *args, **kwargs):
del args, kwargs
self._scatter_not_implemented("scatter_mul")
def scatter_div(self, *args, **kwargs):
del args, kwargs
self._scatter_not_implemented("scatter_div")
def scatter_min(self, *args, **kwargs):
del args, kwargs
self._scatter_not_implemented("scatter_min")
def scatter_max(self, *args, **kwargs):
del args, kwargs
self._scatter_not_implemented("scatter_max")
def scatter_update(self, *args, **kwargs):
del args, kwargs
self._scatter_not_implemented("scatter_update")
def get_saveable(self, var, primary_var, name):
"""Create a saveable object for the given variable."""
return values_util.get_on_read_saveable(var, primary_var, name)
def get_restore_ops(self, var, tensor):
"""Restore the same value into all variables."""
return values_util.get_on_read_restore_ops(var, tensor, self._aggregation)
class AutoPolicy(VariablePolicy):
"""Policy defined for `tf.VariableSynchronization.AUTO` synchronization.
This policy is created when `synchronization` is set to
`tf.VariableSynchronization.AUTO` and `aggregation` is set to
`tf.VariableAggregation.NONE` when creating a `tf.Variable` in `tf.distribute`
scope.
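For example, a variable created with the default `synchronization` and
`aggregation` arguments falls in this category (a minimal sketch; whether the
variable actually carries an `AutoPolicy` depends on the strategy's
variable-policy settings):
  strategy = tf.distribute.MirroredStrategy()
  with strategy.scope():
    v = tf.Variable(0.)  # AUTO synchronization, NONE aggregation by default.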
"""
def _is_mirrored(self):
return True
def value(self, var):
return var._get_on_device_or_primary().value() # pylint: disable=protected-access
def _as_graph_element(self, var):
return var._get_on_device_or_primary()._as_graph_element() # pylint: disable=protected-access
def _get_cross_replica(self, var):
# Return identity, to avoid directly exposing the variable to the user and
# allowing it to be modified by mistake.
return array_ops.identity(var._get_on_device_or_primary()) # pylint: disable=protected-access
def _update_replica(self, var, update_fn, value, **kwargs):
return update_fn(var._get_on_device_or_primary(), value, **kwargs) # pylint: disable=protected-access
def assign(self, var, value, use_locking=False, name=None, read_value=True):
return values_util.on_write_assign(var, value, use_locking=use_locking,
name=name, read_value=read_value)
def assign_add(self, var, value, use_locking=False, name=None,
read_value=True):
return values_util.on_write_assign_add(var, value, use_locking=use_locking,
name=name, read_value=read_value)
def assign_sub(self, var, value, use_locking=False, name=None,
read_value=True):
return values_util.on_write_assign_sub(var, value, use_locking=use_locking,
name=name, read_value=read_value)
def scatter_sub(self, var, sparse_delta, use_locking=False, name=None):
return values_util.scatter_sub(var, sparse_delta, use_locking=use_locking,
name=name)
def scatter_add(self, var, sparse_delta, use_locking=False, name=None):
return values_util.scatter_add(var, sparse_delta, use_locking=use_locking,
name=name)
def scatter_mul(self, var, sparse_delta, use_locking=False, name=None):
return values_util.scatter_mul(var, sparse_delta, use_locking=use_locking,
name=name)
def scatter_div(self, var, sparse_delta, use_locking=False, name=None):
return values_util.scatter_div(var, sparse_delta, use_locking=use_locking,
name=name)
def scatter_min(self, var, sparse_delta, use_locking=False, name=None):
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError(values_util.scatter_error_msg.format(
op_name="scatter_min", aggregation=self._aggregation))
return values_util.scatter_min(var, sparse_delta, use_locking=use_locking,
name=name)
def scatter_max(self, var, sparse_delta, use_locking=False, name=None):
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError(values_util.scatter_error_msg.format(
op_name="scatter_max", aggregation=self._aggregation))
return values_util.scatter_max(var, sparse_delta, use_locking=use_locking,
name=name)
def scatter_update(self, var, sparse_delta, use_locking=False, name=None):
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError(values_util.scatter_error_msg.format(
op_name="scatter_update", aggregation=self._aggregation))
return values_util.scatter_update(var, sparse_delta,
use_locking=use_locking,
name=name)
def get_saveable(self, var, primary_var, name):
"""Saveable ops for AUTO variables."""
return values_util.get_on_write_saveable(var, primary_var, name)
def get_restore_ops(self, var, tensor):
return values_util.get_on_write_restore_ops(var, tensor)
class OnWritePolicy(AutoPolicy):
"""Policy defined for `tf.VariableSynchronization.ON_WRITE` synchronization.
This policy is created when the following `synchronization` and
`aggregation` parameters are specified when creating a `tf.Variable` in
`tf.distribute` scope:
* `synchronization` is equal to `tf.VariableSynchronization.AUTO` and
`aggregation` is any of the `tf.VariableAggregation` enum values such as
`SUM`, `MEAN` or `ONLY_FIRST_REPLICA`.
* `synchronization` is equal to `tf.VariableSynchronization.ON_WRITE` and
`aggregation` is any of the `tf.VariableAggregation` enum values such as
`NONE`, `SUM`, `MEAN` or `ONLY_FIRST_REPLICA`.
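For example (a minimal sketch; whether the variable actually carries an
`OnWritePolicy` depends on the strategy's variable-policy settings):
  strategy = tf.distribute.MirroredStrategy()
  with strategy.scope():
    v = tf.Variable(
        0.,
        synchronization=tf.VariableSynchronization.ON_WRITE,
        aggregation=tf.VariableAggregation.MEAN)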
"""
def _update_replica(self, var, update_fn, value, **kwargs):
return _on_write_update_replica(var, update_fn, value, **kwargs)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for OptimizerV2."""
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import losses
from tensorflow.python.keras import optimizer_v1
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adadelta
from tensorflow.python.keras.optimizer_v2 import adagrad
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.keras.optimizer_v2 import adamax
from tensorflow.python.keras.optimizer_v2 import ftrl
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.optimizer_v2 import nadam
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import util as trackable_utils
_DATA_TYPES = [dtypes.half, dtypes.float32, dtypes.float64]
# TODO(b/141710709): complex support in NVCC and ROCM.
if (not test_util.IsBuiltWithNvcc() and not test.is_built_with_rocm()):
_DATA_TYPES += [dtypes.complex64, dtypes.complex128]
class OptimizerTest(test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testBasic(self):
for dtype in _DATA_TYPES:
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
sgd = gradient_descent.SGD(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, var_list=[var0, var1])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testAdaptiveLearningRate(self):
for dtype in _DATA_TYPES:
with self.test_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
def loss():
return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
sgd = gradient_descent.SGD(1.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, [var0, var1])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
# var0 = [1., 2.] - 1.0 * [5, 5]
self.assertAllClose([-4., -3.], self.evaluate(var0))
# var1 = [3., 4.] - 1.0 * [3, 3]
self.assertAllClose([0., 1.], self.evaluate(var1))
sgd.learning_rate = 0.5
if context.executing_eagerly():
sgd.minimize(loss, [var0, var1])
else:
self.evaluate(opt_op)
# Validate updated params
# var0 = [-4., -3.] - 0.5 * [5, 5]
self.assertAllClose([-6.5, -5.5], self.evaluate(var0))
# var1 = [0., 1.] - 0.5 * [3, 3]
self.assertAllClose([-1.5, -0.5], self.evaluate(var1))
sgd.learning_rate = learning_rate_schedule.InverseTimeDecay(
0.5, decay_steps=1.0, decay_rate=0.5)
if context.executing_eagerly():
sgd.minimize(loss, [var0, var1])
else:
self.evaluate(opt_op)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testPrecomputedGradient(self):
for dtype in _DATA_TYPES:
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
grad_loss = constant_op.constant([42, -42], dtype=dtype)
sgd = gradient_descent.SGD(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, var_list=[var0, var1], grad_loss=grad_loss)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)],
self.evaluate(var0))
self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
self.evaluate(var1))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testNoGradients(self):
for dtype in _DATA_TYPES:
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 # pylint: disable=cell-var-from-loop
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegex(ValueError, 'No gradients'):
# var1 has no gradient
sgd_op.minimize(loss, var_list=[var1])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testNoGradientsForAnyVariables_Minimize(self):
for dtype in _DATA_TYPES:
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: constant_op.constant(5.0)
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegex(ValueError,
'No gradients provided for any variable'):
sgd_op.minimize(loss, var_list=[var0, var1])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testNoGradientsForAnyVariables_ApplyGradients(self):
for dtype in _DATA_TYPES:
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegex(ValueError,
'No gradients provided for any variable'):
sgd_op.apply_gradients([(None, var0), (None, var1)])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testGradientsAsVariables(self):
for i, dtype in enumerate(_DATA_TYPES):
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
sgd = gradient_descent.SGD(3.0)
grads_and_vars = sgd._compute_gradients(loss, [var0, var1])
# Convert gradients to tf.Variables
converted_grads = [
variables.Variable(
array_ops.zeros([2], dtype), name='c_%d_%d' % (i, j))
for j, gv in enumerate(grads_and_vars)
]
convert_ops = [
state_ops.assign(converted_grads[j], gv[0])
for j, gv in enumerate(grads_and_vars)
]
# Run convert_ops to perform the gradient conversion
self.evaluate(variables.global_variables_initializer())
self.evaluate(convert_ops)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
converted_grads_and_vars = list(zip(converted_grads, [var0, var1]))
opt_op = sgd.apply_gradients(converted_grads_and_vars)
self.evaluate(variables.global_variables_initializer())
self.evaluate(convert_ops)
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testComputeGradientsWithTensors(self):
with testing_utils.use_gpu():
x = ops.convert_to_tensor_v2_with_dispatch(1.0)
def f():
return x * x
sgd = gradient_descent.SGD(3.0)
grads_and_vars = sgd._compute_gradients(f, [x])
self.assertLen(grads_and_vars, 1)
grad, x_as_var = grads_and_vars[0]
self.assertIs(x, x_as_var)
self.assertEqual(2.0, self.evaluate(grad))
with self.assertRaises(NotImplementedError):
sgd.apply_gradients(grads_and_vars)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testConstraint(self):
constraint_01 = lambda x: clip_ops.clip_by_value(x, -0.1, 0.)
constraint_0 = lambda x: clip_ops.clip_by_value(x, 0., 1.)
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0],
constraint=constraint_01)
var1 = variables.Variable([3.0, 4.0],
constraint=constraint_0)
loss = lambda: 5 * var0 + 3 * var1
sgd = gradient_descent.SGD(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, var_list=[var0, var1])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-0.1, -0.1], self.evaluate(var0))
self.assertAllClose([0., 0.], self.evaluate(var1))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testIterationWithoutMinimize(self):
with testing_utils.use_gpu():
sgd = gradient_descent.SGD(3.0)
self.evaluate(sgd.iterations.initializer)
self.assertEqual(0, self.evaluate(sgd.iterations))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testConfig(self):
with testing_utils.use_gpu():
opt = gradient_descent.SGD(learning_rate=1.0)
config = opt.get_config()
opt2 = gradient_descent.SGD.from_config(config)
lr = opt._get_hyper('learning_rate')
lr2 = opt2._get_hyper('learning_rate')
self.evaluate(variables.global_variables_initializer())
# assert both are equal float values.
self.assertEqual(self.evaluate(lr), self.evaluate(lr2))
var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32)
loss = lambda: 3 * var0
# learning rate variable is created when calling minimize.
opt.minimize(loss, [var0])
opt3 = gradient_descent.SGD.from_config(config)
lr3 = opt3._get_hyper('learning_rate')
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(lr), self.evaluate(lr3))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testConfigWithLearningRateDecay(self):
with testing_utils.use_gpu():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32)
for decay_schedule in [
learning_rate_schedule.InverseTimeDecay(
0.5, decay_steps=1.0, decay_rate=0.1),
learning_rate_schedule.PiecewiseConstantDecay(
[5], [1., .5])
]:
step = 10
opt = gradient_descent.SGD(decay_schedule)
config = opt.get_config()
opt2 = gradient_descent.SGD.from_config(config)
# assert both are equal float values.
self.assertAllEqual(
decay_schedule(step),
opt._get_hyper('learning_rate')(step))
self.assertAllEqual(
decay_schedule(step),
opt2._get_hyper('learning_rate')(step))
loss = lambda: 3 * var0
# learning rate variable is created when calling minimize.
opt.minimize(loss, [var0])
self.evaluate(variables.global_variables_initializer())
config = opt.get_config()
opt3 = gradient_descent.SGD.from_config(config)
self.assertAllEqual(
self.evaluate(opt._get_hyper('learning_rate')(step)),
opt3._get_hyper('learning_rate')(step))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testGradClipValue(self):
with testing_utils.use_gpu():
var = variables.Variable([1.0, 2.0])
loss = lambda: 3 * var
opt = gradient_descent.SGD(learning_rate=1.0, clipvalue=1.0)
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0., 1.], self.evaluate(var))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testGradClipNorm(self):
with testing_utils.use_gpu():
var = variables.Variable([1.0])
loss = lambda: 3 * var
opt = gradient_descent.SGD(learning_rate=1.0, clipnorm=1.0)
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0.], self.evaluate(var))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testGradGlobalClipNorm(self):
with testing_utils.use_gpu():
# l2 norm is 5.0
var1 = variables.Variable([1.0])
var2 = variables.Variable([2.0])
loss = lambda: 3 * var1 + 4 * var2
opt = gradient_descent.SGD(learning_rate=1.0, global_clipnorm=2.0)
opt_op = opt.minimize(loss, [var1, var2])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# grad1 = 3.0 * 2.0 / 5.0 = 1.2
self.assertAllClose([-.2], self.evaluate(var1))
# grad2 = 4.0 * 2.0 / 5.0 = 1.6
self.assertAllClose([.4], self.evaluate(var2))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInvalidClipNorm(self):
with self.assertRaisesRegex(ValueError, '>= 0'):
gradient_descent.SGD(learning_rate=1.0, clipnorm=-1.0)
@combinations.generate(
combinations.combine(
mode=['graph', 'eager'],
clip_type=['clipnorm', 'global_clipnorm', 'clipvalue']))
def testConfigWithClipping(self, clip_type):
opt = gradient_descent.SGD(learning_rate=1.0, **{clip_type: 2.0})
config = opt.get_config()
opt = gradient_descent.SGD.from_config(config)
self.assertEqual(getattr(opt, clip_type), 2.0)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInvalidKwargs(self):
with self.assertRaisesRegex(TypeError, 'Unexpected keyword argument'):
gradient_descent.SGD(learning_rate=1.0, invalidkwargs=1.0)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testWeights(self):
with testing_utils.use_gpu():
opt1 = adam.Adam(learning_rate=1.0)
var1 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
loss1 = lambda: 3 * var1
opt_op_1 = opt1.minimize(loss1, [var1])
self.evaluate(variables.global_variables_initializer())
config = opt1.get_config()
opt2 = adam.Adam.from_config(config)
var2 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
loss2 = lambda: 3 * var2
opt_op_2 = opt2.minimize(loss2, [var2])
weights = opt1.get_weights()
# Assert that after set_weights both variables are updated to the same value.
self.evaluate(variables.global_variables_initializer())
opt2.set_weights(weights)
self.evaluate([opt_op_1, opt_op_2])
self.assertAllClose(self.evaluate(var1), self.evaluate(var2))
self.assertEqual(1, self.evaluate(opt1.iterations))
self.assertEqual(1, self.evaluate(opt2.iterations))
var3 = variables.Variable([1.0, 2.0, 3.0], dtype=dtypes.float32)
var4 = variables.Variable([4.0, 5.0, 6.0], dtype=dtypes.float32)
loss3 = lambda: 3 * var3 + 5 * var4
opt_op_3 = opt1.minimize(loss3, [var3, var4])
# Assert set_weights raises ValueError since the weight list does not match.
self.evaluate(variables.global_variables_initializer())
weights = opt1.get_weights()
with self.assertRaisesRegex(ValueError, 'but the optimizer was'):
opt2.set_weights(weights)
# Assert that after set_weights the variables are updated to the same value.
var5 = variables.Variable([1.0, 2.0, 3.0], dtype=dtypes.float32)
var6 = variables.Variable([4.0, 5.0, 6.0], dtype=dtypes.float32)
loss4 = lambda: 3 * var5 + 5 * var6
opt_op_4 = opt2.minimize(loss4, [var5, var6])
self.evaluate(variables.global_variables_initializer())
opt2.set_weights(weights)
self.evaluate([opt_op_3, opt_op_4])
self.assertAllClose(
self.evaluate([var3, var4]), self.evaluate([var5, var6]))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testGettingHyperParameters(self):
with self.test_session():
opt = adam.Adam(learning_rate=1.0)
var = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
loss = lambda: 3 * var
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
lr = self.evaluate(opt.lr)
self.assertEqual(1.0, lr)
opt.lr = 2.0
lr = self.evaluate(opt.lr)
self.assertEqual(2.0, lr)
self.evaluate(opt.lr.assign(3.0))
lr = self.evaluate(opt.lr)
self.assertEqual(3.0, lr)
with self.assertRaises(AttributeError):
opt.not_an_attr += 3
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testGettingHyperParametersWithLrInConstructor(self):
with self.test_session():
opt = gradient_descent.SGD(lr=3.0)
var = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
loss = lambda: 3 * var
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt.learning_rate, variables.Variable)
lr = self.evaluate(opt.lr)
self.assertEqual(3.0, lr)
opt.lr = 2.0
lr = self.evaluate(opt.lr)
self.assertEqual(2.0, lr)
self.evaluate(opt.lr.assign(4.0))
lr = self.evaluate(opt.lr)
self.assertEqual(4.0, lr)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testDir(self):
opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.1)
dir_result = set(dir(opt))
self.assertIn('learning_rate', dir_result) # Hyperparameter
self.assertIn('lr', dir_result) # Hyperparameter
self.assertIn('momentum', dir_result) # Hyperparameter
self.assertIn('nesterov', dir_result) # Attribute
self.assertIn('minimize', dir_result) # Attribute
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testOptimizerWithKerasModel(self):
a = input_layer.Input(shape=(3,), name='input_a')
b = input_layer.Input(shape=(3,), name='input_b')
dense = core.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = core.Dropout(0.5, name='dropout')(c)
model = training.Model([a, b], [d, e])
optimizer = gradient_descent.SGD(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.fit([input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testOptimizerWithCallbacks(self):
np.random.seed(1331)
input_np = np.random.random((10, 3))
output_np = np.random.random((10, 4))
a = input_layer.Input(shape=(3,), name='input_a')
model = sequential.Sequential()
model.add(core.Dense(4, kernel_initializer='zeros', name='dense'))
model.add(core.Dropout(0.5, name='dropout'))
model(a)
optimizer = gradient_descent.SGD(learning_rate=0.1)
model.compile(optimizer, loss='mse', metrics=['mae'])
# This does not reduce the LR after the first epoch (due to low delta).
cbks = [
callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5)
]
model.fit(
input_np,
output_np,
batch_size=10,
validation_data=(input_np, output_np),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertAllClose(
float(backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
# This should reduce the LR after the first epoch (due to high delta).
cbks = [
callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
input_np,
output_np,
batch_size=10,
validation_data=(input_np, output_np),
callbacks=cbks,
epochs=2,
verbose=2)
self.assertAllClose(
float(backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def testOptimizerSetIterations(self):
global_step = training_util.get_or_create_global_step()
opt = adam.Adam(learning_rate=1.0)
opt.iterations = global_step
var = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
self.evaluate(variables.global_variables_initializer())
init_step_value = self.evaluate(global_step)
loss = lambda: 3 * var
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
new_step_value = self.evaluate(global_step)
self.assertEqual(new_step_value, init_step_value + 1)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testOptimizerWithCallableVarList(self):
train_samples = 20
input_dim = 1
num_classes = 2
(x, y), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
y = np_utils.to_categorical(y)
num_hidden = 1
model = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes)
opt = adam.Adam()
loss = lambda: losses.mean_squared_error(model(x), y)
var_list = lambda: model.trainable_weights
with self.assertRaisesRegex(
ValueError, 'Weights for model .* have not yet been created'):
var_list()
train_op = opt.minimize(loss, var_list)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
[[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
self.evaluate(train_op)
self.assertNotEqual(
[[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
self.assertLen(var_list(), 4)
def testVarKey(self):
with ops.get_default_graph().as_default():
a = variables.Variable([1., 2.], name='var')
b = variables.Variable([1.], name='var')
self.assertTrue(a._in_graph_mode)
self.assertTrue(b._in_graph_mode)
var_key = optimizer_v2._var_key(a)
self.assertEqual('var', var_key)
var_key = optimizer_v2._var_key(b)
self.assertEqual('var_1', var_key)
def testVarName(self):
with ops.get_default_graph().as_default():
var = variables.Variable([1., 2.], name='var')
loss = var + 1.
opt = adam.Adam()
opt.get_updates(loss, [var])
opt_vars = opt.variables()
self.assertLen(opt_vars, 3)
self.assertEqual('Adam/iter:0', opt_vars[0].name)
self.assertEqual('Adam/var/m:0', opt_vars[1].name)
var_2 = variables.Variable([1., 2.], name='var_2')
loss = var_2 + 1.
with backend.name_scope('outer'):
opt.get_updates(loss, [var_2])
opt_vars = opt.variables()
self.assertLen(opt_vars, 5)
self.assertEqual('outer/Adam/var_2/m:0', opt_vars[3].name)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testEmptyVarList(self):
opt = gradient_descent.SGD(1.)
opt.minimize(lambda: constant_op.constant(1.), [])
opt.apply_gradients([])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testAggregationTrue(self):
# Test that experimental_aggregate_gradients=True works without a
# distribution strategy.
var = variables.Variable([1., 2.])
opt = gradient_descent.SGD(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1., 2.], self.evaluate(var))
opt_op = opt.apply_gradients([([0.1, 0.1], var)],
experimental_aggregate_gradients=True)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0.7, 1.7], self.evaluate(var))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testAggregationFalse(self):
# Test that experimental_aggregate_gradients=False works without a
# distribution strategy.
var = variables.Variable([1., 2.])
opt = gradient_descent.SGD(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1., 2.], self.evaluate(var))
opt_op = opt.apply_gradients([([0.1, 0.1], var)],
experimental_aggregate_gradients=False)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0.7, 1.7], self.evaluate(var))
@combinations.generate(combinations.combine(mode=['eager']))
def testRestoringIterationsWithoutAnOptimizer(self):
opt = gradient_descent.SGD(3.0)
opt.iterations.assign(5)
checkpoint = trackable_utils.Checkpoint(optimizer=opt)
path = checkpoint.save(self.get_temp_dir())
# The following verifies that `iterations` can be restored in the absence
# of an `Optimizer` object (using a `Checkpoint` as a placeholder).
iterations_var = variables.Variable(0, dtype=dtypes.int64)
optimizer_checkpoint = trackable_utils.Checkpoint(iter=iterations_var)
checkpoint_to_restore = trackable_utils.Checkpoint(
optimizer=optimizer_checkpoint)
checkpoint_to_restore.restore(path)
self.assertEqual(5, self.evaluate(iterations_var))
@combinations.generate(combinations.combine(mode=['eager']))
def testSlotWithNonstandardShapeRestoresBasedOnCheckpoint(self):
# First create an optimizer and a slot variable with a non-standard shape.
x = variables.Variable([[1.0, 2.0], [3.0, 4.0]], dtype=dtypes.float32)
slot_shape = [2, 1]
optimizer_1 = optimizer_v2.OptimizerV2(name='test')
optimizer_1.add_slot(x, 'test_slot', 'ones', shape=slot_shape)
# Then save the variable and optimizer to a checkpoint.
checkpoint_1 = trackable_utils.Checkpoint(var=x, optimizer=optimizer_1)
checkpoint_path = checkpoint_1.save(self.get_temp_dir())
# Create a new optimizer and call restore on it (and x)
optimizer_2 = optimizer_v2.OptimizerV2(name='test')
checkpoint_2 = trackable_utils.Checkpoint(var=x, optimizer=optimizer_2)
checkpoint_2.restore(checkpoint_path)
self.assertEqual(slot_shape,
optimizer_2.get_slot(x, 'test_slot').shape.as_list())
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_gradient_aggregator(self):
def gradient_aggregator(grads_and_vars):
# Simulate an all-reduce where the other replica has zeros for gradients,
# by dividing each gradient by 2.
grads = [g for g, _ in grads_and_vars]
vars = [v for _, v in grads_and_vars] # pylint: disable=redefined-builtin
all_reduced_grads = [g / 2 for g in grads]
return list(zip(all_reduced_grads, vars))
var = variables.Variable(2.0)
sgd = gradient_descent.SGD(1.0, gradient_aggregator=gradient_aggregator)
loss = lambda: 2 * var
opt_op = sgd.minimize(loss, var_list=[var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertEqual(self.evaluate(var), 1.0)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_override_aggregate_gradients(self):
class MyOptimizer(gradient_descent.SGD):
def _aggregate_gradients(self, grads_and_vars):
# Simulate an all-reduce where the other replica has zeros for
# gradients, by dividing each gradient by 2.
grads = [g for g, _ in grads_and_vars]
vars = [v for _, v in grads_and_vars] # pylint: disable=redefined-builtin
all_reduced_grads = [g / 2 for g in grads]
return list(zip(all_reduced_grads, vars))
var = variables.Variable(2.0)
sgd = MyOptimizer(1.0)
loss = lambda: 2 * var
opt_op = sgd.minimize(loss, var_list=[var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertEqual(self.evaluate(var), 1.0)
@keras_parameterized.run_all_keras_modes
class OptimizersCompatibilityTest(keras_parameterized.TestCase):
def _testOptimizersCompatibility(self, opt_v1, opt_v2, test_weights=True):
if context.executing_eagerly():
self.skipTest(
'v1 optimizer does not run in eager mode')
np.random.seed(1331)
with testing_utils.use_gpu():
train_samples = 20
input_dim = 3
num_classes = 2
(x, y), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
y = np_utils.to_categorical(y)
num_hidden = 5
model_v1 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_v1.compile(
opt_v1,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
model_v1.fit(x, y, batch_size=5, epochs=1)
model_v2 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_v2.set_weights(model_v1.get_weights())
model_v2.compile(
opt_v2,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
if not ops.executing_eagerly_outside_functions():
model_v2._make_train_function()
if test_weights:
opt_v2.set_weights(opt_v1.get_weights())
hist_1 = model_v1.fit(x, y, batch_size=5, epochs=1, shuffle=False)
hist_2 = model_v2.fit(x, y, batch_size=5, epochs=1, shuffle=False)
self.assertAllClose(model_v1.get_weights(), model_v2.get_weights(),
rtol=1e-5, atol=1e-5)
self.assertAllClose(hist_1.history['loss'], hist_2.history['loss'],
rtol=1e-5, atol=1e-5)
def testAdadeltaCompatibility(self):
opt_v1 = optimizer_v1.Adadelta(lr=0.01)
opt_v2 = adadelta.Adadelta(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testAdagradCompatibility(self):
opt_v1 = optimizer_v1.Adagrad(lr=0.01)
opt_v2 = adagrad.Adagrad(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testAdamCompatibility(self):
opt_v1 = optimizer_v1.Adam()
opt_v2 = adam.Adam()
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testAdamaxCompatibility(self):
opt_v1 = optimizer_v1.Adamax(lr=0.01)
opt_v2 = adamax.Adamax(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testNadamCompatibility(self):
opt_v1 = optimizer_v1.Nadam(lr=0.001)
opt_v2 = nadam.Nadam(learning_rate=0.001)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testMomentumCompatibility(self):
opt_v1 = optimizer_v1.SGD(lr=0.01, momentum=0.9)
opt_v2 = gradient_descent.SGD(learning_rate=0.01, momentum=0.9)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testRMSpropCompatibility(self):
opt_v1 = optimizer_v1.RMSprop()
opt_v2 = rmsprop.RMSprop()
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testSGDCompatibility(self):
opt_v1 = optimizer_v1.SGD(lr=0.01)
opt_v2 = gradient_descent.SGD(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2, False)
def testNumericEquivalenceForNesterovMomentum(self):
if context.executing_eagerly():
self.skipTest(
'v1 optimizer does not run in eager mode')
np.random.seed(1331)
with testing_utils.use_gpu():
train_samples = 20
input_dim = 3
num_classes = 2
(x, y), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
y = np_utils.to_categorical(y)
num_hidden = 5
model_k_v1 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_k_v2 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_k_v2.set_weights(model_k_v1.get_weights())
model_tf = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_tf.set_weights(model_k_v2.get_weights())
opt_k_v1 = optimizer_v1.SGD(momentum=0.9, nesterov=True)
opt_k_v2 = gradient_descent.SGD(momentum=0.9, nesterov=True)
opt_tf = momentum.MomentumOptimizer(
learning_rate=0.01, momentum=0.9, use_nesterov=True)
model_k_v1.compile(
opt_k_v1,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
model_k_v2.compile(
opt_k_v2,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
model_tf.compile(
opt_tf,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)
hist_tf = model_tf.fit(x, y, batch_size=5, epochs=10, shuffle=False)
self.assertAllClose(model_k_v1.get_weights(), model_tf.get_weights())
self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
self.assertAllClose(hist_k_v1.history['loss'], hist_tf.history['loss'])
self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
def testNumericEquivalenceForAmsgrad(self):
if context.executing_eagerly():
self.skipTest(
'v1 optimizer does not run in eager mode')
np.random.seed(1331)
with testing_utils.use_gpu():
train_samples = 20
input_dim = 3
num_classes = 2
(x, y), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
y = np_utils.to_categorical(y)
num_hidden = 5
model_k_v1 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_k_v2 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_k_v2.set_weights(model_k_v1.get_weights())
opt_k_v1 = optimizer_v1.Adam(amsgrad=True)
opt_k_v2 = adam.Adam(amsgrad=True)
model_k_v1.compile(
opt_k_v1,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
model_k_v2.compile(
opt_k_v2,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)
self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
# Note: These tests are kept in a separate class to avoid bugs in some
# distributions of Python that break AutoGraph which is used by tf.function.
@combinations.generate(combinations.combine(mode=['eager']))
class OptimizerWithFunctionTest(test.TestCase, parameterized.TestCase):
def testBasic(self):
var = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
loss = lambda: 3 * var
opt = adam.Adam(learning_rate=1.0)
@def_function.function
def fn():
opt.minimize(loss, [var])
return var
self.assertAllClose([0., 1.], fn(), atol=1e-4)
self.assertAllClose([-1, 0.], fn(), atol=1e-4)
def testBasicWithConstantDecay(self):
var = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
loss = lambda: 3 * var
opt = adam.Adam(learning_rate=1.0)
@def_function.function
def fn():
opt.minimize(loss, [var])
return var
self.assertAllClose([0., 1.], fn(), atol=1e-4)
self.assertAllClose([-1, 0.], fn(), atol=1e-4)
def testVarKeyWithVarCreatedInEager(self):
a = variables.Variable([1., 2.], name='var')
b = variables.Variable([1.], name='var')
@test_util.also_run_as_tf_function
def var_key_test():
self.assertFalse(a._in_graph_mode)
self.assertFalse(b._in_graph_mode)
var_key_a = optimizer_v2._var_key(a)
self.assertStartsWith(var_key_a, 'var_')
var_key_b = optimizer_v2._var_key(b)
self.assertStartsWith(var_key_b, 'var_')
self.assertNotEqual(var_key_a, var_key_b)
var_key_test()
def testLearningRateDecayUsedInTwoFunctions(self):
a = variables.Variable([1., 2.], name='var')
b = variables.Variable([1.], name='var')
learning_rate_decay = learning_rate_schedule.InverseTimeDecay(
0.5, decay_steps=1.0, decay_rate=0.5)
opt = adam.Adam(learning_rate=learning_rate_decay)
loss_a = lambda: 3 * a
loss_b = lambda: 2 * b
@def_function.function
def fn_a():
opt.minimize(loss_a, [a])
return a
@def_function.function
def fn_b():
opt.minimize(loss_b, [b])
return b
fn_a()
fn_b()
_NUM_LEARNERS = 50
APPLY_SCOPE = 'debug_apply'
ALLOWLIST = [
# optimizer_v2._deduplicate_indexed_slices contains an indexed slice:
# array_ops.shape(unique_indices)[0]
# which winds up expanding to [0:1:1] thereby creating three constants
# to represent the indices.
('embeddings/strided_slice/stack', 'Const'),
]
def get_inputs(op):
op_inputs = list(op.inputs) + op.control_inputs
names = [i.name for i in op_inputs]
op_inputs = [getattr(i, 'op', i) for i in op_inputs]
return op_inputs, names
def strip_name(node):
if 'Placeholder' in node.op:
return
node.name = ''
def topological_sort(graph):
graph_ops = graph.get_operations()
sources = []
result = []
inputs = {}
outputs = collections.defaultdict(set)
for op in graph_ops:
op_inputs = get_inputs(op)[0]
if not op_inputs:
sources.append(op)
inputs[op] = set(op_inputs)
for i in op_inputs:
outputs[i].add(op)
while sources:
op = sources.pop()
for op_output in outputs[op]:
inputs[op_output].remove(op)
if not inputs[op_output]:
sources.append(op_output)
result.append(op)
# Check correctness.
if len(result) != len(graph_ops):
raise ValueError('Sort result has {} ops, source graph has {}.'
.format(len(result), len(graph_ops)))
sort_check_seen = set()
for op in result:
sort_check_seen.add(op)
for i in get_inputs(op)[0]:
assert i in sort_check_seen
return result
def identify_redundant_ops(graph):
"""Implements basic common subexpression elimination.
This is not intended to replicate the graph semantics of TensorFlow Graphs
(for instance it does not handle stateful op ordering), nor is it intended to
replace the common subexpression elimination Grappler pass. Rather, it
provides a high level sanity check that clearly redundant ops are not being
created.
Args:
graph: The graph to be analyzed.
Returns:
A count of the duplicate ops and a description of the structure of each.
"""
sorted_ops = topological_sort(graph)
duplicates = collections.defaultdict(list)
unified_node_defs = {}
name_map = {}
for op in sorted_ops:
input_names = []
for op_input, name in zip(*get_inputs(op)):
input_def = op_input.node_def
# Operations can have multiple outputs. We track which is used to prevent
# overzealous elimination.
input_def.name = name
input_def.input[:] = [name_map.get(i, i) for i in input_def.input]
strip_name(input_def)
# NodeDef.SerializeToString() does not provide identical serialized
# representations for identical NodeDefs, so we instead use string
# representation as a dict key.
key = repr(input_def)
if key in unified_node_defs:
input_names.append(unified_node_defs[key])
else:
unified_node_defs[key] = op_input.name
input_names.append(name)
node_def = op.node_def
node_def.input[:] = input_names
strip_name(node_def)
key = repr(node_def)
duplicates[key].append(op)
name_map[op.name] = duplicates[key][0].name
num_duplicates = 0
duplicate_types = []
for standard_def, op_defs in duplicates.items():
# We are only interested in testing the apply method of the optimizer
op_defs = [i for i in op_defs if APPLY_SCOPE in i.name]
# We only check for per-apply redundant ops.
if len(op_defs) < _NUM_LEARNERS:
continue
# Certain ops are simply not worth eliminating, and are instead simply
# ignored.
name, op_type = op_defs[0].name, op_defs[0].type
if any(allowlisted_scope in name and op_type == allowlisted_type
for allowlisted_scope, allowlisted_type in ALLOWLIST):
continue
num_duplicates += len(op_defs)
traceback = []
for level in op_defs[0].traceback:
traceback.append(' {} {}:{}'.format(level[0], level[2], level[1]))
duplicate_types.append(
'# Example name: {}\n# Op creation stack:\n{}\n{}'.format(
op_defs[0].name,
'\n'.join(traceback),
standard_def))
return num_duplicates, duplicate_types
def make_model():
r"""Constructs a simple ensemble of weak learners model.
--------- --------- --------- ---------
| Input | | Input | ... | Input | | Input |
--------- --------- --------- ---------
| | | |
V V V V
--------- --------- --------- ---------
| Embed | | Embed | ... | Embed | | Embed |
--------- --------- --------- ---------
| | | |
V V V V
--------- --------- --------- ---------
| Dense | | Dense | ... | Dense | | Dense |
--------- --------- --------- ---------
\ | | /
\ | | /
---------------------------------------------
|
---------
| Dense |
---------
This topology is chosen because it exercises both dense and sparse update
paths.
Returns:
A model for testing optimizer coefficient reuse.
"""
inputs = []
intermediates = []
for _ in range(_NUM_LEARNERS):
inp = keras.layers.Input(shape=(1,), dtype=dtypes.int32)
layer = keras.layers.Embedding(1, 4)(inp)
layer = keras.layers.Dense(1)(layer)
inputs.append(inp)
intermediates.append(layer)
layer = keras.layers.Concatenate(axis=-1)(intermediates)
layer = keras.layers.Dense(1)(layer)
return keras.models.Model(inputs, layer)
COEFFICIENT_PARAMS = (
('Adadelta', adadelta.Adadelta, None),
('Adagrad', adagrad.Adagrad, None),
('Adam', adam.Adam, None),
    ('Adam_amsgrad', adam.Adam, dict(amsgrad=True)),
('Adamax', adamax.Adamax, None),
('Ftrl', ftrl.Ftrl, None),
('Ftrl_l2_shrinkage', ftrl.Ftrl,
dict(l2_shrinkage_regularization_strength=0.1)),
('SGD', gradient_descent.SGD, None),
('SGD_momentum', gradient_descent.SGD, dict(momentum=0.5)),
('Nadam', nadam.Nadam, None),
('RMSprop', rmsprop.RMSprop, None),
('RMSprop_centered', rmsprop.RMSprop, dict(centered=True)),
('RMSprop_momentum', rmsprop.RMSprop, dict(momentum=0.5)),
('RMSprop_momentum_centered', rmsprop.RMSprop,
dict(momentum=0.5, centered=True)),
)
class OptimizerCoefficientTest(keras_parameterized.TestCase):
@parameterized.named_parameters(*COEFFICIENT_PARAMS)
def test_duplicate_ops(self, optimizer_class, init_kwargs=None):
init_kwargs = init_kwargs or {}
optimizer = optimizer_class(**init_kwargs)
graph = ops.Graph()
with graph.as_default():
model = make_model()
trainable_variables = model.trainable_variables
grads = optimizer.get_gradients(model.outputs[0], trainable_variables)
with backend.name_scope(APPLY_SCOPE):
optimizer.apply_gradients(zip(grads, trainable_variables))
num_duplicates, duplicate_types = identify_redundant_ops(graph)
if num_duplicates:
# Avoid spamming logs.
if len(duplicate_types) > 3:
duplicate_types = duplicate_types[:3] + ['...']
num_total = len(graph.get_operations())
raise ValueError('{} of {} ({:.1f}%) ops were duplicates:\n\n{}'.format(
num_duplicates, num_total, num_duplicates / num_total * 100,
'\n'.join(duplicate_types)))
@parameterized.named_parameters(*COEFFICIENT_PARAMS)
def test_subclass_compat(self, optimizer_class, init_kwargs=None):
"""Ensure that subclassed optimizers without apply_state still work."""
class SubclassedOptimizer(optimizer_class):
def _resource_apply_dense(self, grad, var): # pylint: disable=useless-super-delegation
return super(SubclassedOptimizer, self)._resource_apply_dense(grad, var)
def _resource_apply_sparse(self, grad, var, indices): # pylint: disable=useless-super-delegation
return super(SubclassedOptimizer, self)._resource_apply_sparse(
grad, var, indices)
init_kwargs = init_kwargs or {}
optimizer = SubclassedOptimizer(**init_kwargs)
graph = ops.Graph()
with graph.as_default():
model = make_model()
trainable_variables = model.trainable_variables
grads = optimizer.get_gradients(model.outputs[0], trainable_variables)
with backend.name_scope(APPLY_SCOPE):
optimizer.apply_gradients(zip(grads, trainable_variables))
if __name__ == '__main__':
test.main()
| |
from openravepy import poseTransformPoints, matrixFromPose, matrixFromQuat, matrixFromAxisAngle, rotationMatrixFromQuat, quatFromAxisAngle, poseFromMatrix, axisAngleFromRotationMatrix, quatFromRotationMatrix, quatMult, quatInverse, quatRotateDirection, quatSlerp, RaveGetAffineDOFValuesFromTransform, DOFAffine, transformLookat
import numpy as np
from math import sin, cos
from functools import reduce  # quat_dot() below uses reduce(), which is not a builtin on Python 3
def norm(vector, order=2):
return np.linalg.norm(vector, ord=order)
def length(vector):
return np.linalg.norm(vector)
def length2(vector):
return np.dot(vector, vector)
def normalize(vector):
return 1. / length(vector) * vector
def unit_x():
return np.array((1, 0, 0))
def unit_y():
return np.array((0, 1, 0))
def unit_z():
return np.array((0, 0, 1))
def unit_point():
return np.zeros(3)
def unit_quat():
return np.array((1, 0, 0, 0))
def unit_rot():
return np.identity(3)
def unit_pose():
return np.array((1, 0, 0, 0, 0, 0, 0))
def unit_trans():
return np.identity(4)
def trans_transform_point(trans, point):
return trans.dot(np.concatenate([point, [1]]).T)[:3]
def trans_transform_points(trans, points):
return np.dot(trans[:3, :3], points) + np.tile(trans[:3, 3].T, (points.shape[1], 1)).T
def pose_transform_point(pose, point):
return poseTransformPoints(pose, np.array([point]))[0]
def quat_transform_point(quat, point):
return pose_transform_point(pose_from_quat_point(quat, unit_point()), point)
def rot_transform_point(rot, point):
return rot.dot(point)
def quat_from_pose(pose):
return pose[:4]
def point_from_pose(pose):
return pose[4:]
def pose_from_quat_point(quat, point):
return np.concatenate([quat, point])
def trans_from_quat_point(quat, point):
return trans_from_pose(pose_from_quat_point(quat, point))
def trans_from_pose(pose):
return matrixFromPose(pose)
def pose_from_trans(trans):
return poseFromMatrix(trans)
def trans_from_point(x, y, z):
return trans_from_quat_point(unit_quat(), np.array([x, y, z]))
def trans_from_quat(quat):
return matrixFromQuat(quat)
def trans_from_axis_angle(x_angle, y_angle, z_angle):
return trans_from_quat(quat_from_axis_angle(x_angle, y_angle, z_angle))
def trans_from_rot_point(rot, point):
trans = unit_trans()
trans[:3, :3] = rot
trans[:3, 3] = point
return trans
def trans_from_rot(rot):
return trans_from_rot_point(rot, unit_point())
def rot_from_trans(trans):
return trans[:3, :3]
def quat_from_trans(trans):
return quat_from_rot(rot_from_trans(trans))
def point_from_trans(trans):
return trans[:3, 3]
def quat_from_axis_angle(x_angle, y_angle, z_angle):
return quatFromAxisAngle(np.array((x_angle, y_angle, z_angle)))
def rot_from_axis_angle(x_angle, y_angle, z_angle):
return matrixFromAxisAngle(np.array((x_angle, y_angle, z_angle)))
def axis_angle_from_rot(rot):
return axisAngleFromRotationMatrix(rot)
def quat_from_rot(rot):
return quatFromRotationMatrix(rot)
def rot_from_quat(quat):
return rotationMatrixFromQuat(quat)
def quat_from_angle_vector(angle, vector):
return np.concatenate([[cos(angle / 2)], sin(angle / 2) * normalize(np.array(vector))])
def rot_from_angle_vector(angle, vector):
return rot_from_quat(quat_from_angle_vector(angle, vector))
def trans_dot(*trans):
return np.dot(*trans)
def trans_inv(trans):
return np.linalg.inv(trans)
def quat_dot(*quats):
return reduce(quatMult, quats)
def quat_inv(quat):
return quatInverse(quat)
def quat_look_at(vector1, vector2=None):
if vector2 is None:
vector2 = vector1
vector1 = unit_x()
return quatRotateDirection(vector1, vector2)
def rot_look_at(vector1, vector2=None):
return rot_from_quat(quat_look_at(vector1, vector2))
def camera_look_at(point, look_point=unit_point()):
return transformLookat(np.array(look_point) - np.array(point), np.array(point), -unit_z())
def quat_interpolate(quat1, quat2, t=.5):
return quatSlerp(quat1, quat2, t, True)
def pose_interpolate(pose1, pose2, t=.5):
return pose_from_quat_point(quat_interpolate(quat_from_pose(pose1), quat_from_pose(pose2), t),
t * point_from_pose(pose1) + (1 - t) * point_from_pose(pose2))
def vector_trans(trans, vector):
return trans_from_pose(pose_from_trans(trans) + np.concatenate([np.zeros((4,)), vector]))
def quat_from_z_rot(theta):
return quat_from_axis_angle(0, 0, theta)
def base_values_from_trans(trans):
return RaveGetAffineDOFValuesFromTransform(trans, DOFAffine.X | DOFAffine.Y | DOFAffine.RotationAxis, [0, 0, 1])
def xyzt_from_trans(trans):
return RaveGetAffineDOFValuesFromTransform(trans, DOFAffine.X | DOFAffine.Y | DOFAffine.Z | DOFAffine.RotationAxis, [0, 0, 1])
def base_values_from_pose(pose):
return base_values_from_trans(trans_from_pose(pose))
def is_upright(trans):
return abs(abs(trans[2, 2]) - 1) < 1e-6
def pose_from_base_values(base_values, z=0.0):
x, y, theta = base_values
return pose_from_quat_point(quat_from_z_rot(theta), [x, y, z])
def trans_from_base_values(base_values, z=0.0):
return trans_from_pose(pose_from_base_values(base_values, z=z))
def trans2D_from_trans(trans):
return trans_from_base_values(base_values_from_trans(trans))
def point_from_full_config(config):
return config[-7:-4]
def quat_from_full_config(config):
return config[-4:]
def pose_from_full_config(config):
return np.concatenate([quat_from_full_config(config), point_from_full_config(config)])
def trans_from_full_config(config):
return trans_from_pose(pose_from_full_config(config))
def base_values_from_full_config(config):
return base_values_from_pose(pose_from_full_config(config))
def arm_from_full_config(arm, config):
return config[arm.GetArmIndices()]
def arm_and_base_from_full_config(arm, config):
return np.concatenate([arm_from_full_config(arm, config), base_values_from_full_config(config)])
def full_config_from_pose(pose, config):
new_config = config.copy()
new_config[-7:-4] = pose[-3:]
new_config[-4:] = pose[:4]
return new_config
def full_config_from_trans(trans, config):
return full_config_from_pose(pose_from_trans(trans), config)
def full_config_from_base_values(base_values, config):
_, _, z = point_from_full_config(config)
return full_config_from_pose(pose_from_base_values(base_values, z=z), config)
def get_trans(body):
return body.GetTransform()
def get_pose(body):
return pose_from_trans(get_trans(body))
def get_point(body):
return point_from_trans(get_trans(body))
def get_quat(body):
return quat_from_pose(get_pose(body))
def get_config(body, joint_indices=None):
if joint_indices is None:
return body.GetDOFValues()
return body.GetDOFValues(indices=joint_indices)
def get_active_config(body):
return body.GetActiveDOFValues()
def get_active_indices(body):
return body.GetActiveDOFIndices()
def get_full_config(body, dof_indices=None):
if dof_indices is None:
return body.GetConfigurationValues()
return body.GetConfigurationValues()[dof_indices]
def set_trans(body, trans):
body.SetTransform(trans)
def set_pose(body, pose):
set_trans(body, trans_from_pose(pose))
def set_xy(body, x, y):
point = get_point(body)
set_point(body, np.array([x, y, point[2]]))
def set_point(body, point):
set_pose(body, pose_from_quat_point(get_quat(body), point))
def set_quat(body, quat):
set_pose(body, pose_from_quat_point(quat, get_point(body)))
def set_config(body, config, joint_indices=None):
if joint_indices is None:
body.SetDOFValues(config)
else:
body.SetDOFValues(config, joint_indices)
def set_active_config(body, config):
body.SetActiveDOFValues(config)
def set_active_indices(body, indices):
body.SetActiveDOFs(indices)
def set_full_config(body, config, dof_indices=None):
if dof_indices is None:
body.SetConfigurationValues(config)
else:
full_config = get_full_config(body)
full_config[dof_indices] = config
body.SetConfigurationValues(full_config)
def set_base_values(body, base_values):
trans = get_trans(body)
trans[:3, :3] = rot_from_quat(quat_from_z_rot(base_values[-1]))
trans[:2, 3] = base_values[:2]
set_trans(body, trans)
def set_manipulator_values(manipulator, values):
set_config(manipulator.GetRobot(), values, manipulator.GetArmIndices())
def object_trans_from_manip_trans(manip_trans, grasp):
return np.dot(manip_trans, grasp)
def manip_trans_from_object_trans(object_trans, grasp):
return np.linalg.solve(grasp.T, object_trans.T).T
def compute_grasp(manip_trans, object_trans):
return np.linalg.solve(manip_trans, object_trans)
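# Illustrative sketch (not part of the original module; the function name is
# made up). It shows how the grasp helpers above compose, using only the
# numpy-based constructors so no OpenRAVE environment is required.
def _example_grasp_roundtrip():
    theta = 0.3
    rot_z = np.array([[cos(theta), -sin(theta), 0.],
                      [sin(theta), cos(theta), 0.],
                      [0., 0., 1.]])
    manip_trans = trans_from_rot_point(rot_z, np.array([0.1, 0.2, 0.3]))
    object_trans = trans_from_rot_point(unit_rot(), np.array([0.5, 0.0, 0.3]))
    # the grasp is the object pose expressed in the manipulator frame
    grasp = compute_grasp(manip_trans, object_trans)
    assert np.allclose(object_trans_from_manip_trans(manip_trans, grasp),
                       object_trans)
    assert np.allclose(manip_trans_from_object_trans(object_trans, grasp),
                       manip_trans)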
| |
import types
import asyncio
import socket
from functools import partial
from collections import deque
from .util import (
encode_command,
wait_ok,
_NOTSET,
_set_result,
_set_exception,
coerced_keys_dict,
decode,
parse_url,
)
from .parser import Reader
from .stream import open_connection, open_unix_connection
from .errors import (
ConnectionClosedError,
ConnectionForcedCloseError,
RedisError,
ProtocolError,
ReplyError,
WatchVariableError,
ReadOnlyError,
MaxClientsError
)
from .pubsub import Channel
from .abc import AbcChannel
from .abc import AbcConnection
from .log import logger
__all__ = ['create_connection', 'RedisConnection']
MAX_CHUNK_SIZE = 65536
_PUBSUB_COMMANDS = (
'SUBSCRIBE', b'SUBSCRIBE',
'PSUBSCRIBE', b'PSUBSCRIBE',
'UNSUBSCRIBE', b'UNSUBSCRIBE',
'PUNSUBSCRIBE', b'PUNSUBSCRIBE',
)
async def create_connection(address, *, db=None, password=None, ssl=None,
encoding=None, parser=None, loop=None,
timeout=None, connection_cls=None):
"""Creates redis connection.
Opens connection to Redis server specified by address argument.
Address argument can be one of the following:
* A tuple representing (host, port) pair for TCP connections;
* A string representing either Redis URI or unix domain socket path.
SSL argument is passed through to asyncio.create_connection.
By default SSL/TLS is not used.
    By default no timeout is applied; the `timeout` keyword argument can
    be used to limit the time spent opening the connection.
Encoding argument can be used to decode byte-replies to strings.
By default no decoding is done.
Parser parameter can be used to pass custom Redis protocol parser class.
By default hiredis.Reader is used (unless it is missing or platform
is not CPython).
    The return value is a RedisConnection instance, or an instance of
    connection_cls if one is given.
This function is a coroutine.
"""
assert isinstance(address, (tuple, list, str)), "tuple or str expected"
if isinstance(address, str):
logger.debug("Parsing Redis URI %r", address)
address, options = parse_url(address)
db = options.setdefault('db', db)
password = options.setdefault('password', password)
encoding = options.setdefault('encoding', encoding)
timeout = options.setdefault('timeout', timeout)
if 'ssl' in options:
assert options['ssl'] or (not options['ssl'] and not ssl), (
"Conflicting ssl options are set", options['ssl'], ssl)
ssl = ssl or options['ssl']
if timeout is not None and timeout <= 0:
raise ValueError("Timeout has to be None or a number greater than 0")
if connection_cls:
assert issubclass(connection_cls, AbcConnection),\
"connection_class does not meet the AbcConnection contract"
cls = connection_cls
else:
cls = RedisConnection
if loop is None:
loop = asyncio.get_event_loop()
if isinstance(address, (list, tuple)):
host, port = address
logger.debug("Creating tcp connection to %r", address)
reader, writer = await asyncio.wait_for(open_connection(
host, port, limit=MAX_CHUNK_SIZE, ssl=ssl, loop=loop),
timeout, loop=loop)
sock = writer.transport.get_extra_info('socket')
if sock is not None:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
address = sock.getpeername()
address = tuple(address[:2])
else:
logger.debug("Creating unix connection to %r", address)
reader, writer = await asyncio.wait_for(open_unix_connection(
address, ssl=ssl, limit=MAX_CHUNK_SIZE, loop=loop),
timeout, loop=loop)
sock = writer.transport.get_extra_info('socket')
if sock is not None:
address = sock.getpeername()
conn = cls(reader, writer, encoding=encoding,
address=address, parser=parser,
loop=loop)
try:
if password is not None:
await conn.auth(password)
if db is not None:
await conn.select(db)
except Exception:
conn.close()
await conn.wait_closed()
raise
return conn
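# Illustrative usage sketch (not part of the original module; the helper name
# below is made up for demonstration). It shows the intended call pattern of
# create_connection() together with RedisConnection methods defined below.
async def _example_create_connection():
    conn = await create_connection(('localhost', 6379), encoding='utf-8')
    try:
        await conn.execute('SET', 'my-key', 'value')
        assert await conn.execute('GET', 'my-key') == 'value'
    finally:
        conn.close()
        await conn.wait_closed()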
class RedisConnection(AbcConnection):
"""Redis connection."""
def __init__(self, reader, writer, *, address, encoding=None,
parser=None, loop=None):
if loop is None:
loop = asyncio.get_event_loop()
if parser is None:
parser = Reader
assert callable(parser), (
"Parser argument is not callable", parser)
self._reader = reader
self._writer = writer
self._address = address
self._loop = loop
self._waiters = deque()
self._reader.set_parser(
parser(protocolError=ProtocolError, replyError=ReplyError)
)
self._reader_task = asyncio.ensure_future(self._read_data(),
loop=self._loop)
self._close_msg = None
self._db = 0
self._closing = False
self._closed = False
self._close_waiter = loop.create_future()
self._reader_task.add_done_callback(self._close_waiter.set_result)
self._in_transaction = None
self._transaction_error = None # XXX: never used?
self._in_pubsub = 0
self._pubsub_channels = coerced_keys_dict()
self._pubsub_patterns = coerced_keys_dict()
self._encoding = encoding
def __repr__(self):
return '<RedisConnection [db:{}]>'.format(self._db)
async def _read_data(self):
"""Response reader task."""
last_error = ConnectionClosedError(
"Connection has been closed by server")
while not self._reader.at_eof():
try:
obj = await self._reader.readobj()
except asyncio.CancelledError:
# NOTE: reader can get cancelled from `close()` method only.
last_error = RuntimeError('this is unexpected')
break
except ProtocolError as exc:
# ProtocolError is fatal
# so connection must be closed
if self._in_transaction is not None:
self._transaction_error = exc
last_error = exc
break
except Exception as exc:
# NOTE: for QUIT command connection error can be received
# before response
last_error = exc
break
else:
if (obj == b'' or obj is None) and self._reader.at_eof():
logger.debug("Connection has been closed by server,"
" response: %r", obj)
last_error = ConnectionClosedError("Reader at end of file")
break
if isinstance(obj, MaxClientsError):
last_error = obj
break
if self._in_pubsub:
self._process_pubsub(obj)
else:
self._process_data(obj)
self._closing = True
self._loop.call_soon(self._do_close, last_error)
def _process_data(self, obj):
"""Processes command results."""
assert len(self._waiters) > 0, (type(obj), obj)
waiter, encoding, cb = self._waiters.popleft()
if isinstance(obj, RedisError):
if isinstance(obj, ReplyError):
if obj.args[0].startswith('READONLY'):
obj = ReadOnlyError(obj.args[0])
_set_exception(waiter, obj)
if self._in_transaction is not None:
self._transaction_error = obj
else:
if encoding is not None:
try:
obj = decode(obj, encoding)
except Exception as exc:
_set_exception(waiter, exc)
return
if cb is not None:
try:
obj = cb(obj)
except Exception as exc:
_set_exception(waiter, exc)
return
_set_result(waiter, obj)
if self._in_transaction is not None:
self._in_transaction.append((encoding, cb))
def _process_pubsub(self, obj, *, process_waiters=True):
"""Processes pubsub messages."""
kind, *args, data = obj
if kind in (b'subscribe', b'unsubscribe'):
chan, = args
if process_waiters and self._in_pubsub and self._waiters:
self._process_data(obj)
if kind == b'unsubscribe':
ch = self._pubsub_channels.pop(chan, None)
if ch:
ch.close()
self._in_pubsub = data
elif kind in (b'psubscribe', b'punsubscribe'):
chan, = args
if process_waiters and self._in_pubsub and self._waiters:
self._process_data(obj)
if kind == b'punsubscribe':
ch = self._pubsub_patterns.pop(chan, None)
if ch:
ch.close()
self._in_pubsub = data
elif kind == b'message':
chan, = args
self._pubsub_channels[chan].put_nowait(data)
elif kind == b'pmessage':
pattern, chan = args
self._pubsub_patterns[pattern].put_nowait((chan, data))
elif kind == b'pong':
if process_waiters and self._in_pubsub and self._waiters:
self._process_data(data or b'PONG')
else:
logger.warning("Unknown pubsub message received %r", obj)
def execute(self, command, *args, encoding=_NOTSET):
"""Executes redis command and returns Future waiting for the answer.
Raises:
* TypeError if any of args can not be encoded as bytes.
        * ReplyError on redis '-ERR' responses.
* ProtocolError when response can not be decoded meaning connection
is broken.
"""
if self._reader is None or self._reader.at_eof():
msg = self._close_msg or "Connection closed or corrupted"
raise ConnectionClosedError(msg)
if command is None:
raise TypeError("command must not be None")
if None in args:
raise TypeError("args must not contain None")
command = command.upper().strip()
is_pubsub = command in _PUBSUB_COMMANDS
is_ping = command in ('PING', b'PING')
if self._in_pubsub and not (is_pubsub or is_ping):
raise RedisError("Connection in SUBSCRIBE mode")
elif is_pubsub:
logger.warning("Deprecated. Use `execute_pubsub` method directly")
return self.execute_pubsub(command, *args)
if command in ('SELECT', b'SELECT'):
cb = partial(self._set_db, args=args)
elif command in ('MULTI', b'MULTI'):
cb = self._start_transaction
elif command in ('EXEC', b'EXEC'):
cb = partial(self._end_transaction, discard=False)
elif command in ('DISCARD', b'DISCARD'):
cb = partial(self._end_transaction, discard=True)
else:
cb = None
if encoding is _NOTSET:
encoding = self._encoding
fut = self._loop.create_future()
self._writer.write(encode_command(command, *args))
self._waiters.append((fut, encoding, cb))
return fut
def execute_pubsub(self, command, *channels):
"""Executes redis (p)subscribe/(p)unsubscribe commands.
Returns asyncio.gather coroutine waiting for all channels/patterns
to receive answers.
"""
command = command.upper().strip()
assert command in _PUBSUB_COMMANDS, (
"Pub/Sub command expected", command)
if self._reader is None or self._reader.at_eof():
raise ConnectionClosedError("Connection closed or corrupted")
if None in set(channels):
raise TypeError("args must not contain None")
if not len(channels):
raise TypeError("No channels/patterns supplied")
is_pattern = len(command) in (10, 12)
mkchannel = partial(Channel, is_pattern=is_pattern, loop=self._loop)
channels = [ch if isinstance(ch, AbcChannel) else mkchannel(ch)
for ch in channels]
if not all(ch.is_pattern == is_pattern for ch in channels):
raise ValueError("Not all channels {} match command {}"
.format(channels, command))
cmd = encode_command(command, *(ch.name for ch in channels))
res = []
for ch in channels:
fut = self._loop.create_future()
res.append(fut)
cb = partial(self._update_pubsub, ch=ch)
self._waiters.append((fut, None, cb))
self._writer.write(cmd)
return asyncio.gather(*res, loop=self._loop)
def close(self):
"""Close connection."""
self._do_close(ConnectionForcedCloseError())
def _do_close(self, exc):
if self._closed:
return
self._closed = True
self._closing = False
self._writer.transport.close()
self._reader_task.cancel()
self._reader_task = None
self._writer = None
self._reader = None
if exc is not None:
self._close_msg = str(exc)
while self._waiters:
waiter, *spam = self._waiters.popleft()
logger.debug("Cancelling waiter %r", (waiter, spam))
if exc is None:
_set_exception(waiter, ConnectionForcedCloseError())
else:
_set_exception(waiter, exc)
while self._pubsub_channels:
_, ch = self._pubsub_channels.popitem()
logger.debug("Closing pubsub channel %r", ch)
ch.close(exc)
while self._pubsub_patterns:
_, ch = self._pubsub_patterns.popitem()
logger.debug("Closing pubsub pattern %r", ch)
ch.close(exc)
@property
def closed(self):
"""True if connection is closed."""
closed = self._closing or self._closed
if not closed and self._reader and self._reader.at_eof():
self._closing = closed = True
self._loop.call_soon(self._do_close, None)
return closed
async def wait_closed(self):
"""Coroutine waiting until connection is closed."""
await asyncio.shield(self._close_waiter, loop=self._loop)
@property
def db(self):
"""Currently selected db index."""
return self._db
@property
def encoding(self):
"""Current set codec or None."""
return self._encoding
@property
def address(self):
"""Redis server address, either host-port tuple or str."""
return self._address
def select(self, db):
"""Change the selected database for the current connection."""
if not isinstance(db, int):
raise TypeError("DB must be of int type, not {!r}".format(db))
if db < 0:
raise ValueError("DB must be greater or equal 0, got {!r}"
.format(db))
fut = self.execute('SELECT', db)
return wait_ok(fut)
def _set_db(self, ok, args):
assert ok in {b'OK', 'OK'}, ("Unexpected result of SELECT", ok)
self._db = args[0]
return ok
def _start_transaction(self, ok):
assert self._in_transaction is None, (
"Connection is already in transaction", self._in_transaction)
self._in_transaction = deque()
self._transaction_error = None
return ok
def _end_transaction(self, obj, discard):
assert self._in_transaction is not None, (
"Connection is not in transaction", obj)
self._transaction_error = None
recall, self._in_transaction = self._in_transaction, None
        recall.popleft()  # ignore the first entry (it's _start_transaction, from MULTI)
if discard:
return obj
assert isinstance(obj, list) or (obj is None and not discard), (
"Unexpected MULTI/EXEC result", obj, recall)
# TODO: need to be able to re-try transaction
if obj is None:
err = WatchVariableError("WATCH variable has changed")
obj = [err] * len(recall)
assert len(obj) == len(recall), (
"Wrong number of result items in mutli-exec", obj, recall)
res = []
for o, (encoding, cb) in zip(obj, recall):
if not isinstance(o, RedisError):
try:
if encoding:
o = decode(o, encoding)
if cb:
o = cb(o)
except Exception as err:
res.append(err)
continue
res.append(o)
return res
def _update_pubsub(self, obj, *, ch):
kind, *pattern, channel, subscriptions = obj
self._in_pubsub, was_in_pubsub = subscriptions, self._in_pubsub
# XXX: the channels/patterns storage should be refactored.
        #   if the code that is supposed to read from a channel/pattern
        #   fails (an exception in the reader or elsewhere), the channel
        #   object will still reside in memory and leak memory
        #   (messages will keep being put in its queue).
if kind == b'subscribe' and channel not in self._pubsub_channels:
self._pubsub_channels[channel] = ch
elif kind == b'psubscribe' and channel not in self._pubsub_patterns:
self._pubsub_patterns[channel] = ch
if not was_in_pubsub:
self._process_pubsub(obj, process_waiters=False)
return obj
@property
def in_transaction(self):
"""Set to True when MULTI command was issued."""
return self._in_transaction is not None
@property
def in_pubsub(self):
"""Indicates that connection is in PUB/SUB mode.
Provides the number of subscribed channels.
"""
return self._in_pubsub
@property
def pubsub_channels(self):
"""Returns read-only channels dict."""
return types.MappingProxyType(self._pubsub_channels)
@property
def pubsub_patterns(self):
"""Returns read-only patterns dict."""
return types.MappingProxyType(self._pubsub_patterns)
def auth(self, password):
"""Authenticate to server."""
fut = self.execute('AUTH', password)
return wait_ok(fut)
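# Illustrative Pub/Sub sketch (not part of the original module; the helper name
# is made up). execute_pubsub() resolves once the server confirms the
# (un)subscription; messages then arrive on the Channel objects exposed via
# the pubsub_channels mapping.
async def _example_pubsub(conn):
    await conn.execute_pubsub('SUBSCRIBE', 'news')
    channel = conn.pubsub_channels['news']
    message = await channel.get()  # wait for one published message
    await conn.execute_pubsub('UNSUBSCRIBE', 'news')
    return message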
| |
###################################################
# header items.py
# This file contains declarations for items
# DO NOT EDIT THIS FILE!
###################################################
#item flags
itp_type_horse = 0x0000000000000001
itp_type_one_handed_wpn = 0x0000000000000002
itp_type_two_handed_wpn = 0x0000000000000003
itp_type_polearm = 0x0000000000000004
itp_type_arrows = 0x0000000000000005
itp_type_bolts = 0x0000000000000006
itp_type_shield = 0x0000000000000007
itp_type_bow = 0x0000000000000008
itp_type_crossbow = 0x0000000000000009
itp_type_thrown = 0x000000000000000a
itp_type_goods = 0x000000000000000b
itp_type_head_armor = 0x000000000000000c
itp_type_body_armor = 0x000000000000000d
itp_type_foot_armor = 0x000000000000000e
itp_type_hand_armor = 0x000000000000000f
itp_type_pistol = 0x0000000000000010
itp_type_musket = 0x0000000000000011
itp_type_bullets = 0x0000000000000012
itp_type_animal = 0x0000000000000013
itp_type_book = 0x0000000000000014
itp_force_attach_left_hand = 0x0000000000000100
itp_force_attach_right_hand = 0x0000000000000200
itp_force_attach_left_forearm = 0x0000000000000300
itp_attach_armature = 0x0000000000000f00
itp_attachment_mask = 0x0000000000000f00
itp_unique = 0x0000000000001000
itp_always_loot = 0x0000000000002000 # Was itp_melee
itp_no_parry = 0x0000000000004000 # Was itp_spear
itp_default_ammo = 0x0000000000008000
itp_merchandise = 0x0000000000010000
itp_wooden_attack = 0x0000000000020000
itp_wooden_parry = 0x0000000000040000
itp_food = 0x0000000000080000
itp_cant_reload_on_horseback = 0x0000000000100000
itp_two_handed = 0x0000000000200000
itp_primary = 0x0000000000400000
itp_secondary = 0x0000000000800000
itp_covers_legs = 0x0000000001000000
itp_doesnt_cover_hair = 0x0000000001000000
itp_can_penetrate_shield = 0x0000000001000000
itp_consumable = 0x0000000002000000
itp_bonus_against_shield = 0x0000000004000000
itp_penalty_with_shield = 0x0000000008000000
itp_cant_use_on_horseback = 0x0000000010000000
itp_civilian = 0x0000000020000000
itp_next_item_as_melee = 0x0000000020000000
itp_fit_to_head = 0x0000000040000000
itp_offset_lance = 0x0000000040000000
itp_covers_head = 0x0000000080000000
itp_couchable = 0x0000000080000000
itp_crush_through = 0x0000000100000000
#itp_knock_back = 0x0000000200000000 being used?
itp_remove_item_on_use = 0x0000000400000000
itp_unbalanced = 0x0000000800000000
itp_covers_beard = 0x0000001000000000
itp_no_pick_up_from_ground = 0x0000002000000000
itp_can_knock_down = 0x0000004000000000
itp_covers_hair = 0x0000008000000000 #remove hair mesh for armors only
itp_force_show_body = 0x0000010000000000 # forces showing body (works on body armor items)
itp_force_show_left_hand = 0x0000020000000000 # forces showing left hand (works on hand armor items)
itp_force_show_right_hand = 0x0000040000000000 # forces showing right hand (works on hand armor items)
itp_extra_penetration = 0x0000100000000000
itp_has_bayonet = 0x0000200000000000
itp_cant_reload_while_moving = 0x0000400000000000
itp_ignore_gravity = 0x0000800000000000
itp_ignore_friction = 0x0001000000000000
itp_is_pike = 0x0002000000000000
itp_offset_musket = 0x0004000000000000
itp_no_blur = 0x0008000000000000
itp_cant_reload_while_moving_mounted = 0x0010000000000000
itp_has_upper_stab = 0x0020000000000000
itp_kill_info_mask = 0x0700000000000000
itp_kill_info_bits = 56
#equipment slots
ek_item_0 = 0
ek_item_1 = 1
ek_item_2 = 2
ek_item_3 = 3
ek_head = 4
ek_body = 5
ek_foot = 6
ek_gloves = 7
ek_horse = 8
ek_food = 9
max_inventory_items = 96
num_equipment_kinds = ek_food + 1
num_weapon_proficiencies = 7
#damage types:
cut = 0
pierce = 1
blunt = 2
ibf_armor_mask = 0x00000000000000000000000ff
ibf_damage_mask = 0x00000000000000000000003ff
ibf_10bit_mask = 0x00000000000000000000003ff
ibf_head_armor_bits = 0
ibf_body_armor_bits = 8
ibf_leg_armor_bits = 16
ibf_weight_bits = 24
ibf_difficulty_bits = 32
ibf_hitpoints_mask = 0x0000ffff
ibf_hitpoints_bits = 40
iwf_swing_damage_bits = 50
iwf_swing_damage_type_bits = 58
iwf_thrust_damage_bits = 60
iwf_thrust_damage_type_bits = 68
iwf_weapon_length_bits = 70
iwf_speed_rating_bits = 80
iwf_shoot_speed_bits = 90
iwf_max_ammo_bits = 100 # use this for shield endurance too?
iwf_abundance_bits = 110
iwf_accuracy_bits = 16 #reuse leg_armor for accuracy
iwf_damage_type_bits = 8
def get_weight(y):
a = (y >> ibf_weight_bits) & ibf_armor_mask
return 0.25 * a
def get_head_armor(y):
return (y >> ibf_head_armor_bits) & ibf_armor_mask
def get_body_armor(y):
return (y >> ibf_body_armor_bits) & ibf_armor_mask
def get_leg_armor(y):
return (y >> ibf_leg_armor_bits) & ibf_armor_mask
def get_difficulty(y):
return (y >> ibf_difficulty_bits) & ibf_armor_mask
def get_hit_points(y):
return (y >> ibf_hitpoints_bits) & ibf_hitpoints_mask
def get_speed_rating(y):
return (y >> iwf_speed_rating_bits) & ibf_armor_mask
def get_missile_speed(y):
return (y >> iwf_shoot_speed_bits) & ibf_10bit_mask
def get_weapon_length(y):
return ((y >> iwf_weapon_length_bits) & ibf_10bit_mask)
def get_max_ammo(y):
return (y >> iwf_max_ammo_bits) & ibf_armor_mask
def get_swing_damage(y):
return (y >> iwf_swing_damage_bits) & ibf_damage_mask
def get_thrust_damage(y):
return (y >> iwf_thrust_damage_bits) & ibf_damage_mask
def get_abundance(y):
abnd = (y >> iwf_abundance_bits) & ibf_armor_mask
if (abnd == 0):
abnd = 100
return abnd
def custom_kill_info(x): # you have to add an ico_custom_x mesh (where x is a number between 1 and 7) in order to display it correctly; bignum is defined in header_common.py of the module system
return (((bignum | x) & (itp_kill_info_mask >> itp_kill_info_bits)) << itp_kill_info_bits)
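# Illustrative sketch (not part of the original header; the function name is
# made up). Item statistics are packed into one integer at the bit offsets
# defined above, and the get_* helpers decode them again.
def _example_pack_and_decode_stats():
    packed_stats = ((12 << ibf_head_armor_bits) |
                    (38 << ibf_body_armor_bits) |
                    (14 << ibf_leg_armor_bits) |
                    (8 << ibf_weight_bits) |  # weight is stored in 0.25 units
                    (7 << ibf_difficulty_bits))
    assert get_head_armor(packed_stats) == 12
    assert get_body_armor(packed_stats) == 38
    assert get_leg_armor(packed_stats) == 14
    assert get_weight(packed_stats) == 2.0  # 8 * 0.25
    assert get_difficulty(packed_stats) == 7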
# Item capabilities:
itcf_thrust_onehanded = 0x0000000000000001
itcf_overswing_onehanded = 0x0000000000000002
itcf_slashright_onehanded = 0x0000000000000004
itcf_slashleft_onehanded = 0x0000000000000008
itcf_thrust_twohanded = 0x0000000000000010
itcf_overswing_twohanded = 0x0000000000000020
itcf_slashright_twohanded = 0x0000000000000040
itcf_slashleft_twohanded = 0x0000000000000080
itcf_thrust_polearm = 0x0000000000000100
itcf_overswing_polearm = 0x0000000000000200
itcf_slashright_polearm = 0x0000000000000400
itcf_slashleft_polearm = 0x0000000000000800
itcf_shoot_bow = 0x0000000000001000
itcf_shoot_javelin = 0x0000000000002000
itcf_shoot_crossbow = 0x0000000000004000
itcf_throw_stone = 0x0000000000010000
itcf_throw_knife = 0x0000000000020000
itcf_throw_axe = 0x0000000000030000
itcf_throw_javelin = 0x0000000000040000
itcf_shoot_pistol = 0x0000000000070000
itcf_shoot_musket = 0x0000000000080000
itcf_shoot_mask = 0x00000000000ff000
itcf_horseback_thrust_onehanded = 0x0000000000100000
itcf_horseback_overswing_right_onehanded = 0x0000000000200000
itcf_horseback_overswing_left_onehanded = 0x0000000000400000
itcf_horseback_slashright_onehanded = 0x0000000000800000
itcf_horseback_slashleft_onehanded = 0x0000000001000000
itcf_thrust_onehanded_lance = 0x0000000004000000
itcf_thrust_onehanded_lance_horseback = 0x0000000008000000
itcf_carry_mask = 0x00000007f0000000
itcf_carry_sword_left_hip = 0x0000000010000000
itcf_carry_axe_left_hip = 0x0000000020000000
itcf_carry_dagger_front_left = 0x0000000030000000
itcf_carry_dagger_front_right = 0x0000000040000000
itcf_carry_quiver_front_right = 0x0000000050000000
itcf_carry_quiver_back_right = 0x0000000060000000
itcf_carry_quiver_right_vertical = 0x0000000070000000
itcf_carry_quiver_back = 0x0000000080000000
itcf_carry_revolver_right = 0x0000000090000000
itcf_carry_pistol_front_left = 0x00000000a0000000
itcf_carry_bowcase_left = 0x00000000b0000000
itcf_carry_mace_left_hip = 0x00000000c0000000
itcf_carry_axe_back = 0x0000000100000000
itcf_carry_sword_back = 0x0000000110000000
itcf_carry_kite_shield = 0x0000000120000000
itcf_carry_round_shield = 0x0000000130000000
itcf_carry_buckler_left = 0x0000000140000000
itcf_carry_crossbow_back = 0x0000000150000000
itcf_carry_bow_back = 0x0000000160000000
itcf_carry_spear = 0x0000000170000000
itcf_carry_board_shield = 0x0000000180000000
itcf_carry_katana = 0x0000000210000000
itcf_carry_wakizashi = 0x0000000220000000
itcf_show_holster_when_drawn = 0x0000000800000000
itcf_reload_pistol = 0x0000007000000000
itcf_reload_musket = 0x0000008000000000
itcf_reload_mask = 0x000000f000000000
itcf_parry_forward_onehanded = 0x0000010000000000
itcf_parry_up_onehanded = 0x0000020000000000
itcf_parry_right_onehanded = 0x0000040000000000
itcf_parry_left_onehanded = 0x0000080000000000
itcf_parry_forward_twohanded = 0x0000100000000000
itcf_parry_up_twohanded = 0x0000200000000000
itcf_parry_right_twohanded = 0x0000400000000000
itcf_parry_left_twohanded = 0x0000800000000000
itcf_parry_forward_polearm = 0x0001000000000000
itcf_parry_up_polearm = 0x0002000000000000
itcf_parry_right_polearm = 0x0004000000000000
itcf_parry_left_polearm = 0x0008000000000000
itcf_horseback_slash_polearm = 0x0010000000000000
itcf_overswing_spear = 0x0020000000000000
itcf_overswing_musket = 0x0040000000000000
itcf_thrust_musket = 0x0080000000000000
itcf_force_64_bits = 0x8000000000000000
#combined capabilities
itc_cleaver = itcf_force_64_bits | (itcf_overswing_onehanded|itcf_slashright_onehanded|itcf_slashleft_onehanded | itcf_horseback_slashright_onehanded|itcf_horseback_slashleft_onehanded)
itc_dagger = itc_cleaver | itcf_thrust_onehanded
itc_parry_onehanded = itcf_force_64_bits | itcf_parry_forward_onehanded| itcf_parry_up_onehanded | itcf_parry_right_onehanded |itcf_parry_left_onehanded
itc_parry_two_handed = itcf_force_64_bits | itcf_parry_forward_twohanded | itcf_parry_up_twohanded | itcf_parry_right_twohanded | itcf_parry_left_twohanded
itc_longsword = itc_dagger | itc_parry_onehanded
itc_scimitar = itc_cleaver | itc_parry_onehanded
itc_cut_two_handed = itcf_force_64_bits | (itcf_slashright_twohanded | itcf_slashleft_twohanded | itcf_overswing_twohanded |
itcf_horseback_slashright_onehanded|itcf_horseback_slashleft_onehanded)
itc_greatsword = itc_cut_two_handed | itcf_thrust_twohanded | itc_parry_two_handed |itcf_thrust_onehanded_lance
itc_nodachi = itc_cut_two_handed | itc_parry_two_handed
itc_bastardsword = itc_cut_two_handed | itcf_thrust_twohanded | itc_parry_two_handed |itc_dagger
itc_morningstar = itc_cut_two_handed | itc_parry_two_handed |itc_cleaver
itc_parry_polearm = itcf_parry_forward_polearm | itcf_parry_up_polearm | itcf_parry_right_polearm | itcf_parry_left_polearm
itc_poleaxe = itc_parry_polearm| itcf_overswing_polearm |itcf_thrust_polearm|itcf_slashright_polearm|itcf_slashleft_polearm
itc_staff = itc_parry_polearm| itcf_thrust_onehanded_lance |itcf_thrust_onehanded_lance_horseback| itcf_overswing_polearm |itcf_thrust_polearm|itcf_slashright_polearm|itcf_slashleft_polearm
itc_spear = itc_parry_polearm| itcf_thrust_onehanded_lance |itcf_thrust_onehanded_lance_horseback | itcf_thrust_polearm
itc_cutting_spear = itc_spear|itcf_overswing_polearm
itc_pike = itcf_thrust_onehanded_lance |itcf_thrust_onehanded_lance_horseback | itcf_thrust_polearm
itc_guandao = itc_parry_polearm|itcf_overswing_polearm|itcf_thrust_polearm|itcf_slashright_polearm|itcf_slashleft_polearm|itcf_horseback_slashright_onehanded|itcf_horseback_slashleft_onehanded|itcf_horseback_slash_polearm
itc_musket_melee = itc_parry_polearm|itcf_overswing_musket|itcf_thrust_musket|itcf_slashright_twohanded|itcf_slashleft_twohanded
itc_greatlance = itcf_thrust_onehanded_lance | itcf_thrust_onehanded_lance_horseback | itcf_thrust_polearm
itc_lightsaber_standard = itc_cut_two_handed | itc_parry_two_handed |itc_dagger
#EXtra Mesh IDs
ixmesh_inventory = 0x1000000000000000
ixmesh_flying_ammo = 0x2000000000000000
ixmesh_carry = 0x3000000000000000
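# Illustrative sketch (not part of the original header; the function name is
# made up). The itc_* constants above are plain bitmask unions of itcf_*
# capability flags, so a capability can be tested with a bitwise AND.
def _example_capability_checks():
    assert itc_longsword & itcf_thrust_onehanded  # longswords can thrust
    assert itc_scimitar & itcf_slashleft_onehanded  # scimitars can slash
    assert not (itc_pike & itcf_slashright_polearm)  # pikes only thrust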
| |
# Copyright 2013 IBM Corp.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
import math
import time
import microversion_parse
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import strutils
import six
import webob
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import versioned_method
from nova import exception
from nova import i18n
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova import utils
from nova import wsgi
LOG = logging.getLogger(__name__)
_SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.compute+json',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'json',
'application/json': 'json',
}
# These are typically automatically created by routes as either defaults
# collection or member methods.
_ROUTES_METHODS = [
'create',
'delete',
'show',
'update',
]
_METHODS_WITH_BODY = [
'POST',
'PUT',
]
# The default api version request if none is requested in the headers
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
DEFAULT_API_VERSION = "2.1"
# name of attribute to keep version method information
VER_METHOD_ATTR = 'versioned_methods'
# Names of headers used by clients to request a specific version
# of the REST API
API_VERSION_REQUEST_HEADER = 'OpenStack-API-Version'
LEGACY_API_VERSION_REQUEST_HEADER = 'X-OpenStack-Nova-API-Version'
ENV_LEGACY_V2 = 'openstack.legacy_v2'
def get_supported_content_types():
return _SUPPORTED_CONTENT_TYPES
def get_media_map():
return dict(_MEDIA_TYPE_MAP.items())
# NOTE(rlrossit): This function allows a get on both a dict-like and an
# object-like object. cache_db_items() is used on both versioned objects and
# dicts, so the function can't be totally changed over to [] syntax, nor
# can it be changed over to use getattr().
def item_get(item, item_key):
if hasattr(item, '__getitem__'):
return item[item_key]
else:
return getattr(item, item_key)
class Request(wsgi.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def __init__(self, *args, **kwargs):
super(Request, self).__init__(*args, **kwargs)
self._extension_data = {'db_items': {}}
if not hasattr(self, 'api_version_request'):
self.api_version_request = api_version.APIVersionRequest()
def cache_db_items(self, key, items, item_key='id'):
"""Allow API methods to store objects from a DB query to be
used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
db_items = self._extension_data['db_items'].setdefault(key, {})
for item in items:
db_items[item_get(item, item_key)] = item
def get_db_items(self, key):
"""Allow an API extension to get previously stored objects within
the same API request.
Note that the object data will be slightly stale.
"""
return self._extension_data['db_items'][key]
def get_db_item(self, key, item_key):
"""Allow an API extension to get a previously stored object
within the same API request.
Note that the object data will be slightly stale.
"""
return self.get_db_items(key).get(item_key)
def cache_db_instances(self, instances):
self.cache_db_items('instances', instances, 'uuid')
def cache_db_instance(self, instance):
self.cache_db_items('instances', [instance], 'uuid')
def get_db_instances(self):
return self.get_db_items('instances')
def get_db_instance(self, instance_uuid):
return self.get_db_item('instances', instance_uuid)
def cache_db_flavors(self, flavors):
self.cache_db_items('flavors', flavors, 'flavorid')
def cache_db_flavor(self, flavor):
self.cache_db_items('flavors', [flavor], 'flavorid')
def get_db_flavors(self):
return self.get_db_items('flavors')
def get_db_flavor(self, flavorid):
return self.get_db_item('flavors', flavorid)
def cache_db_compute_nodes(self, compute_nodes):
self.cache_db_items('compute_nodes', compute_nodes, 'id')
def cache_db_compute_node(self, compute_node):
self.cache_db_items('compute_nodes', [compute_node], 'id')
def get_db_compute_nodes(self):
return self.get_db_items('compute_nodes')
def get_db_compute_node(self, id):
return self.get_db_item('compute_nodes', id)
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'nova.best_content_type' not in self.environ:
# Calculate the best MIME type
content_type = None
# Check URL path suffix
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in get_supported_content_types():
content_type = possible_type
if not content_type:
content_type = self.accept.best_match(
get_supported_content_types())
self.environ['nova.best_content_type'] = (content_type or
'application/json')
return self.environ['nova.best_content_type']
def get_content_type(self):
"""Determine content type of the request body.
Does not do any body introspection, only checks header
"""
if "Content-Type" not in self.headers:
return None
content_type = self.content_type
# NOTE(markmc): text/plain is the default for eventlet and
# other webservers which use mimetools.Message.gettype()
# whereas twisted defaults to ''.
if not content_type or content_type == 'text/plain':
return None
if content_type not in get_supported_content_types():
raise exception.InvalidContentType(content_type=content_type)
return content_type
def best_match_language(self):
"""Determine the best available language for the request.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not self.accept_language:
return None
return self.accept_language.best_match(
i18n.get_available_languages())
def set_api_version_request(self):
"""Set API version request based on the request header information."""
hdr_string = microversion_parse.get_version(
self.headers, service_type='compute',
legacy_headers=[LEGACY_API_VERSION_REQUEST_HEADER])
if hdr_string is None:
self.api_version_request = api_version.APIVersionRequest(
api_version.DEFAULT_API_VERSION)
elif hdr_string == 'latest':
# 'latest' is a special keyword which is equivalent to
# requesting the maximum version of the API supported
self.api_version_request = api_version.max_api_version()
else:
self.api_version_request = api_version.APIVersionRequest(
hdr_string)
# Check that the version requested is within the global
# minimum/maximum of supported API versions
if not self.api_version_request.matches(
api_version.min_api_version(),
api_version.max_api_version()):
raise exception.InvalidGlobalAPIVersion(
req_ver=self.api_version_request.get_string(),
min_ver=api_version.min_api_version().get_string(),
max_ver=api_version.max_api_version().get_string())
def set_legacy_v2(self):
self.environ[ENV_LEGACY_V2] = True
def is_legacy_v2(self):
return self.environ.get(ENV_LEGACY_V2, False)
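# Illustrative sketch (not part of the original module; the function name is
# made up). It shows the per-request DB-item cache described in
# Request.cache_db_items() / get_db_item() above.
def _example_request_cache(req, instances):
    # cache by uuid so extensions handling the same request reuse the rows
    req.cache_db_items('instances', instances, item_key='uuid')
    first_uuid = item_get(instances[0], 'uuid')
    return req.get_db_item('instances', first_uuid)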
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class JSONDeserializer(ActionDispatcher):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class JSONDictSerializer(ActionDispatcher):
"""Default JSON request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return six.text_type(jsonutils.dumps(data))
def response(code):
"""Attaches response code to a method.
This decorator associates a response code with a method. Note
that the function attributes are directly manipulated; the method
is not wrapped.
"""
def decorator(func):
func.wsgi_code = code
return func
return decorator
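# Illustrative sketch (not part of the original module; the controller and
# function names are made up). @response() only annotates the method with
# wsgi_code; the dispatch machinery later reads it to pick the HTTP status.
def _example_response_decorator():
    class ExampleController(object):
        @response(202)
        def create(self, req, body):
            return {'server': {'status': 'BUILD'}}

    # the method itself is not wrapped, only annotated
    assert ExampleController.create.wsgi_code == 202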
class ResponseObject(object):
"""Bundles a response object
Object that app methods may return in order to allow its response
to be modified by extensions in the code. Its use is optional (and
should only be used if you really know what you are doing).
"""
def __init__(self, obj, code=None, headers=None):
"""Builds a response object."""
self.obj = obj
self._default_code = 200
self._code = code
self._headers = headers or {}
self.serializer = JSONDictSerializer()
def __getitem__(self, key):
"""Retrieves a header with the given name."""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""Sets a header with the given name to the given value."""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""Deletes the header with the given name."""
del self._headers[key.lower()]
def serialize(self, request, content_type):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
serializer = self.serializer
body = None
if self.obj is not None:
body = serializer.serialize(self.obj)
response = webob.Response(body=body)
if response.headers.get('Content-Length'):
# NOTE(andreykurilin): we need to encode 'Content-Length' header,
            # since webob.Response auto sets it if the "body" attr is present.
# https://github.com/Pylons/webob/blob/1.5.0b0/webob/response.py#L147
response.headers['Content-Length'] = utils.utf8(
response.headers['Content-Length'])
response.status_int = self.code
for hdr, value in self._headers.items():
response.headers[hdr] = utils.utf8(value)
response.headers['Content-Type'] = utils.utf8(content_type)
return response
@property
def code(self):
"""Retrieve the response status."""
return self._code or self._default_code
@property
def headers(self):
"""Retrieve the headers."""
return self._headers.copy()
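# Illustrative sketch (not part of the original module; the function name is
# made up). ResponseObject carries the body plus an optional status code and
# headers; header names are normalized to lower case.
def _example_response_object():
    robj = ResponseObject({'server': {'id': 'abc'}}, code=202)
    robj['X-Compute-Host'] = 'node1'
    assert robj.code == 202
    assert robj['x-compute-host'] == 'node1'
    assert 'x-compute-host' in robj.headers
    return robj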
def action_peek(body):
"""Determine action to invoke.
This looks inside the json body and fetches out the action method
name.
"""
try:
decoded = jsonutils.loads(body)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
# Make sure there's exactly one key...
if len(decoded) != 1:
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
# Return the action name
return list(decoded.keys())[0]
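# Illustrative sketch (not part of the original module; the function name is
# made up). Server "action" requests carry exactly one top-level key naming
# the action, which action_peek() extracts without validating the payload.
def _example_action_peek():
    assert action_peek('{"reboot": {"type": "HARD"}}') == 'reboot'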
class ResourceExceptionHandler(object):
"""Context manager to handle Resource exceptions.
Used when processing exceptions generated by API implementation
methods (or their extensions). Converts most exceptions to Fault
exceptions, with the appropriate logging.
"""
def __enter__(self):
return None
def __exit__(self, ex_type, ex_value, ex_traceback):
if not ex_value:
return True
if isinstance(ex_value, exception.Forbidden):
raise Fault(webob.exc.HTTPForbidden(
explanation=ex_value.format_message()))
elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
raise
elif isinstance(ex_value, exception.Invalid):
raise Fault(exception.ConvertedException(
code=ex_value.code,
explanation=ex_value.format_message()))
elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_LE('Exception handling resource: %s'), ex_value,
exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_LI("Fault thrown: %s"), ex_value)
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
raise Fault(ex_value)
# We didn't handle the exception
return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
support_api_request_version = False
def __init__(self, controller, inherits=None):
""":param controller: object that implement methods created by routes
lib
:param inherits: another resource object that this resource should
inherit extensions from. Any action extensions that
are applied to the parent resource will also apply
to this resource.
"""
self.controller = controller
self.default_serializers = dict(json=JSONDictSerializer)
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
self.register_actions(controller)
# Save a mapping of extensions
self.wsgi_extensions = {}
self.wsgi_action_extensions = {}
self.inherits = inherits
def register_actions(self, controller):
"""Registers controller actions with this resource."""
actions = getattr(controller, 'wsgi_actions', {})
for key, method_name in actions.items():
self.wsgi_actions[key] = getattr(controller, method_name)
def register_extensions(self, controller):
"""Registers controller extensions with this resource."""
extensions = getattr(controller, 'wsgi_extensions', [])
for method_name, action_name in extensions:
# Look up the extending method
extension = getattr(controller, method_name)
if action_name:
# Extending an action...
if action_name not in self.wsgi_action_extensions:
self.wsgi_action_extensions[action_name] = []
self.wsgi_action_extensions[action_name].append(extension)
else:
# Extending a regular method
if method_name not in self.wsgi_extensions:
self.wsgi_extensions[method_name] = []
self.wsgi_extensions[method_name].append(extension)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
# NOTE(Vek): Check for get_action_args() override in the
# controller
if hasattr(self.controller, 'get_action_args'):
return self.controller.get_action_args(request_environment)
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except (KeyError, IndexError, AttributeError):
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
def get_body(self, request):
content_type = request.get_content_type()
return content_type, request.body
def deserialize(self, body):
return JSONDeserializer().deserialize(body)
# NOTE(sdague): I didn't start the fire, however here is what all
# of this is about.
#
# In the legacy v2 code stack, extensions could extend actions
# with a generator that let 1 method be split into a top and
# bottom half. The top half gets executed before the main
# processing of the request (so effectively gets to modify the
# request before it gets to the main method).
#
# Returning a response triggers a shortcut to fail out. The
# response will nearly always be a failure condition, as it ends
# up skipping further processing one level up from here.
#
# This then passes the list of extensions on, in reverse order.
# post_process will run through all of those, again with the same
# basic logic.
#
# In tree this is only used in the legacy v2 stack, and only in
# the DiskConfig and SchedulerHints from what I can see.
#
# pre_process_extensions can be removed when the legacy v2 code
# goes away. post_process_extensions can be massively simplified
# at that point.
def pre_process_extensions(self, extensions, request, action_args):
# List of callables for post-processing extensions
post = []
for ext in extensions:
if inspect.isgeneratorfunction(ext):
response = None
# If it's a generator function, the part before the
# yield is the preprocessing stage
try:
with ResourceExceptionHandler():
gen = ext(req=request, **action_args)
response = next(gen)
except Fault as ex:
response = ex
# We had a response...
if response:
return response, []
# No response, queue up generator for post-processing
post.append(gen)
else:
# Regular functions only perform post-processing
post.append(ext)
# If response is None, it means we keep going. We reverse the
# extension list for post-processing.
return None, reversed(post)
def post_process_extensions(self, extensions, resp_obj, request,
action_args):
for ext in extensions:
response = None
if inspect.isgenerator(ext):
# If it's a generator, run the second half of
# processing
try:
with ResourceExceptionHandler():
response = ext.send(resp_obj)
except StopIteration:
# Normal exit of generator
continue
except Fault as ex:
response = ex
else:
# Regular functions get post-processing...
try:
with ResourceExceptionHandler():
response = ext(req=request, resp_obj=resp_obj,
**action_args)
except exception.VersionNotFoundForAPIMethod:
# If an attached extension (@wsgi.extends) for the
# method has no version match, it's not an error. We
# just don't run the extends code.
continue
except Fault as ex:
response = ex
# We had a response...
if response:
return response
return None
def _should_have_body(self, request):
return request.method in _METHODS_WITH_BODY
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
if self.support_api_request_version:
# Set the version of the API requested based on the header
try:
request.set_api_version_request()
except exception.InvalidAPIVersionString as e:
return Fault(webob.exc.HTTPBadRequest(
explanation=e.format_message()))
except exception.InvalidGlobalAPIVersion as e:
return Fault(webob.exc.HTTPNotAcceptable(
explanation=e.format_message()))
# Identify the action, its arguments, and the requested
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
# NOTE(sdague): we filter out InvalidContentTypes early so we
# know everything is good from here on out.
try:
content_type, body = self.get_body(request)
accept = request.best_match_content_type()
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPUnsupportedMediaType(explanation=msg))
# NOTE(Vek): Splitting the function up this way allows for
# auditing by external tools that wrap the existing
# function. If we try to audit __call__(), we can
# run into troubles due to the @webob.dec.wsgify()
# decorator.
return self._process_stack(request, action, action_args,
content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
"""Implement the processing stack."""
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
if body:
msg = _("Action: '%(action)s', calling method: %(meth)s, body: "
"%(body)s") % {'action': action,
'body': six.text_type(body, 'utf-8'),
'meth': str(meth)}
LOG.debug(strutils.mask_password(msg))
else:
LOG.debug("Calling method '%(meth)s'",
{'meth': str(meth)})
# Now, deserialize the request body...
try:
contents = {}
if self._should_have_body(request):
# allow empty body with PUT and POST
if request.content_length == 0:
contents = {'body': None}
else:
contents = self.deserialize(body)
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
context = request.environ.get('nova.context')
if (context and project_id and (project_id != context.project_id)):
msg = _("Malformed request URL: URL's project_id '%(project_id)s'"
" doesn't match Context's project_id"
" '%(context_project_id)s'") % \
{'project_id': project_id,
'context_project_id': context.project_id}
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Run pre-processing extensions
response, post = self.pre_process_extensions(extensions,
request, action_args)
if not response:
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if type(action_result) is dict or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
# Do a preserialize to set up the response object
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept)
if hasattr(response, 'headers'):
for hdr, val in list(response.headers.items()):
# Headers must be utf-8 strings
response.headers[hdr] = utils.utf8(val)
if not request.api_version_request.is_null():
response.headers[API_VERSION_REQUEST_HEADER] = \
'compute ' + request.api_version_request.get_string()
response.headers[LEGACY_API_VERSION_REQUEST_HEADER] = \
request.api_version_request.get_string()
response.headers.add('Vary', API_VERSION_REQUEST_HEADER)
response.headers.add('Vary', LEGACY_API_VERSION_REQUEST_HEADER)
return response
def get_method(self, request, action, content_type, body):
meth, extensions = self._get_method(request,
action,
content_type,
body)
if self.inherits:
_meth, parent_ext = self.inherits.get_method(request,
action,
content_type,
body)
extensions.extend(parent_ext)
return meth, extensions
def _get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
# Look up the method
try:
if not self.controller:
meth = getattr(self, action)
else:
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
action not in _ROUTES_METHODS + ['action']):
# Propagate the error
raise
else:
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
action_name = action_peek(body)
else:
action_name = action
# Look up the action method
return (self.wsgi_actions[action_name],
self.wsgi_action_extensions.get(action_name, []))
def dispatch(self, method, request, action_args):
"""Dispatch a call to the action-specific method."""
try:
return method(req=request, **action_args)
except exception.VersionNotFoundForAPIMethod:
# We deliberately don't return any message information
# about the exception to the user so it looks as if
# the method is simply not implemented.
return Fault(webob.exc.HTTPNotFound())
class ResourceV21(Resource):
support_api_request_version = True
def action(name):
"""Mark a function as an action.
The given name will be taken as the action key in the body.
This is also overloaded to allow extensions to provide
non-extending definitions of create and delete operations.
"""
def decorator(func):
func.wsgi_action = name
return func
return decorator
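# Hedged usage sketch: a hypothetical action method marked with @action; the
# ControllerMetaclass defined further below collects ``wsgi_action`` attributes
# into the class-level ``wsgi_actions`` map keyed by this name.
def _example_action_decorator():
    @action('os-resetState')
    def _reset_state(self, req, id, body):
        return {'status': 'reset'}
    assert _reset_state.wsgi_action == 'os-resetState'
    return _reset_state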
def extends(*args, **kwargs):
"""Indicate a function extends an operation.
Can be used as either::
@extends
def index(...):
pass
or as::
@extends(action='resize')
def _action_resize(...):
pass
"""
def decorator(func):
# Store enough information to find what we're extending
func.wsgi_extends = (func.__name__, kwargs.get('action'))
return func
# If we have positional arguments, call the decorator
if args:
return decorator(*args)
# OK, return the decorator instead
return decorator
class ControllerMetaclass(type):
"""Controller metaclass.
This metaclass automates the task of assembling a dictionary
mapping action keys to method names.
"""
def __new__(mcs, name, bases, cls_dict):
"""Adds the wsgi_actions dictionary to the class."""
# Find all actions
actions = {}
extensions = []
versioned_methods = None
# start with wsgi actions from base classes
for base in bases:
actions.update(getattr(base, 'wsgi_actions', {}))
if base.__name__ == "Controller":
# NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute
# between API controller class creations. This allows us
# to use a class decorator on the API methods that doesn't
# require naming explicitly what method is being versioned as
# it can be implicit based on the method decorated. It is a bit
# ugly.
if VER_METHOD_ATTR in base.__dict__:
versioned_methods = getattr(base, VER_METHOD_ATTR)
delattr(base, VER_METHOD_ATTR)
for key, value in cls_dict.items():
if not callable(value):
continue
if getattr(value, 'wsgi_action', None):
actions[value.wsgi_action] = key
elif getattr(value, 'wsgi_extends', None):
extensions.append(value.wsgi_extends)
# Add the actions and extensions to the class dict
cls_dict['wsgi_actions'] = actions
cls_dict['wsgi_extensions'] = extensions
if versioned_methods:
cls_dict[VER_METHOD_ATTR] = versioned_methods
return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
cls_dict)
@six.add_metaclass(ControllerMetaclass)
class Controller(object):
"""Default controller."""
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
def __getattribute__(self, key):
def version_select(*args, **kwargs):
"""Look for the method which matches the name supplied and version
constraints and calls it with the supplied arguments.
@return: Returns the result of the method called
@raises: VersionNotFoundForAPIMethod if there is no method which
matches the name and version constraints
"""
# The first arg to all versioned methods is always the request
# object. The version for the request is attached to the
# request object
if len(args) == 0:
ver = kwargs['req'].api_version_request
else:
ver = args[0].api_version_request
func_list = self.versioned_methods[key]
for func in func_list:
if ver.matches(func.start_version, func.end_version):
# Update the version_select wrapper function so
# other decorator attributes like wsgi.response
# are still respected.
functools.update_wrapper(version_select, func.func)
return func.func(self, *args, **kwargs)
# No version match
raise exception.VersionNotFoundForAPIMethod(version=ver)
try:
version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR)
except AttributeError:
# No versioning on this class
return object.__getattribute__(self, key)
if version_meth_dict and \
key in object.__getattribute__(self, VER_METHOD_ATTR):
return version_select
return object.__getattribute__(self, key)
# NOTE(cyeoh): This decorator MUST appear first (the outermost
# decorator) on an API method for it to work correctly
@classmethod
def api_version(cls, min_ver, max_ver=None):
"""Decorator for versioning api methods.
Add the decorator to any method which takes a request object
as the first parameter and belongs to a class which inherits from
wsgi.Controller.
@min_ver: string representing minimum version
@max_ver: optional string representing maximum version
"""
def decorator(f):
obj_min_ver = api_version.APIVersionRequest(min_ver)
if max_ver:
obj_max_ver = api_version.APIVersionRequest(max_ver)
else:
obj_max_ver = api_version.APIVersionRequest()
# Add to list of versioned methods registered
func_name = f.__name__
new_func = versioned_method.VersionedMethod(
func_name, obj_min_ver, obj_max_ver, f)
func_dict = getattr(cls, VER_METHOD_ATTR, {})
if not func_dict:
setattr(cls, VER_METHOD_ATTR, func_dict)
func_list = func_dict.get(func_name, [])
if not func_list:
func_dict[func_name] = func_list
func_list.append(new_func)
# Ensure the list is sorted by minimum version (reversed)
# so later when we work through the list in order we find
# the method which has the latest version which supports
# the version requested.
is_intersect = Controller.check_for_versions_intersection(
func_list)
if is_intersect:
raise exception.ApiVersionsIntersect(
name=new_func.name,
min_ver=new_func.start_version,
max_ver=new_func.end_version,
)
func_list.sort(key=lambda f: f.start_version, reverse=True)
return f
return decorator
@staticmethod
def is_valid_body(body, entity_name):
if not (body and entity_name in body):
return False
def is_dict(d):
try:
d.get(None)
return True
except AttributeError:
return False
return is_dict(body[entity_name])
@staticmethod
def check_for_versions_intersection(func_list):
"""Determines whether function list contains version intervals
intersections or not. General algorithm:
https://en.wikipedia.org/wiki/Intersection_algorithm
:param func_list: list of VersionedMethod objects
:return: boolean
"""
pairs = []
counter = 0
for f in func_list:
pairs.append((f.start_version, 1, f))
pairs.append((f.end_version, -1, f))
pairs.sort(key=lambda pair: pair[0])
for p in pairs:
counter += p[1]
if counter > 1:
return True
return False
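# Hedged sketch: check_for_versions_intersection() only needs objects exposing
# comparable ``start_version``/``end_version`` attributes, so a namedtuple
# stands in for versioned_method.VersionedMethod here; the integer versions and
# the sample body are assumptions made for brevity.
def _example_controller_static_helpers():
    from collections import namedtuple
    Span = namedtuple('Span', ['start_version', 'end_version'])
    overlapping = [Span(1, 5), Span(4, 9)]   # versions 4..5 are shared
    disjoint = [Span(1, 3), Span(4, 9)]
    assert Controller.check_for_versions_intersection(overlapping) is True
    assert Controller.check_for_versions_intersection(disjoint) is False
    assert Controller.is_valid_body({'server': {'name': 'vm1'}}, 'server')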
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
429: "overLimit",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
for key, value in list(self.wrapped_exc.headers.items()):
self.wrapped_exc.headers[key] = str(value)
self.status_int = exception.status_int
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
user_locale = req.best_match_language()
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
explanation = self.wrapped_exc.explanation
LOG.debug("Returning %(code)s to user: %(explanation)s",
{'code': code, 'explanation': explanation})
explanation = i18n.translate(explanation, user_locale)
fault_data = {
fault_name: {
'code': code,
'message': explanation}}
if code == 413 or code == 429:
retry = self.wrapped_exc.headers.get('Retry-After', None)
if retry:
fault_data[fault_name]['retryAfter'] = retry
if not req.api_version_request.is_null():
self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = \
'compute ' + req.api_version_request.get_string()
self.wrapped_exc.headers[LEGACY_API_VERSION_REQUEST_HEADER] = \
req.api_version_request.get_string()
self.wrapped_exc.headers.add('Vary', API_VERSION_REQUEST_HEADER)
self.wrapped_exc.headers.add('Vary',
LEGACY_API_VERSION_REQUEST_HEADER)
self.wrapped_exc.content_type = 'application/json'
self.wrapped_exc.charset = 'UTF-8'
self.wrapped_exc.text = JSONDictSerializer().serialize(fault_data)
return self.wrapped_exc
def __str__(self):
return self.wrapped_exc.__str__()
class RateLimitFault(webob.exc.HTTPException):
"""Rate-limited request response."""
def __init__(self, message, details, retry_time):
"""Initialize new `RateLimitFault` with relevant information."""
hdrs = RateLimitFault._retry_after(retry_time)
self.wrapped_exc = webob.exc.HTTPTooManyRequests(headers=hdrs)
self.content = {
"overLimit": {
"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
"retryAfter": hdrs['Retry-After'],
},
}
@staticmethod
def _retry_after(retry_time):
delay = int(math.ceil(retry_time - time.time()))
retry_after = delay if delay > 0 else 0
headers = {'Retry-After': '%d' % retry_after}
return headers
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""Return the wrapped exception with a serialized body conforming
to our error format.
"""
user_locale = request.best_match_language()
self.content['overLimit']['message'] = \
i18n.translate(self.content['overLimit']['message'], user_locale)
self.content['overLimit']['details'] = \
i18n.translate(self.content['overLimit']['details'], user_locale)
content = JSONDictSerializer().serialize(self.content)
self.wrapped_exc.charset = 'UTF-8'
self.wrapped_exc.content_type = "application/json"
self.wrapped_exc.text = content
return self.wrapped_exc
| |
import re
from pynmea.utils import checksum_calc
class NMEASentence(object):
""" Base sentence class. This is used to pull apart a sentence.
It will not have any real reference to what things mean. Things that
subclass this base class should add all the additional functionality.
"""
def __init__(self, parse_map):
self.sen_type = None
self.parse_map = parse_map
def _parse(self, nmea_str):
""" Tear the sentence apart, grabbing the name on the way. Create a
parts attribute on the class and fill in the sentence type in
sen_type
"""
self.nmea_sentence = nmea_str
self.parts = nmea_str.split(',')
chksum_regex = re.compile(r".+((\*{1})(?i)(?P<chksum>[0-9a-f]{2}))$")
m = chksum_regex.match(nmea_str)
if m:
self.checksum = m.groupdict()['chksum']
d, par, ck = self.parts.pop().rpartition('*')
self.parts.extend([d])
#if '*' in self.parts[-1]:
#d, par, ck = self.parts.pop().rpartition('*')
#self.parts.extend([d])
self.sen_type = self.parts[0]
if self.parts[0].startswith('$'):
self.parts[0] = self.parts[0][1:]
self.sen_type = self.parts[0]
def parse(self, nmea_str, ignore_err=False):
""" Use the parse map. Parse map should be in the format:
(('Field name', 'field_name'),
('Field name', 'field_name'))
Where the first entry in the tuple is the human readable name
and the second is the parameter name
"""
self._parse(nmea_str)
#assert len(self.parts[1:]) <= len(self.parse_map)
parts_len = len(self.parts) - 1
for index, item in enumerate(self.parse_map):
if index + 1 > parts_len:
break
setattr(self, item[1], self.parts[index + 1])
#for index, item in enumerate(self.parts[1:]):
#setattr(self, self.parse_map[index][1], item)
def check_chksum(self):
# If there is no checksum, raise AssertionError
assert hasattr(self, 'checksum')
result = checksum_calc(self.nmea_sentence)
return (result.upper() == self.checksum.upper())
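# Hedged usage sketch of the base class: the parse map pairs a human-readable
# label with the attribute name that receives each field. The sentence string
# and the "XXDMO" type below are made up for illustration and are not a real
# NMEA sentence.
def _example_parse_map():
    sentence = NMEASentence((('Heading', 'heading'), ('Status', 'status')))
    sentence.parse('$XXDMO,231.5,A*00')
    assert sentence.sen_type == 'XXDMO'
    assert sentence.heading == '231.5'
    assert sentence.status == 'A'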
# ---------------------------------------------------------------------------- #
# Here are all the currently supported sentences. All should eventually be
# supported. They are being added as properties and other useful functions are
# implemented. Unit tests are also provided.
# ---------------------------------------------------------------------------- #
class GPAAM(NMEASentence):
""" Waypoint Arrival Alarm
"""
def __init__(self):
parse_map = (
("Arrival Circle Entered", "arrival_circ_entered"),
("Perpendicular Passed", "perp_passed"),
("Circle Radius", "circle_rad"),
("Nautical Miles", "circle_rad_unit"),
("Waypoint ID", "waypoint_id"))
super(GPAAM, self).__init__(parse_map)
class GPALM(NMEASentence):
""" GPS Almanac data
"""
def __init__(self):
parse_map = (("Total number of messages", "total_num_msgs"),
("Message number", "msg_num"),
("Satellite PRN number", "sat_prn_num"), # 01 - 32
("GPS week number", "gps_week_num"), # Week since Jan 6 1980
("SV Health, bits 17-24 of each almanac page", "sv_health"),
("Eccentricity", "eccentricity"),
("Almanac Reference Time", "alamanac_ref_time"),
("Inclination Angle", "inc_angle"),
("Rate of right ascension", "rate_right_asc"),
("Root of semi-major axis", "root_semi_major_axis"),
("Argument of perigee", "arg_perigee"),
("Longitude of ascension node", "lat_asc_node"),
("Mean anomaly", "mean_anom"),
("F0 Clock parameter", "f0_clock_param"),
("F1 Clock parameter", "f1_clock_param"))
super(GPALM, self).__init__(parse_map)
class GPAPA(NMEASentence):
""" Autopilot Sentence "A"
"""
def __init__(self):
parse_map = (
("General Status", "status_gen"),
("Cycle lock Status", "status_cycle_lock"),
("Cross Track Error Magnitude", "cross_track_err_mag"),
("Direction to Steer (L or R)", "dir_steer"),
("Cross Track Units (Nautical Miles or KM)", "cross_track_unit"),
("Arrival Circle Entered", "arr_circle_entered"), # A = True
("Perpendicular passed at waypoint", "perp_passed"), # A = True
("Bearing origin to destination", "bearing_to_dest"),
("Bearing type", "bearing_type"), # M = Magnetic, T = True
("Destination waypoint ID", "dest_waypoint_id"))
super(GPAPA, self).__init__(parse_map)
class GPAPB(NMEASentence):
""" Autopilot Sentence "B"
"""
def __init__(self):
parse_map = (
("General Status", "status_gen"),
("Cycle lock Status", "status_cycle_lock"),
("Cross Track Error Magnitude", "cross_track_err_mag"),
("Direction to Steer (L or R)", "dir_steer"),
("Cross Track Units (Nautical Miles or KM)", "cross_track_unit"),
("Arrival Circle Entered", "arr_circle_entered"), # A = True
("Perpendicular passed at waypoint", "perp_passed"), # A = True
("Bearing origin to destination", "bearing_to_dest"),
("Bearing type", "bearing_type"), # M = Magnetic, T = True
("Destination waypoint ID", "dest_waypoint_id"),
("Bearing, present position to dest", "bearing_pres_dest"),
("Bearing to destination, type", "bearing_pres_dest_type"), # M = Magnetic, T = True
("Heading to steer to destination", "heading_to_dest"),
("Heading to steer to destination type", "heading_to_dest_type")) # M = Magnetic, T = True
super(GPAPB, self).__init__(parse_map)
class GPBEC(NMEASentence):
""" Bearing & Distance to Waypoint, Dead Reckoning
"""
def __init__(self):
parse_map = (
("Timestamp", "timestamp"),
("Waypoint Latitude", "waypoint_lat"),
("Waypoint Latitude direction", "waypoint_lat_dir"),
("Waypoint Longitude", "waypoint_lon"),
("Waypoint Longitude direction", "waypoint_lon_dir"),
("Bearing, true", "bearing_true"),
("Bearing True symbol", "bearing_true_sym"), # T = true
("Bearing Magnetic", "bearing_mag"),
("Bearing Magnetic symbol", "bearing_mag_sym"),
("Nautical Miles", "nautical_miles"),
("Nautical Miles symbol", "nautical_miles_sym"),
("Waypoint ID", "waypoint_id"),
("FAA mode indicator", "faa_mode"))
super(GPBEC, self).__init__(parse_map)
class GPBOD(NMEASentence):
def __init__(self):
# 045.,T,023.,M,DEST,START
parse_map = (('Bearing True', 'bearing_t'),
('Bearing True Type', 'bearing_t_type'),
('Bearing Magnetic', 'bearing_mag'),
('Bearing Magnetic Type', 'bearing_mag_type'),
('Destination', 'dest'),
('Start', 'start'))
super(GPBOD, self).__init__(parse_map)
@property
def bearing_true(self):
return ','.join([self.bearing_t, self.bearing_t_type])
@property
def bearing_magnetic(self):
return ','.join([self.bearing_mag, self.bearing_mag_type])
@property
def destination(self):
return self.dest
@property
def origin(self):
return self.start
class GPBWC(NMEASentence):
def __init__(self):
parse_map = (
('Timestamp', 'timestamp'),
('Latitude of next Waypoint', 'lat_next'),
('Latitude of next Waypoint Direction', 'lat_next_direction'),
('Longitude of next Waypoint', 'lon_next'),
('Longitude of next Waypoint Direction', 'lon_next_direction'),
('True track to waypoint', 'true_track'),
('True Track Symbol', 'true_track_sym'),
('Magnetic track to waypoint', 'mag_track'),
('Magnetic Symbol', 'mag_sym'),
('Range to waypoint', 'range_next'),
('Unit of range', 'range_unit'),
('Waypoint Name', 'waypoint_name'))
#('Checksum', 'checksum'))
super(GPBWC, self).__init__(parse_map)
class GPBWR(NMEASentence):
def __init__(self):
parse_map = (
('Timestamp', 'timestamp'),
('Latitude of next Waypoint', 'lat_next'),
('Latitude of next Waypoint Direction', 'lat_next_direction'),
('Longitude of next Waypoint', 'lon_next'),
('Longitude of next Waypoint Direction', 'lon_next_direction'),
('True track to waypoint', 'true_track'),
('True Track Symbol', 'true_track_sym'),
('Magnetic track to waypoint', 'mag_track'),
('Magnetic Symbol', 'mag_sym'),
('Range to waypoint', 'range_next'),
('Unit of range', 'range_unit'),
('Waypoint Name', 'waypoint_name'))
#('Checksum', 'checksum'))
super(GPBWR, self).__init__(parse_map)
class GPGGA(NMEASentence):
def __init__(self):
parse_map = (
('Timestamp', 'timestamp'),
('Latitude', 'lat'),
('Latitude Direction', 'lat_direction'),
('Longitude', 'lon'),
('Longitude Direction', 'lon_direction'),
('GPS Quality Indicator', 'gps_qual'),
('Number of Satellites in use', 'num_sats'),
('Horizontal Dilution of Precision', 'horizontal_dil'),
('Antenna Alt above sea level (mean)', 'antenna_altitude'),
('Units of altitude (meters)', 'altitude_units'),
('Geoidal Separation', 'geo_sep'),
('Units of Geoidal Separation (meters)', 'geo_sep_units'),
('Age of Differential GPS Data (secs)', 'age_gps_data'),
('Differential Reference Station ID', 'ref_station_id'))
#('Checksum', 'checksum'))
super(GPGGA, self).__init__(parse_map)
@property
def latitude(self):
return fixlonlat(float(self.lat))
@property
def longitude(self):
return fixlonlat(float(self.lon))
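# Hedged usage sketch with a widely circulated sample GGA fix; the expected
# decimal-degree values are approximate and only illustrate how the ddmm.mmmm
# fields are converted by the latitude/longitude properties.
def _example_gpgga():
    gga = GPGGA()
    gga.parse('$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47')
    assert gga.timestamp == '123519'
    assert abs(gga.latitude - 48.1173) < 1e-4    # 48 deg 07.038 min North
    assert abs(gga.longitude - 11.5167) < 1e-4   # 11 deg 31.000 min East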
class GPBWW(NMEASentence):
""" Bearing, Waypoint to Waypoint
"""
def __init__(self):
parse_map = (
("Bearing degrees True", "bearing_deg_true"),
("Bearing degrees True Symbol", "bearing_deg_true_sym"),
("Bearing degrees Magnitude", "bearing_deg_mag"),
("Bearing degrees Magnitude Symbol", "bearing_deg_mag_sym"),
("Destination Waypoint ID", "waypoint_id_dest"),
("Origin Waypoint ID", "waypoint_id_orig"))
super(GPBWW, self).__init__(parse_map)
class GPGLL(NMEASentence):
def __init__(self):
parse_map = (
('Latitude', 'lat'),
('Latitude Direction', 'lat_dir'),
('Longitude', 'lon'),
('Longitude Direction', 'lon_dir'),
('Timestamp', 'timestamp'),
('Data Validity', "data_valid"))
super(GPGLL, self).__init__(parse_map)
self._use_data_validity = False
#def _parse(self, nmea_str):
#""" GPGGL Allows for a couple of different formats.
#The all have lat,direction,lon,direction
#but one may have timestamp,data_validity
#while the other has only checksum
#We shall treat data_validity as a checksum and always
#add in a timestamp field
#"""
#self.nmea_sentence = nmea_str
#self.parts = nmea_str.split(',')
#chksum_regex = re.compile(r".+((\*{1})(?i)(?P<chksum>[0-9a-f]{2}))$")
#m = chksum_regex.match(nmea_str)
#if m:
#self.checksum = m.groupdict()['chksum']
##if '*' in self.parts[-1]:
### There is a checksum but no timestamp + data_validity.
### Add an empty field for the timestamp and indicate that when
### validating the checksum, we should use validity, not a
### calculation
##d, par, ck = self.parts.pop().rpartition('*')
##self.parts.extend([d, ''])
##self._use_data_validity = True
#self.sen_type = self.parts[0]
#if self.parts[0].startswith('$'):
#self.parts[0] = self.parts[0][1:]
#self.sen_type = self.parts[0]
#def check_chksum(self):
#""" Override check_checksum. If it has been detected that
#the checksum field contains "A" for valid data and something else
#for invalid, do a check based on thsi information. Otherwise, call
#to original checksum code from the superclass
#"""
## If we are looking for an "A" character
#if self._use_data_validity:
#if self.checksum == 'A':
#return True
#else:
#return False
#else:
## Otherwise, call the superclass version
#return super(GPGLL, self).check_chksum()
@property
def latitude(self):
return fixlonlat(float(self.lat))
@property
def longitude(self):
return fixlonlat(float(self.lon))
@property
def lat_direction(self):
mapping = {'N': 'North', 'S': 'South'}
return mapping[self.lat_dir.upper()]
@property
def lon_direction(self):
mapping = {"E": "East", "W": "West"}
return mapping[self.lon_dir.upper()]
def fixlonlat(number):
    """Convert an NMEA ddmm.mmmm (or dddmm.mmmm) value to decimal degrees."""
    number /= 100.0
    high = int(number)      # whole degrees
    low = number - high     # fractional part still holding minutes / 100
    low /= 60.0             # minutes -> degrees (still scaled down by 100)
    return high + low * 100
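# Worked example: NMEA encodes 49 degrees 16.45 minutes as 4916.45; since
# 16.45 / 60 = 0.27417, the decimal form is roughly 49.27417 degrees.
def _example_fixlonlat():
    assert abs(fixlonlat(4916.45) - 49.27417) < 1e-4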
class GPGSA(NMEASentence):
def __init__(self):
parse_map = (
('Mode', 'mode'),
('Mode fix type', 'mode_fix_type'),
('SV ID01', 'sv_id01'),
('SV ID02', 'sv_id02'),
('SV ID03', 'sv_id03'),
('SV ID04', 'sv_id04'),
('SV ID05', 'sv_id05'),
('SV ID06', 'sv_id06'),
('SV ID07', 'sv_id07'),
('SV ID08', 'sv_id08'),
('SV ID09', 'sv_id09'),
('SV ID10', 'sv_id10'),
('SV ID11', 'sv_id11'),
('SV ID12', 'sv_id12'),
('PDOP (Dilution of precision)', 'pdop'),
('HDOP (Horizontal DOP)', 'hdop'),
('VDOP (Vertical DOP)', 'vdop'))
#('Checksum', 'checksum'))
super(GPGSA, self).__init__(parse_map)
class GPGSV(NMEASentence):
def __init__(self):
parse_map = (
('Number of messages of type in cycle', 'num_messages'),
('Message Number', 'msg_num'),
('Total number of SVs in view', 'num_sv_in_view'),
('SV PRN number 1', 'sv_prn_num_1'),
('Elevation in degrees 1', 'elevation_deg_1'), # 90 max
('Azimuth, deg from true north 1', 'azimuth_1'), # 000 to 359
('SNR 1', 'snr_1'), # 00-99 dB
('SV PRN number 2', 'sv_prn_num_2'),
('Elevation in degrees 2', 'elevation_deg_2'), # 90 max
('Azimuth, deg from true north 2', 'azimuth_2'), # 000 to 359
('SNR 2', 'snr_2'), # 00-99 dB
('SV PRN number 3', 'sv_prn_num_3'),
('Elevation in degrees 3', 'elevation_deg_3'), # 90 max
('Azimuth, deg from true north 3', 'azimuth_3'), # 000 to 359
('SNR 3', 'snr_3'), # 00-99 dB
('SV PRN number 4', 'sv_prn_num_4'),
('Elevation in degrees 4', 'elevation_deg_4'), # 90 max
('Azimuth, deg from true north 4', 'azimuth_4'), # 000 to 359
('SNR 4', 'snr_4')) # 00-99 dB
#('Checksum', 'checksum'))
super(GPGSV, self).__init__(parse_map)
class GPHDG(NMEASentence):
""" NOTE! This is a GUESS as I cannot find an actual spec
telling me the fields. Updates are welcome!
"""
def __init__(self):
parse_map = (
("Heading", "heading"),
("Deviation", "deviation"),
("Deviation Direction", "dev_dir"),
("Variation", "variation"),
("Variation Direction", "var_dir"))
#("Checksum", "checksum"))
super(GPHDG, self).__init__(parse_map)
class GPHDT(NMEASentence):
def __init__(self):
parse_map = (
("Heading", "heading"),
("True", "hdg_true"))
#("Checksum", "checksum"))
super(GPHDT, self).__init__(parse_map)
class GPR00(NMEASentence):
def __init__(self):
parse_map = (
("Waypoint List", "waypoint_list"),)
#("Checksum", "checksum"))
super(GPR00, self).__init__(parse_map)
def parse(self, nmea_str):
""" As the length of the sentence is variable (there can be many or few
waypoints), parse is overridden to do something special with the
different parts
"""
self._parse(nmea_str)
new_parts = [self.parts[0]]
new_parts.append(self.parts[1:])
#new_parts.append(self.parts[-1])
self.parts = new_parts
for index, item in enumerate(self.parts[1:]):
setattr(self, self.parse_map[index][1], item)
class GPRMA(NMEASentence):
def __init__(self):
parse_map = (
("Data status", "data_status"),
("Latitude", "lat"),
("Latitude Direction", "lat_dir"),
("Longitude", "lon"),
("Longitude Direction", "lon_dir"),
("Not Used 1", "not_used_1"),
("Not Used 2", "not_used_2"),
("Speed over ground", "spd_over_grnd"), # Knots
("Course over ground", "crse_over_grnd"),
("Variation", "variation"),
("Variation Direction", "var_dir"))
#("Checksum", "checksum"))
super(GPRMA, self).__init__(parse_map)
class GPRMB(NMEASentence):
""" Recommended Minimum Navigation Information
"""
def __init__(self):
parse_map = (
("Data Validity", "validity"),
("Cross Track Error", "cross_track_error"), # nautical miles, 9.9 max
("Cross Track Error, direction to corrent", "cte_correction_dir"),
("Origin Waypoint ID", "origin_waypoint_id"),
("Destination Waypoint ID", "dest_waypoint_id"),
("Destination Waypoint Latitude", "dest_lat"),
("Destination Waypoint Lat Direction", "dest_lat_dir"),
("Destination Waypoint Longitude", "dest_lon"),
("Destination Waypoint Lon Direction", "dest_lon_dir"),
("Range to Destination", "dest_range"), # Nautical Miles
("True Bearing to Destination", "dest_true_bearing"),
("Velocity Towards Destination", "dest_velocity"), # Knots
("Arrival Alarm", "arrival_alarm")) # A = Arrived, V = Not arrived
#("Checksum", "checksum"))
super(GPRMB, self).__init__(parse_map)
class GPRMC(NMEASentence):
""" Recommended Minimum Specific GPS/TRANSIT Data
"""
def __init__(self):
parse_map = (("Timestamp", "timestamp"),
("Data Validity", "data_validity"),
("Latitude", "lat"),
("Latitude Direction", "lat_dir"),
("Longitude", "lon"),
("Longitude Direction", "lon_dir"),
("Speed Over Ground", "spd_over_grnd"),
("True Course", "true_course"),
("Datestamp", "datestamp"),
("Magnetic Variation", "mag_variation"),
("Magnetic Variation Direction", "mag_var_dir"))
#("Checksum", "checksum"))
super(GPRMC, self).__init__(parse_map)
class GPRTE(NMEASentence):
""" Routes
"""
def __init__(self):
parse_map = (
("Number of sentences in sequence", "num_in_seq"),
("Sentence Number", "sen_num"),
("Start Type", "start_type"), # The first in the list is either current route or waypoint
("Name or Number of Active Route", "active_route_id"),
("Waypoint List", "waypoint_list"))
#("Checksum", "checksum"))
super(GPRTE, self).__init__(parse_map)
def parse(self, nmea_str):
""" As the length of the sentence is variable (there can be many or few
waypoints), parse is overridden to do something special with the
different parts
"""
self._parse(nmea_str)
new_parts = []
new_parts.extend(self.parts[0:5])
new_parts.append(self.parts[5:])
self.parts = new_parts
for index, item in enumerate(self.parts[1:]):
setattr(self, self.parse_map[index][1], item)
class GPSTN(NMEASentence):
""" NOTE: No real data could be found for examples of the actual spec so
it is a guess that there may be a checksum on the end
"""
def __init__(self):
parse_map = (
("Talker ID Number", "talker_id"),) # 00 - 99
#("Checksum", "checksum"))
super(GPSTN, self).__init__(parse_map)
class GPTRF(NMEASentence):
""" Transit Fix Data
"""
def __init__(self):
parse_map = (
("Timestamp (UTC)", "timestamp"),
("Date (DD/MM/YY", "date"),
("Latitude", "lat"),
("Latitude Direction", "lat_dir"),
("Longitude", "lon"),
("Longitude Direction", "lon_dir"),
("Elevation Angle", "ele_angle"),
("Number of Iterations", "num_iterations"),
("Number of Doppler Intervals", "num_doppler_intervals"),
("Update Distance", "update_dist"), # Nautical Miles
("Satellite ID", "sat_id"))
super(GPTRF, self).__init__(parse_map)
class GPVBW(NMEASentence):
""" Dual Ground/Water Speed
"""
def __init__(self):
parse_map = (
("Longitudinal Water Speed", "lon_water_spd"), # Knots
("Transverse Water Speed", "trans_water_spd"), # Knots
("Water Speed Data Validity", "data_validity_water_spd"),
("Longitudinal Ground Speed", "lon_grnd_spd"), # Knots
("Transverse Ground Speed", "trans_grnd_spd"), # Knots
("Ground Speed Data Validity", "data_validity_grnd_spd"))
#("Checksum", "checksum"))
super(GPVBW, self).__init__(parse_map)
class GPVTG(NMEASentence):
""" Track Made Good and Ground Speed
"""
def __init__(self):
parse_map = (
("True Track made good", "true_track"),
("True Track made good symbol", "true_track_sym"),
("Magnetic Track made good", "mag_track"),
("Magnetic Track symbol", "mag_track_sym"),
("Speed over ground knots", "spd_over_grnd_kts"),
("Speed over ground symbol", "spd_over_grnd_kts_sym"),
("Speed over ground kmph", "spd_over_grnd_kmph"),
("Speed over ground kmph symbol", "spd_over_grnd_kmph_sym"))
super(GPVTG, self).__init__(parse_map)
class GPWCV(NMEASentence):
""" Waypoint Closure Velocity
"""
def __init__(self):
parse_map = (
("Velocity", "velocity"),
("Velocity Units", "vel_units"), # Knots
("Waypoint ID", "waypoint_id"))
super(GPWCV, self).__init__(parse_map)
class GPWNC(NMEASentence):
""" Distance, Waypoint to Waypoint
"""
def __init__(self):
parse_map = (
("Distance, Nautical Miles", "dist_nautical_miles"),
("Distance Nautical Miles Unit", "dist_naut_unit"),
("Distance, Kilometers", "dist_km"),
("Distance, Kilometers Unit", "dist_km_unit"),
("Origin Waypoint ID", "waypoint_origin_id"),
("Destination Waypoint ID", "waypoint_dest_id"))
super(GPWNC, self).__init__(parse_map)
class GPWPL(NMEASentence):
""" Waypoint Location
"""
def __init__(self):
parse_map = (
("Latitude", "lat"),
("Latitude Direction", "lat_dir"),
("Longitude", "lon"),
("Longitude Direction", "lon_dir"),
("Waypoint ID", "waypoint_id"))
super(GPWPL, self).__init__(parse_map)
class GPXTE(NMEASentence):
""" Cross-Track Error, Measured
"""
def __init__(self):
parse_map = (("General Warning Flag", "warning_flag"),
("Lock flag (Not Used)", "lock_flag"),
("Cross Track Error Distance", "cross_track_err_dist"),
("Correction Direction (L or R)", "correction_dir"),
("Distance Units", "dist_units"))
super(GPXTE, self).__init__(parse_map)
class GPZDA(NMEASentence):
def __init__(self):
parse_map = (
("Timestamp", "timestamp"), # hhmmss.ss = UTC
("Day", "day"), # 01 to 31
("Month", "month"), # 01 to 12
("Year", "year"), # Year = YYYY
("Local Zone Description", "local_zone"), # 00 to +/- 13 hours
("Local Zone Minutes Description", "local_zone_minutes")) # same sign as hours
#("Checksum", "checksum"))
super(GPZDA, self).__init__(parse_map)
# ---------------------------------- Not Yet Implemented --------------------- #
# ---------------------------------------------------------------------------- #
#class GPDBT(NMEASentence):
# """ Depth Below Transducer
# """
# def __init__(self):
# parse_map = ()
# super(GPDBT).__init__(parse_map)
#class GPDPT(NMEASentence):
# """ Heading - Deviation and Variation
# """
# def __init__(self):
# parse_map = ()
# super(GPDPT).__init__(parse_map)
#class GPFSI(NMEASentence):
# """ Frequency Set Information
# """
# def __init__(self):
# parse_map = ()
# super(GPFSI).__init__(parse_map)
#class GPGLC(NMEASentence):
# """ Geographic Position, Loran-C
# """
# def __init__(self):
# parse_map = ()
# super(GPGLC).__init__(parse_map)
#class GPGXA(NMEASentence):
# """ TRANSIT Position
# """
# def __init__(self):
# parse_map = ()
# super(GPGXA).__init__(parse_map)
#class GPHSC(NMEASentence):
# """ Heading Steering Command
# """
# def __init__(self):
# parse_map = ()
# super(GPHSC).__init__(parse_map)
#class GPLCD(NMEASentence):
# """ Loran-C Signal Data
# """
# def __init__(self):
# parse_map = ()
# super(GPLCD).__init__(parse_map)
#class GPMTA(NMEASentence):
# """ Air Temperature (to be phased out)
# """
# def __init__(self):
# parse_map = ()
# super(GPMTA).__init__(parse_map)
#class GPMTW(NMEASentence):
# """ Water Temperature
# """
# def __init__(self):
# parse_map = ()
# super(GPMTW).__init__(parse_map)
#class GPMWD(NMEASentence):
# """ Wind Direction
# """
# def __init__(self):
# parse_map = ()
# super(GPMWD).__init__(parse_map)
#class GPMWV(NMEASentence):
# """ Wind Speed and Angle
# """
# def __init__(self):
# parse_map = ()
# super(GPMWV).__init__(parse_map)
#class GPOLN(NMEASentence):
# """ Omega Lane Numbers
# """
# def __init__(self):
# parse_map = ()
# super(GPOLN).__init__(parse_map)
#class GPOSD(NMEASentence):
# """ Own Ship Data
# """
# def __init__(self):
# parse_map = ()
# super(GPOSD).__init__(parse_map)
#class GPROT(NMEASentence):
# """ Rate of Turn
# """
# def __init__(self):
# parse_map = ()
# super(GPROT).__init__(parse_map)
#class GPRPM(NMEASentence):
# """ Revolutions
# """
# def __init__(self):
# parse_map = ()
# super(GPRPM).__init__(parse_map)
#class GPRSA(NMEASentence):
# """ Rudder Sensor Angle
# """
# def __init__(self):
# parse_map = ()
# super(GPRSA).__init__(parse_map)
#class GPRSD(NMEASentence):
# """ RADAR System Data
# """
# def __init__(self):
# parse_map = ()
# super(GPRSD).__init__(parse_map)
#class GPSFI(NMEASentence):
# """ Scanning Frequency Information
# """
# def __init__(self):
# parse_map = ()
# super(GPSFI).__init__(parse_map)
#class GPTTM(NMEASentence):
# """ Tracked Target Message
# """
# def __init__(self):
# parse_map = ()
# super(GPTTM).__init__(parse_map)
#class GPVDR(NMEASentence):
# """ Set and Drift
# """
# def __init__(self):
# parse_map = ()
# super(GPVDR).__init__(parse_map)
#class GPVHW(NMEASentence):
# """ Water Speed and Heading
# """
# def __init__(self):
# parse_map = ()
# super(GPVHW).__init__(parse_map)
#class GPVLW(NMEASentence):
# """ Distance Traveled through the Water
# """
# def __init__(self):
# parse_map = ()
# super(GPVLW).__init__(parse_map)
#class GPVPW(NMEASentence):
# """ Speed, Measured Parallel to Wind
# """
# def __init__(self):
# parse_map = ()
# super(GPVPW).__init__(parse_map)
#class GPXDR(NMEASentence):
# """ Transducer Measurements
# """
# def __init__(self):
# parse_map = ()
# super(GPXDR).__init__(parse_map)
#class GPXTR(NMEASentence):
# """ Cross-Track Error, Dead Reckoning
# """
# def __init__(self):
# parse_map = ()
# super(GPXTR).__init__(parse_map)
#class GPZFO(NMEASentence):
# """ UTC & Time from Origin Waypoint
# """
# def __init__(self):
# parse_map = ()
# super(GPZFO).__init__(parse_map)
#class GPZTG(NMEASentence):
# """ UTC & Time to Destination Waypoint
# """
# def __init__(self):
# parse_map = ()
# super(GPZTG).__init__(parse_map)
# ---------------------------------------------------------------------------- #
# -------------------------- Unknown Formats --------------------------------- #
# ---------------------------------------------------------------------------- #
#class GPASD(NMEASentence):
# """ Auto-pilot system data (Unknown format)
# """
# def __init__(self):
# parse_map = ()
# super(GPASD).__init__()
# ---------------------------------------------------------------------------- #
# -------------------------- Obsolete Formats -------------------------------- #
# ---------------------------------------------------------------------------- #
#class GPDCN(NMEASentence):
# """ Decca Position (obsolete)
# """
# def __init__(self):
# parse_map = ()
# super(GPDCN).__init__(parse_map)
# PROPRIETARY SENTENCES
# -- GARMIN -- #
class PGRME(NMEASentence):
""" GARMIN Estimated position error
"""
def __init__(self):
parse_map = (("Estimated Horiz. Position Error", "hpe"),
("Estimated Horiz. Position Error Unit (M)", "hpe_unit"),
("Estimated Vert. Position Error", "vpe"),
("Estimated Vert. Position Error Unit (M)", "vpe_unit"),
("Estimated Horiz. Position Error", "osepe"),
("Overall Spherical Equiv. Position Error", "osepe_unit"))
super(PGRME, self).__init__(parse_map)
class PGRMM(NMEASentence):
""" GARMIN Map Datum
"""
def __init__(self):
parse_map = (('Currently Active Datum', 'datum'),)
super(PGRMM, self).__init__(parse_map)
class PGRMZ(NMEASentence):
""" GARMIN Altitude Information
"""
def __init__(self):
parse_map = (("Altitude", "altitude"),
("Altitude Units (Feet)", "altitude_unit"),
("Positional Fix Dimension (2=user, 3=GPS)",
"pos_fix_dim"))
super(PGRMZ, self).__init__(parse_map)
| |
"""Encapsulation of functionality provided by various batch schedulers.
Exports:
SLURMScheduler: Interface to SLURM batch scheduler.
Scheduler: Generic instantiator for all implemented schedulers.
"""
import logging
import os
from subprocess import CalledProcessError, check_output, STDOUT
from PCEHelper import pce_root
class _BatchScheduler(object):
"""Superclass for batch scheduler classes.
Subclasses must override the non-magic methods defined here.
"""
local_python = os.path.join(pce_root, 'src', 'env', 'bin', 'python')
@classmethod
def is_scheduler_for(cls, type):
"""Return boolean indicating whether the class provides an interface to
the batch scheduler type given.
Args:
type (str): Batch scheduler type.
Returns:
True if class provides interface to given batch scheduler, False if
not.
"""
pass
def get_batch_script(self, run_name, numtasks=4, num_nodes=1, email=None):
"""Return the batch script that runs a job as per args formatted for the
given batch scheduler.
Args:
run_name (str): Human-readable label for job run.
numtasks (int): Number of tasks to schedule.
num_nodes (int): Number of nodes to allocate for job.
email (str): Email to send results to upon completion. If None, no
email sent.
Returns:
Batch script implementing given attrs.
"""
pass
def schedule(self, proj_loc):
"""Schedule a job using the given batch scheduler.
Args:
proj_loc (str): Folder containing the batch script 'script.sh' for
the job to schedule.
Returns:
Result dict with the following fields:
status_code: Status code
status_msg: String giving detailed status info.
"""
pass
def check_status(self, scheduler_job_num):
"""Return job status from scheduler.
Args:
scheduler_job_num (int): Job number of the job to check state on as
given by the scheduler, not as given by OnRamp.
Returns:
2-Tuple with 0th item being error code and 1st item being a string
giving detailed status info.
"""
pass
def cancel_job(self, scheduler_job_num):
"""Cancel the given job.
Args:
scheduler_job_num (int): Job number, as given by the scheduler, of the
job to cancel.
Returns:
2-Tuple with 0th item being error code and 1st item being a string
giving detailed status info.
"""
pass
def __init__(self, type):
"""Set batch scheduler type and return the instance.
Args:
type (str): Batch scheduler type.
"""
self.logger = logging.getLogger('onramp')
class SLURMScheduler(_BatchScheduler):
@classmethod
def is_scheduler_for(cls, type):
"""Return boolean indicating whether the class provides an interface to
the batch scheduler type given.
Args:
type (str): Batch scheduler type.
Returns:
True if class provides interface to given batch scheduler, False if
not.
"""
return type == 'SLURM'
def get_batch_script(self, run_name, numtasks=4, num_nodes=1, email=None):
"""Return the batch script that runs a job as per args formatted for the
SLURM batch scheduler.
Args:
run_name (str): Human-readable label for job run.
numtasks (int): Number of tasks to schedule.
num_nodes (int): Number of nodes to allocate for job.
email (str): Email to send results to upon completion. If None, no
email sent.
Returns:
Batch script implementing given attrs.
"""
contents = '#!/bin/bash\n'
contents += '\n'
contents += '###################################\n'
contents += '# Slurm Submission options\n'
contents += '#\n'
contents += '#SBATCH --job-name=\"' + run_name + '\"\n'
contents += '#SBATCH -o output.txt\n'
contents += '#SBATCH -n ' + str(numtasks) + '\n'
if email:
self.logger.debug('%s configured for email reporting to %s'
% (run_name, email))
contents += '#SBATCH --mail-user=' + email + '\n'
contents += '###################################\n'
contents += '\n'
contents += '%s bin/onramp_run.py\n' % self.local_python
return contents
def schedule(self, proj_loc):
"""Schedule a job using the SLURM batch scheduler.
Args:
proj_loc (str): Folder containing the batch script 'script.sh' for
the job to schedule.
Returns:
Result dict with the following fields:
status_code: Status code
status_msg: String giving detailed status info.
"""
ret_dir = os.getcwd()
os.chdir(proj_loc)
try:
batch_output = check_output(['sbatch', 'script.sh'], stderr=STDOUT)
except CalledProcessError as e:
msg = 'Job scheduling call failed'
os.chdir(ret_dir)
return {
'status_code': e.returncode,
'msg': '%s: %s' % (msg, e.output),
'status_msg': '%s: %s' % (msg, e.output)
}
os.chdir(ret_dir)
output_fields = batch_output.strip().split()
if 'Submitted batch job' != ' '.join(output_fields[:-1]):
msg = 'Unexpected output from sbatch'
self.logger.error(msg)
return {
'status_code': -7,
'status_msg': msg
}
try:
job_num = int(output_fields[3:][0])
except (ValueError, IndexError):
msg = 'Unexpected output from sbatch'
self.logger.error(msg)
return {
'status_code': -7,
'status_msg': msg
}
return {
'status_code': 0,
'status_msg': 'Job %d scheduled' % job_num,
'job_num': job_num
}
def check_status(self, scheduler_job_num):
"""Return job status from scheduler.
Args:
scheduler_job_num (int): Job number of the job to check state on as
given by the scheduler, not as given by OnRamp.
Returns:
2-Tuple with 0th item being error code and 1st item being a string
giving detailed status info.
"""
try:
job_info = check_output(['scontrol', 'show', 'job',
str(scheduler_job_num)])
except CalledProcessError as e:
msg = 'Job info call failed'
self.logger.error(msg)
return (-1, msg)
job_state = job_info.split('JobState=')[1].split()[0]
if job_state == 'RUNNING':
return (0, 'Running')
elif job_state == 'COMPLETED':
return (0, 'Done')
elif job_state == 'PENDING':
return (0, 'Queued')
elif job_state == 'FAILED':
msg = 'Job failed'
self.logger.error(msg)
return (-1, msg)
else:
msg = 'Unexpected job state from scheduler'
self.logger.error(msg)
return (-2, msg)
def cancel_job(self, scheduler_job_num):
"""Cancel the given job.
Args:
scheduler_job_num (int): Job number, as given by the scheduler, of the
job to cancel.
Returns:
2-Tuple with 0th item being error code and 1st item being a string
giving detailed status info.
"""
try:
result = check_output(['scancel', str(scheduler_job_num)], stderr=STDOUT)
except CalledProcessError as e:
msg = 'Job cancel call failed'
self.logger.error(msg)
return (-1, msg)
return (0, result)
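# Hedged usage sketch (illustrative, not part of the module): render a SLURM
# batch script without submitting anything; the run name and task count are
# assumptions chosen for the example.
def _example_slurm_batch_script():
    script = SLURMScheduler('SLURM').get_batch_script('demo_run', numtasks=8)
    assert '#SBATCH --job-name="demo_run"' in script
    assert '#SBATCH -n 8' in script
    return script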
class PBSScheduler(_BatchScheduler):
@classmethod
def is_scheduler_for(cls, type):
"""Return boolean indicating whether the class provides an interface to
the batch scheduler type given.
Args:
type (str): Batch scheduler type.
Returns:
True if class provides interface to given batch scheduler, False if
not.
"""
return type == 'PBS'
def get_batch_script(self, run_name, numtasks=4, num_nodes=1, email=None):
"""Return the batch script that runs a job as per args formatted for the
PBS batch scheduler.
Args:
run_name (str): Human-readable label for job run.
numtasks (int): Number of tasks to schedule.
num_nodes (int): Number of nodes to allocate for job.
email (str): Email to send results to upon completion. If None, no
email sent.
Returns:
Batch script implementing given attrs.
"""
script = '#!/bin/bash\n'
script += '\n'
script += '################################################\n'
script += '#PBS -l select=%d:mpiprocs=%d\n' % (num_nodes, numtasks)
script += '#PBS -N %s\n' % run_name
script += '#PBS -V\n'
script += '#PBS -j oe\n'
script += '#PBS -o output.txt\n'
script += '################################################\n'
script += '\n'
script += 'cd ${PBS_O_WORKDIR}\n'
script += '%s bin/onramp_run.py\n' % self.local_python
return script
def schedule(self, proj_loc):
"""Schedule a job using the PBS batch scheduler.
Args:
proj_loc (str): Folder containing the batch script 'script.sh' for
the job to schedule.
Returns:
Result dict with the following fields:
status_code: Status code
status_msg: String giving detailed status info.
"""
ret_dir = os.getcwd()
os.chdir(proj_loc)
try:
batch_output = check_output(['qsub', 'script.sh'], stderr=STDOUT)
except CalledProcessError as e:
msg = 'Job scheduling call failed'
os.chdir(ret_dir)
return {
'status_code': e.returncode,
'msg': '%s: %s' % (msg, e.output),
'status_msg': '%s: %s' % (msg, e.output)
}
os.chdir(ret_dir)
output_fields = batch_output.strip().split('.')
try:
job_num = int(output_fields[0])
except (ValueError, IndexError):
msg = 'Unexpected output from qsub'
self.logger.error(msg)
return {
'status_code': -7,
'status_msg': msg
}
return {
'status_code': 0,
'status_msg': 'Job %d scheduled' % job_num,
'job_num': job_num
}
def check_status(self, scheduler_job_num):
"""Return job status from scheduler.
Args:
scheduler_job_num (int): Job number of the job to check state on as
given by the scheduler, not as given by OnRamp.
Returns:
2-Tuple with 0th item being error code and 1st item being a string
giving detailed status info.
"""
try:
job_info = check_output(['qstat', '-i', str(scheduler_job_num)],
stderr=STDOUT)
except CalledProcessError as e:
if e.output.startswith('qstat: Unknown Job Id %d' % scheduler_job_num):
return (0, 'No info')
msg = 'Job info call failed: %s' % e.output
self.logger.error(msg)
return (-1, msg)
last_line = job_info.strip().split('\n')[-1:][0]
job_state = last_line.split()[9]
if job_state in ('R', 'r', 's', 'S', 't', 'T'):
return (0, 'Running')
elif job_state == 'W' or job_state == 'H':
return (0, 'Queued')
elif job_state == 'E':
msg = 'Job failed'
# TODO: Can maybe add error info here by qstat -j job_list option.
self.logger.error(msg)
return (-1, msg)
elif job_state == 'd':
msg = 'Job scheduled for deletion'
self.logger.error(msg)
return (-1, msg)
else:
msg = 'Unexpected job state from scheduler'
self.logger.error(msg)
return (-2, msg)
def cancel_job(self, scheduler_job_num):
"""Cancel the given job.
Args:
scheduler_job_num (int): Job number, as given by the scheduler, of the
job to cancel.
Returns:
2-Tuple with 0th item being error code and 1st item being a string
giving detailed status info.
"""
try:
result = check_output(['qdel', str(scheduler_job_num)], stderr=STDOUT)
except CalledProcessError as e:
msg = 'Job cancel call failed'
self.logger.error(msg)
return (-1, msg)
return (0, result)
def Scheduler(type):
"""Instantiate the appropriate scheduler class for given type.
Args:
type (str): Identifier for batch scheduler type.
Returns:
Instance of a _BatchScheduler for given type.
"""
for cls in _BatchScheduler.__subclasses__():
if cls.is_scheduler_for(type):
return cls(type)
raise ValueError
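# A minimal usage sketch, assuming a _BatchScheduler subclass that answers
# is_scheduler_for('pbs') (names below are illustrative):
#
#     sched = Scheduler('pbs')
#     result = sched.schedule('/path/to/project')
#     if result['status_code'] == 0:
#         code, msg = sched.check_status(result['job_num'])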
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Test2 Baseband Rx File Sink
# Generated: Mon Jun 12 17:02:58 2017
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from optparse import OptionParser
import sip
import sys
import time
from gnuradio import qtgui
# Zyg
import time
import os
from threading import Timer
curFileDir = os.path.dirname(os.path.realpath(__file__))
class Test2_baseband_rx_file_sink(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Test2 Baseband Rx File Sink")
Qt.QWidget.__init__(self)
self.setWindowTitle("Test2 Baseband Rx File Sink")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "Test2_baseband_rx_file_sink")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 1.4e6
self.rx_gain = rx_gain = 30
self.freq = freq = 1e9
##################################################
# Blocks
##################################################
self._rx_gain_range = Range(0, 200, 1, 30, 200)
self._rx_gain_win = RangeWidget(self._rx_gain_range, self.set_rx_gain, 'Receiver Gain', "counter_slider", float)
self.top_layout.addWidget(self._rx_gain_win)
self.uhd_usrp_source_0 = uhd.usrp_source(
",".join(("", "master_clock_rate=52e6, --fifo=self.logfile")),
uhd.stream_args(
cpu_format="fc32",
channels=range(1),
),
)
self.uhd_usrp_source_0.set_clock_source('gpsdo', 0)
self.uhd_usrp_source_0.set_time_source('gpsdo', 0)
self.uhd_usrp_source_0.set_samp_rate(samp_rate)
self.uhd_usrp_source_0.set_center_freq(2.4e9, 0)
self.uhd_usrp_source_0.set_gain(rx_gain, 0)
self.uhd_usrp_source_0.set_antenna('TX/RX', 0)
self.qtgui_time_sink_x_0_0 = qtgui.time_sink_c(
1024, #size
samp_rate, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0.set_update_time(0.10)
self.qtgui_time_sink_x_0_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, 0.5, 0, 0, "")
self.qtgui_time_sink_x_0_0.enable_autoscale(False)
self.qtgui_time_sink_x_0_0.enable_grid(False)
self.qtgui_time_sink_x_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0.enable_control_panel(False)
if not True:
self.qtgui_time_sink_x_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
if(i % 2 == 0):
self.qtgui_time_sink_x_0_0.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0_0.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_time_sink_x_0_0_win)
self.qtgui_time_sink_x_0 = qtgui.time_sink_c(
1024, #size
samp_rate, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0.set_update_time(0.10)
self.qtgui_time_sink_x_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, 0.3, 0, 0, "")
self.qtgui_time_sink_x_0.enable_autoscale(False)
self.qtgui_time_sink_x_0.enable_grid(False)
self.qtgui_time_sink_x_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0.enable_control_panel(False)
if not True:
self.qtgui_time_sink_x_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
if(i % 2 == 0):
self.qtgui_time_sink_x_0.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_time_sink_x_0_win)
self.qtgui_freq_sink_x_0_0 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_0.set_update_time(0.10)
self.qtgui_freq_sink_x_0_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0.enable_grid(False)
self.qtgui_freq_sink_x_0_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_0.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_freq_sink_x_0_0_win)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.10)
self.qtgui_freq_sink_x_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(False)
self.qtgui_freq_sink_x_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_freq_sink_x_0_win)
self.low_pass_filter_0 = filter.fir_filter_ccf(1, firdes.low_pass(
1, samp_rate, 120e3, 10e3, firdes.WIN_HAMMING, 6.76))
self._freq_range = Range(0.5e9, 1.5e9, 1e6, 1e9, 200)
self._freq_win = RangeWidget(self._freq_range, self.set_freq, 'Frequency', "counter_slider", float)
self.top_layout.addWidget(self._freq_win)
# Zyg
epochTimeStr = str(int(time.time()))
outFileName = 'Test2_baseband_rx_file_sink_Mod_'+epochTimeStr+'.out'
outFilePath = os.path.join(curFileDir, outFileName)
# Zyg
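# The file sink writes raw interleaved complex float32 samples
# (gr.sizeof_gr_complex == 8 bytes per sample) with no header.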
self.blocks_file_sink_0 = blocks.file_sink(gr.sizeof_gr_complex*1, outFilePath, False)
self.blocks_file_sink_0.set_unbuffered(False)
##################################################
# Connections
##################################################
self.connect((self.low_pass_filter_0, 0), (self.qtgui_freq_sink_x_0_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.qtgui_time_sink_x_0_0, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.blocks_file_sink_0, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.low_pass_filter_0, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.qtgui_freq_sink_x_0, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.qtgui_time_sink_x_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "Test2_baseband_rx_file_sink")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.uhd_usrp_source_0.set_samp_rate(self.samp_rate)
self.qtgui_time_sink_x_0_0.set_samp_rate(self.samp_rate)
self.qtgui_time_sink_x_0.set_samp_rate(self.samp_rate)
self.qtgui_freq_sink_x_0_0.set_frequency_range(0, self.samp_rate)
self.qtgui_freq_sink_x_0.set_frequency_range(0, self.samp_rate)
self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, 120e3, 10e3, firdes.WIN_HAMMING, 6.76))
def get_rx_gain(self):
return self.rx_gain
def set_rx_gain(self, rx_gain):
self.rx_gain = rx_gain
self.uhd_usrp_source_0.set_gain(self.rx_gain, 0)
def get_freq(self):
return self.freq
def set_freq(self, freq):
self.freq = freq
def main(top_block_cls=Test2_baseband_rx_file_sink, options=None):
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
# Zyg: Move quitting here and add new one for debugging.
def quitting():
tb.stop()
tb.wait()
def quitFromTimer():
print('Quitting by Timer...')
quitting()
qapp.quit()
t = Timer(5.0, quitFromTimer)
tb.start()
tb.show()
# Zyg
t.start()
# Zyg
# def quitting():
# tb.stop()
# tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()
| |
# -*- coding: utf-8 -*-
"""
The MIT License
Copyright (c) 2010 Olle Johansson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from twisted.internet import task
from twisted.internet import reactor
from twisted.names.srvconnect import SRVConnector
from twisted.words.xish import domish, xpath
from twisted.words.protocols.jabber import xmlstream, client, jid
import platform
from time import time
from datetime import datetime, date
from utils.pyanno import raises, abstractMethod, returnType, parameterTypes, deprecatedMethod, \
privateMethod, protectedMethod, selfType, ignoreType, callableType
from shoutbox.Shoutbox import *
from utils.utilities import *
from bridges.XmppBridge import *
class XMPPClientConnector(SRVConnector):
def __init__(self, reactor, domain, factory):
SRVConnector.__init__(self, reactor, 'xmpp-client', domain, factory)
def pickServer(self):
host, port = SRVConnector.pickServer(self)
if not self.servers and not self.orderedServers:
# no SRV record, fall back..
port = 5222
return host, port
class TwistedBridge(XmppBridge):
"""
TwistedBridge implements the abstract class XmppBridge using the Twisted
framework.
"""
client_supported_features = ['jabber:iq:last', 'jabber:iq:version']
login = ""
passwd = ""
room = ""
host = ""
port = 5222
roomjid = ""
resource = ""
xmlstream = None
last_time = 0
ignorelist = []
def make_connection(self):
"""
Make an XMPP connection and authorize the user.
"""
self.jid = jid.JID(self.login)
f = client.XMPPClientFactory(self.jid, self.passwd)
f.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT, self.connected)
f.addBootstrap(xmlstream.STREAM_END_EVENT, self.disconnected)
f.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, self.authenticated)
f.addBootstrap(xmlstream.INIT_FAILED_EVENT, self.init_failed)
connector = XMPPClientConnector(reactor, self.jid.host, f)
connector.connect()
def close_connection(self):
self.send_presence(ptype="unavailable", reason="Quitting...")
self.xmlstream.sendFooter()
def connected(self, xs):
self.logprint('Connected.')
self.xmlstream = xs
# Consider last activity time to be when connected.
self.update_last_time()
# Log all traffic
if self.cfg.get_bool('debug'):
xs.rawDataInFn = self.rawDataIn
xs.rawDataOutFn = self.rawDataOut
# Add Event observers
xs.addObserver("/message[@type='groupchat']", self.handle_message)
xs.addObserver("/message[@type='chat']", self.handle_message)
#xs.addObserver("/message[@type='chat']", self.handle_message)
xs.addObserver("/presence", self.handle_presence)
xs.addObserver("/iq", self.handle_iq)
def disconnected(self, xs):
self.logprint('Disconnected.')
#reactor.stop()
def init_failed(self, failure):
self.logprint("Initialization failed.", failure)
self.xmlstream.sendFooter()
def send_stanza(self, stanza):
if not self.xmlstream:
self.logprint("Stanza not sent, no xml stream:\n", stanza.toXml())
return
self.logprint("Sending stanza:\n", stanza.toXml())
self.xmlstream.send(stanza)
def handle_iq(self, iq):
"""
Handles incoming IQ stanzas and dispatches to specific handle methods.
"""
frm = iq.getAttribute('from')
to = iq.getAttribute('to')
id = iq.getAttribute('id')
iqtype = iq.getAttribute('type')
# Call the relevant iq handler method.
self.lookup_iq_method(iqtype)(frm=frm, to=to, id=id, query=iq.query)
# Trigger handleXmppIq event
self.trigger_plugin_event('XmppIq', iq.toXml())
def handle_iq_GET(self, frm=None, to=None, id=None, query=None):
if query.defaultUri == 'jabber:iq:last':
self.send_iq_last(to=frm, id=id)
elif query.defaultUri == 'jabber:iq:version':
self.send_iq_version(to=frm, id=id)
elif query.defaultUri == 'http://jabber.org/protocol/disco#info':
self.handle_iq_DISCO(frm=frm, to=to, id=id, query=query)
else:
# Default to sending back error for unknown get iq.
self.send_iq_error(to=frm, id=id, query=query)
def handle_iq_RESULT(self, frm=None, to=None, id=None, query=None):
"""
IQ result is ignored.
"""
pass
def send_iq_version(self, frm=None, to=None, id=None):
"""
Returns iq stanza with client and system information.
"""
querynode = domish.Element(('jabber:iq:version', 'query'))
querynode.addElement('name', content=self.client_name)
querynode.addElement('version', content=self.client_version)
querynode.addElement('os', content=self.get_os_info())
self.send_iq('result', id, to=to, children=[querynode])
def send_iq_last(self, to=None, id=None):
"""
Return IQ stanza with information on seconds since last client usage.
"""
query = domish.Element(('jabber:iq:last', 'query'))
query['seconds'] = self.get_last_activity()
self.send_iq('result', id, to=to, children=[query])
def send_iq_disco(self, frm=None, to=None, id=None, query=None):
"""
Send IQ stanza with discovery information.
"""
resultquery = domish.Element(('http://jabber.org/protocol/disco#info', 'query'))
for f in self.client_supported_features:
feature = domish.Element((None, 'feature'))
feature['var'] = f
resultquery.addChild(feature)
self.send_iq("result", id, to=to, children=[resultquery])
@raises( BridgeWrongTypeError )
def send_iq_error(self, to=None, id=None, iqtype=None, query=None, condition=None, reason=None):
"""
Build and send IQ error stanza.
"""
errornode = domish.Element((None, 'error'))
if not iqtype:
iqtype = 'cancel'
if iqtype not in ['cancel', 'continue', 'modify', 'auth', 'wait']:
raise BridgeWrongTypeError
errornode['type'] = iqtype
if not condition:
condition = 'feature-not-implemented'
errornode.addElement(condition, defaultUri='urn:ietf:params:xml:ns:xmpp-stanzas')
if reason:
errornode.addElement('text', defaultUri='urn:ietf:params:xml:ns:xmpp-stanzas', content=reason)
self.send_iq("error", id, to=to, children=[query, errornode])
@raises( BridgeWrongTypeError )
def send_iq(self, iqtype, id=None, frm=None, to=None, children=None):
"""
Sends an IQ stanza on the xml stream.
"""
if iqtype not in ['set', 'get', 'result', 'error']:
raise BridgeWrongTypeError
iq = domish.Element((None, 'iq'))
iq['type'] = iqtype
if not frm:
frm = self.login + '/' + self.current_nick
iq['from'] = frm
if id:
iq['id'] = id
else:
iq.addUniqueId()
if to:
iq['to'] = to
if children:
for child in children:
iq.addChild(child)
self.send_stanza(iq)
@returnType( bool )
def handle_presence(self, pres):
if not pres:
return False
# Parse presence information.
(login, sep, nick) = pres['from'].rpartition('/')
fromjid = jid.JID(pres['from'])
for x in pres.elements(('http://jabber.org/protocol/muc#user', 'x')):
if x.item and x.item.hasAttribute('jid'):
fromjid = jid.JID(x.item['jid'])
fromstr = fromjid.user + '@' + fromjid.host
# Call the proper presence handler.
if pres.hasAttribute('type'):
self.lookup_presence_method(pres['type'])(pres, fromjid=fromjid, fromstr=fromstr,
login=login, nick=nick)
else:
self.handle_presence_AVAILABLE(pres, fromstr=fromstr, nick=nick)
# Trigger handleXmppPresence event
self.trigger_plugin_event('XmppPresence', pres.toXml())
def handle_presence_DEFAULT(self, pres, fromjid=None, **kwargs):
self.logprint("Received unknown presence:", pres.toXml())
def handle_presence_ERROR(self, pres, fromjid=None, **kwargs):
self.logprint("Received error presence:", pres.toXml())
if pres.error and pres.error.hasAttribute('code'):
code = pres.error['code']
if code == "409":
# Nick taken, default to original nick.
self.change_nick(self.resource)
def send_presence(self, xmlns=None, ptype=None, status=None, show=None,
frm=None, to=None, children=None):
"""
Build and send presence stanza.
"""
presence = domish.Element((xmlns, 'presence'))
if frm:
presence['from'] = frm
if to:
presence['to'] = to
if ptype:
presence['type'] = ptype
if status:
presence.addElement('status', content=status)
if show in ('chat', 'dnd', 'away', 'xa'):
presence.addElement('show', content=show)
if children:
for k, v in children.items():
presence.addElement(k, defaultUri=v['defaultUri'], content=v['content'])
self.send_stanza(presence)
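# Sketch of a typical call, e.g. announcing availability to the MUC room
# (the jid/resource values are whatever the bridge was configured with):
#
#     self.send_presence(to=self.roomjid + '/' + self.resource,
#                        status='Relaying shoutbox messages', show='chat')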
def handle_message(self, mess):
"""
Handle an XMPP message.
"""
if mess.x and mess.x.defaultUri:
# Check if message is delayed.
if mess.x.defaultUri in ['jabber:x:delay', 'urn:xmpp:delay']:
self.logprint("Skipping delayed message.")
return
# Ignore status message about anonymous room.
if mess.x.defaultUri == 'http://jabber.org/protocol/muc#user':
if mess.x.status and mess.x.status.getAttribute('code') == '100':
self.logprint("Anonymous room message, skipping.")
return
fromstr = mess.getAttribute('from')
fromjid = jid.JID(fromstr)
# Check if user is in ignore list
if fromjid in self.ignorelist:
return
# Groupchat messages have different from jid
if mess['type'] in ['groupchat', 'chat']:
(fromstr, sep, nick) = fromstr.rpartition('/')
else:
nick = fromjid.user + '@' + fromjid.host
# Skip if message is sent by shoutbridge
#print "Nick is", nick
fromuser = self.roster.get(nick)
if fromuser and fromuser.name == self.login or nick == self.current_nick:
self.logprint("Got message from myself, skipping...")
return
# Get message body.
body = getElStr(mess.body)
# Send message.
user = self.get_from_roster(nick, fromstr)
if body and mess['type'] in ['message', 'groupchat', None]:
self.logprint("Relaying message to shoutbox:", user.id, user.jid, user.name, "\n", body)
self.update_last_time()
self.shoutbox.sendShout(user, body)
elif body and mess['type'] == 'chat':
self.logprint("Received priavate message:", user.id, user.jid, user.name, "\n", body)
self.update_last_time()
shout = Shout(0, user.id, nick, body, time())
self.trigger_plugin_event('XmppDirectMessage', shout)
else:
self.logprint("Unknown message:", mess.toXml())
# Trigger handleXmppMessage event
mess['nick'] = nick
shout = Shout(0, 0, nick, body, time())
self.trigger_plugin_event('XmppMessage', mess.toXml())
self.trigger_plugin_event('Message', shout)
def send_message(self, tojid, text, nick=None, notrigger=False):
"""
Send text as an XMPP message to tojid.
"""
try:
if nick:
self.change_nick(nick)
message = domish.Element((None, 'message'))
message['to'] = tojid
message['from'] = self.login + '/' + nick
message['type'] = 'groupchat'
message.addElement('body', content=text)
self.update_last_time()
self.send_stanza(message)
shout = Shout(0, 0, nick, text, time())
if not notrigger:
self.trigger_plugin_event('SentMessage', shout)
except UnicodeDecodeError:
self.logprint("Unicode Decode Error: ", text)
def reactor_error(self, failure):
# FIXME: Log to file
self.logprint("Reactor failure", failure)
# Let's try restarting if there is a failure. Hope we don't get a feedback loop.
self.start_message_loop()
return True
def start_message_loop(self):
l = task.LoopingCall(self.process_shoutbox_messages)
# We are using longpoll, so do another request within a second.
d1 = l.start(float(self.cfg.loop_time))
d1.addErrback(self.reactor_error)
def start_ping_loop(self):
l2 = task.LoopingCall(self.ping)
d2 = l2.start(60.0)
d2.addErrback(self.reactor_error)
def listen(self):
"""
Start listening on XMPP and Shoutbox, relaying messages.
"""
try:
# Send messages from shoutbox every few seconds
self.start_message_loop()
self.start_ping_loop()
# Start the reactor
reactor.run()
except KeyboardInterrupt:
self.close_connection()
self.logprint("Exiting...")
| |
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import collections
import logging
import grpc
from opencensus.ext import grpc as oc_grpc
from opencensus.ext.grpc import utils as grpc_utils
from opencensus.trace import attributes_helper, execution_context
from opencensus.trace import span as span_module
from opencensus.trace import time_event
from opencensus.trace.propagation import binary_format
log = logging.getLogger(__name__)
ATTRIBUTE_COMPONENT = 'COMPONENT'
ATTRIBUTE_ERROR_NAME = 'ERROR_NAME'
ATTRIBUTE_ERROR_MESSAGE = 'ERROR_MESSAGE'
GRPC_HOST_PORT = 'GRPC_HOST_PORT'
GRPC_METHOD = 'GRPC_METHOD'
SENT_PREFIX = 'Sent'
TIMEOUT = 3
# Do not trace StackDriver Trace exporter activities to avoid deadlock.
CLOUD_TRACE = 'google.devtools.cloudtrace'
class _ClientCallDetails(
collections.namedtuple(
'_ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials')),
grpc.ClientCallDetails):
pass
class OpenCensusClientInterceptor(grpc.UnaryUnaryClientInterceptor,
grpc.UnaryStreamClientInterceptor,
grpc.StreamUnaryClientInterceptor,
grpc.StreamStreamClientInterceptor):
def __init__(self, tracer=None, host_port=None):
self._tracer = tracer
self.host_port = host_port
self._propagator = binary_format.BinaryFormatPropagator()
@property
def tracer(self):
return self._tracer or execution_context.get_opencensus_tracer()
def _start_client_span(self, client_call_details):
span = self.tracer.start_span(
name=_get_span_name(client_call_details)
)
span.span_kind = span_module.SpanKind.CLIENT
# Add the component grpc to span attribute
self.tracer.add_attribute_to_current_span(
attribute_key=attributes_helper.COMMON_ATTRIBUTES.get(
ATTRIBUTE_COMPONENT),
attribute_value='grpc')
# Add the host:port info to span attribute
self.tracer.add_attribute_to_current_span(
attribute_key=attributes_helper.GRPC_ATTRIBUTES.get(
GRPC_HOST_PORT),
attribute_value=self.host_port)
# Add the method to span attribute
self.tracer.add_attribute_to_current_span(
attribute_key=attributes_helper.GRPC_ATTRIBUTES.get(GRPC_METHOD),
attribute_value=str(client_call_details.method))
return span
def _intercept_call(
self, client_call_details, request_iterator, grpc_type
):
metadata = ()
if client_call_details.metadata is not None:
metadata = client_call_details.metadata
# Start a span
current_span = self._start_client_span(client_call_details)
span_context = current_span.context_tracer.span_context
header = self._propagator.to_header(span_context)
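# The serialized span context travels in the call metadata under
# oc_grpc.GRPC_TRACE_KEY so the server-side interceptor can reconstruct the
# parent span.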
grpc_trace_metadata = {
oc_grpc.GRPC_TRACE_KEY: header,
}
if isinstance(metadata, list):
metadata_to_append = list(six.iteritems(grpc_trace_metadata))
else:
metadata_to_append = tuple(six.iteritems(grpc_trace_metadata))
metadata = metadata + metadata_to_append
client_call_details = _ClientCallDetails(
client_call_details.method,
client_call_details.timeout,
metadata,
client_call_details.credentials)
request_iterator = grpc_utils.wrap_iter_with_message_events(
request_or_response_iter=request_iterator,
span=current_span,
message_event_type=time_event.Type.SENT
)
return client_call_details, request_iterator, current_span
def _callback(self, current_span):
def callback(future_response):
grpc_utils.add_message_event(
proto_message=future_response.result(),
span=current_span,
message_event_type=time_event.Type.RECEIVED,
)
self._trace_future_exception(future_response)
self.tracer.end_span()
return callback
def _trace_future_exception(self, response):
# Trace the exception for a grpc.Future if any
exception = response.exception()
if exception is not None:
exception = str(exception)
self.tracer.add_attribute_to_current_span(
attribute_key=attributes_helper.COMMON_ATTRIBUTES.get(
ATTRIBUTE_ERROR_MESSAGE),
attribute_value=exception)
def intercept_unary_unary(
self, continuation, client_call_details, request
):
if CLOUD_TRACE in client_call_details.method:
response = continuation(client_call_details, request)
return response
new_details, new_request, current_span = self._intercept_call(
client_call_details=client_call_details,
request_iterator=iter((request,)),
grpc_type=oc_grpc.UNARY_UNARY)
response = continuation(
new_details,
next(new_request))
response.add_done_callback(self._callback(current_span))
return response
def intercept_unary_stream(
self, continuation, client_call_details, request
):
if CLOUD_TRACE in client_call_details.method:
response = continuation(client_call_details, request)
return response
new_details, new_request_iterator, current_span = self._intercept_call(
client_call_details=client_call_details,
request_iterator=iter((request,)),
grpc_type=oc_grpc.UNARY_STREAM)
return grpc_utils.WrappedResponseIterator(
continuation(new_details, next(new_request_iterator)),
current_span)
def intercept_stream_unary(
self, continuation, client_call_details, request_iterator
):
if CLOUD_TRACE in client_call_details.method:
response = continuation(client_call_details, request_iterator)
return response
new_details, new_request_iterator, current_span = self._intercept_call(
client_call_details=client_call_details,
request_iterator=request_iterator,
grpc_type=oc_grpc.STREAM_UNARY)
response = continuation(
new_details,
new_request_iterator)
response.add_done_callback(self._callback(current_span))
return response
def intercept_stream_stream(
self, continuation, client_call_details, request_iterator
):
if CLOUD_TRACE in client_call_details.method:
response = continuation(client_call_details, request_iterator)
return response
new_details, new_request_iterator, current_span = self._intercept_call(
client_call_details=client_call_details,
request_iterator=request_iterator,
grpc_type=oc_grpc.STREAM_STREAM)
return grpc_utils.WrappedResponseIterator(
continuation(new_details, new_request_iterator), current_span)
def _get_span_name(client_call_details):
"""Generates a span name based off of the gRPC client call details"""
method_name = client_call_details.method[1:].replace('/', '.')
return '{}.{}'.format(SENT_PREFIX, method_name)
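# A minimal wiring sketch (the channel target is illustrative): the
# interceptor is attached to an existing channel via grpc.intercept_channel.
#
#     import grpc
#     channel = grpc.insecure_channel('localhost:50051')
#     channel = grpc.intercept_channel(
#         channel, OpenCensusClientInterceptor(host_port='localhost:50051'))
#     # Stubs created from this channel now propagate the trace context.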
| |
"""
Defines and draws the status bar that should appear along the bottom of the
main SasView window.
"""
import wx
import sys
import logging
import datetime
from wx import StatusBar as wxStatusB
from wx.lib import newevent
import wx.richtext
from sas.sasgui.guiframe.gui_style import GUIFRAME_ICON
logger = logging.getLogger(__name__)
# Number of fields on the status bar
NB_FIELDS = 4
#position of the status bar's fields
ICON_POSITION = 0
MSG_POSITION = 1
GAUGE_POSITION = 2
CONSOLE_POSITION = 3
BUTTON_SIZE = 40
STATUS_BAR_ICON_SIZE = 12
CONSOLE_WIDTH = 500
CONSOLE_HEIGHT = 300
if sys.platform.count("win32") > 0:
FONT_VARIANT = 0
else:
FONT_VARIANT = 1
GREEN = wx.Colour(95, 190, 95)
YELLOW = wx.Colour(247, 214, 49)
RED = wx.Colour(234, 89, 78)
class ConsolePanel(wx.Panel):
"""
Interaction class for adding messages to the Console log.
"""
def __init__(self, parent, *args, **kwargs):
wx.Panel.__init__(self, parent=parent, *args, **kwargs)
self.parent = parent
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.msg_txt = wx.richtext.RichTextCtrl(self, size=(CONSOLE_WIDTH-40,
CONSOLE_HEIGHT-60),
style=wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER)
self.msg_txt.SetEditable(False)
timestamp = datetime.datetime.now()
status = '{:%Y-%m-%d %H:%M:%S} : No message available'.format(timestamp)
self.msg_txt.SetValue(status)
self.sizer.Add(self.msg_txt, 1, wx.EXPAND|wx.ALL, 10)
self.SetSizer(self.sizer)
def set_message(self, status="", event=None):
"""
Adds a message to the console log as well as the main sasview.log
:param status: A status message to be sent to the console log.
:param event: A wx event.
"""
status = str(status)
if status.strip() == "":
return
# Add timestamp
timestamp = datetime.datetime.now()
status = '{:%Y-%m-%d %H:%M:%S} : '.format(timestamp) + status
color = (0, 0, 0) #black
icon_bmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_TOOLBAR)
if hasattr(event, "info"):
icon_type = event.info.lower()
if icon_type == "warning":
logger.warning(status)
color = (0, 0, 255) # blue
icon_bmp = wx.ArtProvider.GetBitmap(wx.ART_WARNING,
wx.ART_TOOLBAR)
if icon_type == "error":
logger.error(status)
color = (255, 0, 0) # red
icon_bmp = wx.ArtProvider.GetBitmap(wx.ART_ERROR,
wx.ART_TOOLBAR)
if icon_type == "info":
icon_bmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION,
wx.ART_TOOLBAR)
self.msg_txt.Newline()
self.msg_txt.WriteBitmap(icon_bmp)
self.msg_txt.BeginTextColour(color)
self.msg_txt.WriteText("\t")
self.msg_txt.AppendText(status)
self.msg_txt.EndTextColour()
class Console(wx.Frame):
"""
The main class defining the Console window.
"""
def __init__(self, parent=None, status="", *args, **kwds):
kwds["size"] = (CONSOLE_WIDTH, CONSOLE_HEIGHT)
kwds["title"] = "Console"
wx.Frame.__init__(self, parent=parent, *args, **kwds)
self.SetWindowVariant(FONT_VARIANT)
self.panel = ConsolePanel(self)
self.panel.set_message(status=status)
wx.EVT_CLOSE(self, self.Close)
def set_multiple_messages(self, messages=[]):
"""
Method to send an arbitrary number of messages to the console log
:param messages: A list of strings to be sent to the console log.
"""
if messages:
for status in messages:
self.panel.set_message(status=status)
def set_message(self, status, event=None):
"""
Exposing the base ConsolePanel set_message
:param status: A status message to be sent to the console log.
:param event: A wx event.
"""
self.panel.set_message(status=str(status), event=event)
def Close(self, event):
"""
Calling close on the panel will hide the panel.
:param event: A wx event.
"""
self.Hide()
class StatusBar(wxStatusB):
"""
Application status bar
"""
def __init__(self, parent, id):
wxStatusB.__init__(self, parent, id)
self.parent = parent
self.parent.SetStatusBarPane(MSG_POSITION)
#Layout of status bar
width = STATUS_BAR_ICON_SIZE
height = STATUS_BAR_ICON_SIZE
self.SetFieldsCount(NB_FIELDS)
# Leave some space for the resize handle in the last field
console_btn_width = 80
self.SetStatusWidths([width+4, -2, -1, width+console_btn_width])
self.SetMinHeight(height + 10)
#display default message
self.msg_position = MSG_POSITION
# Create progress bar
gauge_width = 5 * width
self.gauge = wx.Gauge(self, size=(gauge_width, height),
style=wx.GA_HORIZONTAL)
self.gauge.Hide()
# Create status bar icon reflecting the type of status
# for the last message
self.status_color = wx.StaticText(self, id=wx.NewId(), label=" ",
size=wx.Size(15, 15))
self.status_color.SetBackgroundColour(GREEN)
self.status_color.SetForegroundColour(GREEN)
# Create the button used to show the console dialog
self.console_button = wx.Button(self, wx.NewId(), "Console",
size=(console_btn_width, -1))
font = self.console_button.GetFont()
_, pixel_h = font.GetPixelSize()
font.SetPixelSize(wx.Size(0, int(pixel_h*0.9)))
self.console_button.SetFont(font)
self.console_button.SetToolTipString("History of status bar messages")
self.console_button.Bind(wx.EVT_BUTTON, self._onMonitor,
id=self.console_button.GetId())
self.reposition()
## Current progress value of the bar
self.nb_start = 0
self.nb_progress = 0
self.nb_stop = 0
self.frame = None
self.list_msg = []
self.frame = Console(parent=self)
if hasattr(self.frame, "IsIconized"):
if not self.frame.IsIconized():
try:
icon = self.parent.GetIcon()
self.frame.SetIcon(icon)
except:
try:
FRAME_ICON = wx.Icon(GUIFRAME_ICON.FRAME_ICON_PATH,
wx.BITMAP_TYPE_ICO)
self.frame.SetIcon(FRAME_ICON)
except:
pass
self.frame.set_multiple_messages(self.list_msg)
self.frame.Hide()
self.progress = 0
self.timer = wx.Timer(self, -1)
self.timer_stop = wx.Timer(self, -1)
self.thread = None
self.Bind(wx.EVT_TIMER, self._on_time, self.timer)
self.Bind(wx.EVT_TIMER, self._on_time_stop, self.timer_stop)
self.Bind(wx.EVT_SIZE, self.on_size)
self.Bind(wx.EVT_IDLE, self.on_idle)
def reposition(self):
"""
Place the various fields in their proper position
"""
rect = self.GetFieldRect(GAUGE_POSITION)
self.gauge.SetPosition((rect.x, rect.y))
rect = self.GetFieldRect(ICON_POSITION)
self.status_color.SetPosition((rect.x, rect.y))
rect = self.GetFieldRect(CONSOLE_POSITION)
self.console_button.SetPosition((rect.x, rect.y))
self.size_changed = False
def on_idle(self, event):
"""
When the window is idle, check if the window has been resized
"""
if self.size_changed:
self.reposition()
def on_size(self, evt):
"""
If the window is resized, redraw the window.
"""
self.reposition()
self.size_changed = True
def get_msg_position(self):
"""
Get the last known message that was displayed on the console window.
"""
return self.msg_position
def SetStatusText(self, text="", number=MSG_POSITION, event=None):
"""
Set the text that will be displayed in the status bar.
"""
wxStatusB.SetStatusText(self, text.split('\n', 1)[0], number)
self.list_msg.append(text)
self.status_color.SetBackgroundColour(GREEN)
self.status_color.SetForegroundColour(GREEN)
if self.frame is not None:
self.frame.set_message(status=text, event=event)
def PopStatusText(self, *args, **kwds):
"""
Override status bar
"""
wxStatusB.PopStatusText(self, field=MSG_POSITION)
def PushStatusText(self, *args, **kwds):
"""
PushStatusText
"""
text = "PushStatusText: What is this string?"
wxStatusB.PushStatusText(self, field=MSG_POSITION, string=text)
def enable_clear_gauge(self):
"""
clear the progress bar
"""
flag = True
# Why we do this?
#if (self.nb_start <= self.nb_stop) or \
# (self.nb_progress <= self.nb_stop):
# flag = True
return flag
def _on_time_stop(self, evt):
"""
Clear the progress bar
:param evt: wx.EVT_TIMER
"""
count = 0
while count <= 100:
count += 1
self.timer_stop.Stop()
self.clear_gauge(msg="")
self.nb_progress = 0
self.nb_start = 0
self.nb_stop = 0
def _on_time(self, evt):
"""
Update the progress bar while the timer is running
:param evt: wx.EVT_TIMER
"""
# Check stop flag that can be set from non main thread
if self.timer.IsRunning():
self.gauge.Pulse()
def clear_gauge(self, msg=""):
"""
Hide the gauge
"""
self.progress = 0
self.gauge.SetValue(0)
self.gauge.Hide()
def set_icon(self, event):
"""
Display icons related to the type of message sent to the statusbar
when available. No icon is displayed if the message is empty
"""
if hasattr(event, "status"):
status = str(event.status)
if status.strip() == "":
return
else:
return
if not hasattr(event, "info"):
return
# Get the size of the button images
height = STATUS_BAR_ICON_SIZE
msg = event.info.lower()
if msg == "warning":
self.status_color.SetBackgroundColour(YELLOW)
self.status_color.SetForegroundColour(YELLOW)
elif msg == "error":
self.status_color.SetBackgroundColour(RED)
self.status_color.SetForegroundColour(RED)
else:
self.status_color.SetBackgroundColour(GREEN)
self.status_color.SetForegroundColour(GREEN)
def set_dialog(self, event):
"""
Display dialogbox
"""
if not hasattr(event, "info"):
return
msg = event.info.lower()
if msg == "error":
e_msg = "Error(s) Occurred:\n"
e_msg += "\t" + event.status + "\n\n"
e_msg += "Further information might be available in "
e_msg += "the Console log (bottom right corner)."
wx.MessageBox(e_msg, style=wx.ICON_ERROR)
def set_message(self, event):
"""
display received message on the statusbar
"""
if hasattr(event, "status"):
self.SetStatusText(text=str(event.status), event=event)
def set_gauge(self, event):
"""
change the state of the gauge according the state of the current job
"""
if not hasattr(event, "type"):
return
type = event.type
self.gauge.Show(True)
if type.lower() == "start":
self.nb_start += 1
#self.timer.Stop()
self.progress += 5
self.gauge.SetValue(int(self.progress))
self.progress += 5
if self.progress < self.gauge.GetRange() - 20:
self.gauge.SetValue(int(self.progress))
if type.lower() == "progress":
self.nb_progress += 1
self.timer.Start(1)
self.gauge.Pulse()
if type.lower() == "update":
self.progress += 5
if self.progress < self.gauge.GetRange()- 20:
self.gauge.SetValue(int(self.progress))
if type.lower() == "stop":
self.nb_stop += 1
self.gauge.Show(True)
if self.enable_clear_gauge():
self.timer.Stop()
self.progress = 0
self.gauge.SetValue(100)
self.timer_stop.Start(5)
def set_status(self, event):
"""
Update the status bar.
:param type: type of message sent.
type must be in ["start","progress","update","stop"]
:param msg: the message itself as string
:param thread: if updating using a thread status
"""
self.set_message(event=event)
self.set_icon(event=event)
self.set_gauge(event=event)
# dialog on error
self.set_dialog(event=event)
def _onMonitor(self, event):
"""
Pop up a frame with messages sent to the status bar
"""
self.frame.Show(False)
self.frame.Show(True)
class SPageStatusbar(wxStatusB):
def __init__(self, parent, timeout=None, *args, **kwds):
wxStatusB.__init__(self, parent, *args, **kwds)
self.SetFieldsCount(1)
self.timeout = timeout
width, height = parent.GetSizeTuple()
self.gauge = wx.Gauge(self, style=wx.GA_HORIZONTAL,
size=(width, height/10))
rect = self.GetFieldRect(0)
self.gauge.SetPosition((rect.x , rect.y ))
if self.timeout is not None:
self.gauge.SetRange(int(self.timeout))
self.timer = wx.Timer(self, -1)
self.Bind(wx.EVT_TIMER, self._on_time, self.timer)
self.timer.Start(1)
self.pos = 0
def _on_time(self, evt):
"""
Update the progress bar while the timer is running
:param evt: wx.EVT_TIMER
"""
# Check stop flag that can be set from non main thread
if self.timeout is None and self.timer.IsRunning():
self.gauge.Pulse()
if __name__ == "__main__":
app = wx.PySimpleApp()
frame = wx.Frame(None, wx.ID_ANY, 'test frame')
#statusBar = StatusBar(frame, wx.ID_ANY)
statusBar = SPageStatusbar(frame)
frame.SetStatusBar(statusBar)
frame.Show(True)
#event = MessageEvent()
#event.type = "progress"
#event.status = "statusbar...."
#event.info = "error"
#statusBar.set_status(event=event)
app.MainLoop()
| |
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import copy
import os.path
from core.system import current_user, current_time
from core.mashup.alias import Alias
from core.mashup.component import Component
from core.mashup.mashup import Mashup
class MashupController(object):
def __init__(self, originalController, vt_controller, vt_version, mshptrail=None):
self.vtController = vt_controller
self.originalController = originalController
self.vtVersion = vt_version
self.vtPipeline = self.vtController.vistrail.getPipeline(self.vtVersion)
self.vtPipeline.validate()
self.mshptrail = mshptrail
self.id_scope = mshptrail.id_scope
self.currentVersion = -1
self.currentMashup = None
self._changed = False
def setChanged(self, on):
self._changed = on
self.originalController.set_changed(True)
def setCurrentVersion(self, version, quiet=True):
self.currentVersion = version
self.vtPipeline = self.vtController.vistrail.getPipeline(self.vtVersion)
if version > -1:
self.currentMashup = self.mshptrail.getMashup(version)
self.updatePipelineAliasesFromCurrentMashup()
def getVistrailParam(self, alias):
if self.vtPipeline:
return self.vtPipeline.db_get_object(alias.component.vttype,
alias.component.vtid)
return None
def execute(self, params):
if self.vtPipeline and self.vtController:
mashup_id = self.mshptrail.id
mashup_version = self.currentVersion
reason = "mashup::%s::%s"%(str(mashup_id), mashup_version)
result = self.vtController.execute_current_workflow(custom_params=params,
reason=reason)
self.originalController.set_changed(True)
return result
return ([], False)
def updateCurrentTag(self, name):
if self.mshptrail.changeTag(self.currentVersion, name, current_user(),
current_time()):
self.setChanged(True)
return True
else:
return False
def moveTag(self, from_version, to_version, name):
tag = self.mshptrail.getTagForActionId(from_version)
if tag:
self.mshptrail.removeTagByActionId(from_version)
self.mshptrail.addTag(to_version, tag, user=current_user(),
date=current_time())
def getCurrentTag(self):
return self.mshptrail.getTagForActionId(self.currentVersion)
def versionHasTag(self, version):
return self.mshptrail.hasTagForActionId(version)
def hasTagWithName(self, name):
return self.mshptrail.hasTagWithName(name)
def getVistrailName(self):
name = ''
locator = self.currentMashup.vtid
if locator != None:
if locator.name == None:
name = ''
else:
name = os.path.split(locator.name)[1]
if name == '':
name = self.vtController.name
return name
def resetVistrailPipeline(self):
self.vtController.change_selected_version(self.vtVersion)
def getVistrailWorkflowTag(self):
return self.vtController.get_pipeline_name(self.vtVersion)[9:]
def reorderAliases(self, new_order):
if self.currentMashup:
new_aliases = []
pos = 0
for old_pos in new_order:
alias = copy.copy(self.currentMashup.alias_list[old_pos])
alias.component.pos = pos
new_aliases.append(alias)
pos += 1
return self.createMashupVersion(new_aliases, quiet=False)
def updateAlias(self, alias):
"""updateAlias(alias)-> long
This will create a version with an alias change (can't be a position
change). Position changes are taken care in reorderAliases method.
"""
#print " controller updateAlias ", alias
new_aliases = []
if self.currentMashup:
for a in self.currentMashup.alias_list:
if a.id != alias.id:
calias = copy.copy(a)
else:
#print "found alias: ", a
calias = copy.copy(alias)
new_aliases.append(calias)
return self.createMashupVersion(new_aliases, quiet=False)
def updateAliasFromParam(self, param):
add_alias = True
new_aliases = []
pos = 0
for alias in self.currentMashup.alias_list:
if alias.component.vtid != param.id:
calias = copy.copy(alias)
calias.component.pos = pos
new_aliases.append(calias)
pos += 1
else:
#print "found alias: ", alias
add_alias = False
if param.alias != '':
new_alias = copy.copy(alias)
new_alias.name = param.alias
new_aliases.append(new_alias)
pos += 1
if add_alias:
parameter = self.vtPipeline.db_get_object(param.dbtype, param.id)
cid = self.id_scope.getNewId('component')
aid = self.id_scope.getNewId('alias')
component = Component(cid, parameter.vtType,
parameter.real_id, param.parent_dbtype,
param.parent_id,
param.mId, parameter.type,
parameter.strValue, parameter.pos,
pos, "")
alias = Alias(aid, param.alias, component)
new_aliases.append(alias)
self.vtPipeline.add_alias(param.alias, param.type, param.id,
param.parent_dbtype, param.parent_id,
param.mId)
else:
self.vtPipeline.change_alias(param.alias, param.type, param.id,
param.parent_dbtype, param.parent_id,
param.mId)
return self.createMashupVersion(new_aliases, quiet=False)
def updateAliasesFromPipeline(self, pipeline):
"""updateAliasesFromPipeline(self, pipeline) -> long
This will generate a new mashup by updating the aliases of the current
mashup according to the aliases in a pipeline. This assumes that the
mashup's current aliases are different from pipeline aliases by at most
one change (eg., an alias rename, an alias addition, an alias removal)
"""
pip_aliases = pipeline.aliases.keys()
mashup_aliases = [a.name for a in self.currentMashup.alias_list]
new_aliases = []
if len(pip_aliases) == len(mashup_aliases):
#an alias probably changed its name or its value
old_a = None
new_a = None
for a in self.currentMashup.alias_list:
if a.name not in pip_aliases:
old_a = copy.copy(a)
new_aliases.append(old_a)
else:
new_aliases.append(a)
for a in pip_aliases:
if a not in mashup_aliases:
new_a = (a, pipeline.aliases[a])
if old_a is not None and new_a is not None:
(a, info) = new_a
parameter = pipeline.db_get_object(info[0],info[1])
old_a.name = a
old_a.component.vttype = parameter.vtType
old_a.component.vtid = parameter.real_id
old_a.component.vtparent_type = info[2]
old_a.component.vt_parent_id = info[3]
old_a.component.mid = info[4]
old_a.component.type = parameter.type
old_a.component.val = parameter.strValue
old_a.component.vtpos = parameter.pos
elif len(pip_aliases) < len(mashup_aliases):
# an alias was removed
pos = 0
for a in self.currentMashup.alias_list:
if a.name in pip_aliases:
alias = copy.copy(a)
alias.component.pos = pos
new_aliases.append(alias)
pos += 1
else:
#an alias was added
pos = len(mashup_aliases)
new_aliases = [a for a in self.currentMashup.alias_list]
for a in pip_aliases:
if a not in mashup_aliases:
info = pipeline.aliases[a]
parameter = pipeline.db_get_object(info[0],info[1])
cid = self.id_scope.getNewId('component')
aid = self.id_scope.getNewId('alias')
component = Component(cid, parameter.vtType,
parameter.real_id, info[2], info[3],
info[4], parameter.type,
parameter.strValue, parameter.pos,
pos, "")
alias = Alias(aid, a, component)
new_aliases.append(alias)
pos += 1
return self.createMashupVersion(new_aliases, quiet=False)
def updatePipelineAliasesFromCurrentMashup(self):
self.resetVistrailPipeline()
self.vtPipeline = copy.copy(self.vtController.current_pipeline)
#first we clear all aliases in pipeline
to_remove = self.vtPipeline.aliases.values()
for (type, oId, parentType, parentId, mid) in to_remove:
self.vtPipeline.remove_alias(type, oId, parentType, parentId, mid)
parameter = self.vtPipeline.db_get_object(type,oId)
parameter.alias = ''
#now we populate the pipeline according to the aliases in the mashup
for alias in self.currentMashup.alias_list:
self.vtPipeline.add_alias(alias.name, alias.component.vttype,
alias.component.vtid,
alias.component.vtparent_type,
alias.component.vtparent_id,
alias.component.vtmid)
parameter = self.vtPipeline.db_get_object(alias.component.vttype,
alias.component.vtid)
parameter.alias = alias.name
def getMashupName(self, version=-1):
action_map = self.mshptrail.actionMap
if version == -1:
version = self.currentVersion
count = 0
while True:
hasTag = self.mshptrail.hasTagForActionId(version)
if hasTag or version <= 1:
if hasTag:
name = self.mshptrail.getTagForActionId(version)
else:
name = "ROOT"
count_str = ""
if count > 0:
count_str = " + " + str(count)
return name + count_str
version = action_map[version].parent_id
count += 1
def findFirstTaggedParent(self, version):
action_map = self.mshptrail.actionMap
version = action_map[version].parent_id
while True:
hasTag = self.mshptrail.hasTagForActionId(version)
if hasTag or version <= 1:
name = ""
if hasTag:
name = self.mshptrail.getTagForActionId(version)
return (version, name)
version = action_map[version].parent_id
def removeAlias(self, name):
"""removeAlias(name: str) -> long
This will create a new version of the mashup without alias name, add it
to the trail and set the version as the current version. It will return
the version number
"""
new_aliases = []
if self.currentMashup:
pos = 0
for alias in self.currentMashup.alias_list:
if alias.name != name:
calias = copy.copy(alias)
calias.component.pos = pos
new_aliases.append(calias)
pos += 1
return self.createMashupVersion(alias_list=new_aliases, quiet=False)
def createMashupVersion(self, alias_list, quiet=False):
id = self.id_scope.getNewId('mashup')
mashup = Mashup(id=id, name="mashup%s"%id,
vtid=self.currentMashup.vtid,
version=self.currentMashup.version,
alias_list=alias_list)
currVersion = self.mshptrail.addVersion(parent_id=self.currentVersion,
mashup=mashup,
user=current_user(),
date=current_time())
self.mshptrail.currentVersion = currVersion
self.currentMashup = mashup
#print "created new mashup ", currVersion
self.setCurrentVersion(currVersion, quiet)
self.setChanged(True)
return currVersion
| |
"""The tests for UVC camera module."""
import socket
import unittest
from unittest import mock
import pytest
import requests
from uvcclient import camera, nvr
from homeassistant.components.camera import SUPPORT_STREAM
from homeassistant.components.uvc import camera as uvc
from homeassistant.exceptions import PlatformNotReady
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
class TestUVCSetup(unittest.TestCase):
"""Test the UVC camera platform."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
@mock.patch("uvcclient.nvr.UVCRemote")
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_full_config(self, mock_uvc, mock_remote):
"""Test the setup with full configuration."""
config = {
"platform": "uvc",
"nvr": "foo",
"password": "bar",
"port": 123,
"key": "secret",
}
mock_cameras = [
{"uuid": "one", "name": "Front", "id": "id1"},
{"uuid": "two", "name": "Back", "id": "id2"},
{"uuid": "three", "name": "Old AirCam", "id": "id3"},
]
def mock_get_camera(uuid):
"""Create a mock camera."""
if uuid == "id3":
return {"model": "airCam"}
return {"model": "UVC"}
mock_remote.return_value.index.return_value = mock_cameras
mock_remote.return_value.get_camera.side_effect = mock_get_camera
mock_remote.return_value.server_version = (3, 2, 0)
assert setup_component(self.hass, "camera", {"camera": config})
assert mock_remote.call_count == 1
assert mock_remote.call_args == mock.call("foo", 123, "secret", ssl=False)
mock_uvc.assert_has_calls(
[
mock.call(mock_remote.return_value, "id1", "Front", "bar"),
mock.call(mock_remote.return_value, "id2", "Back", "bar"),
]
)
@mock.patch("uvcclient.nvr.UVCRemote")
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_partial_config(self, mock_uvc, mock_remote):
"""Test the setup with partial configuration."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_cameras = [
{"uuid": "one", "name": "Front", "id": "id1"},
{"uuid": "two", "name": "Back", "id": "id2"},
]
mock_remote.return_value.index.return_value = mock_cameras
mock_remote.return_value.get_camera.return_value = {"model": "UVC"}
mock_remote.return_value.server_version = (3, 2, 0)
assert setup_component(self.hass, "camera", {"camera": config})
assert mock_remote.call_count == 1
assert mock_remote.call_args == mock.call("foo", 7080, "secret", ssl=False)
mock_uvc.assert_has_calls(
[
mock.call(mock_remote.return_value, "id1", "Front", "ubnt"),
mock.call(mock_remote.return_value, "id2", "Back", "ubnt"),
]
)
@mock.patch("uvcclient.nvr.UVCRemote")
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_partial_config_v31x(self, mock_uvc, mock_remote):
"""Test the setup with a v3.1.x server."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_cameras = [
{"uuid": "one", "name": "Front", "id": "id1"},
{"uuid": "two", "name": "Back", "id": "id2"},
]
mock_remote.return_value.index.return_value = mock_cameras
mock_remote.return_value.get_camera.return_value = {"model": "UVC"}
mock_remote.return_value.server_version = (3, 1, 3)
assert setup_component(self.hass, "camera", {"camera": config})
assert mock_remote.call_count == 1
assert mock_remote.call_args == mock.call("foo", 7080, "secret", ssl=False)
mock_uvc.assert_has_calls(
[
mock.call(mock_remote.return_value, "one", "Front", "ubnt"),
mock.call(mock_remote.return_value, "two", "Back", "ubnt"),
]
)
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_incomplete_config(self, mock_uvc):
"""Test the setup with incomplete configuration."""
assert setup_component(self.hass, "camera", {"platform": "uvc", "nvr": "foo"})
assert not mock_uvc.called
assert setup_component(
self.hass, "camera", {"platform": "uvc", "key": "secret"}
)
assert not mock_uvc.called
assert setup_component(
self.hass, "camera", {"platform": "uvc", "port": "invalid"}
)
assert not mock_uvc.called
@mock.patch.object(uvc, "UnifiVideoCamera")
@mock.patch("uvcclient.nvr.UVCRemote")
def setup_nvr_errors_during_indexing(self, error, mock_remote, mock_uvc):
"""Set up test for NVR errors during indexing."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_remote.return_value.index.side_effect = error
assert setup_component(self.hass, "camera", {"camera": config})
assert not mock_uvc.called
def test_setup_nvr_error_during_indexing_notauthorized(self):
"""Test for error: nvr.NotAuthorized."""
self.setup_nvr_errors_during_indexing(nvr.NotAuthorized)
def test_setup_nvr_error_during_indexing_nvrerror(self):
"""Test for error: nvr.NvrError."""
self.setup_nvr_errors_during_indexing(nvr.NvrError)
pytest.raises(PlatformNotReady)
def test_setup_nvr_error_during_indexing_connectionerror(self):
"""Test for error: requests.exceptions.ConnectionError."""
self.setup_nvr_errors_during_indexing(requests.exceptions.ConnectionError)
pytest.raises(PlatformNotReady)
@mock.patch.object(uvc, "UnifiVideoCamera")
@mock.patch("uvcclient.nvr.UVCRemote.__init__")
def setup_nvr_errors_during_initialization(self, error, mock_remote, mock_uvc):
"""Set up test for NVR errors during initialization."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_remote.return_value = None
mock_remote.side_effect = error
assert setup_component(self.hass, "camera", {"camera": config})
assert not mock_remote.index.called
assert not mock_uvc.called
def test_setup_nvr_error_during_initialization_notauthorized(self):
"""Test for error: nvr.NotAuthorized."""
self.setup_nvr_errors_during_initialization(nvr.NotAuthorized)
def test_setup_nvr_error_during_initialization_nvrerror(self):
"""Test for error: nvr.NvrError."""
self.setup_nvr_errors_during_initialization(nvr.NvrError)
pytest.raises(PlatformNotReady)
def test_setup_nvr_error_during_initialization_connectionerror(self):
"""Test for error: requests.exceptions.ConnectionError."""
self.setup_nvr_errors_during_initialization(requests.exceptions.ConnectionError)
pytest.raises(PlatformNotReady)
class TestUVC(unittest.TestCase):
"""Test class for UVC."""
def setup_method(self, method):
"""Set up the mock camera."""
self.nvr = mock.MagicMock()
self.uuid = "uuid"
self.name = "name"
self.password = "seekret"
self.uvc = uvc.UnifiVideoCamera(self.nvr, self.uuid, self.name, self.password)
self.nvr.get_camera.return_value = {
"model": "UVC Fake",
"recordingSettings": {"fullTimeRecordEnabled": True},
"host": "host-a",
"internalHost": "host-b",
"username": "admin",
"channels": [
{
"id": "0",
"width": 1920,
"height": 1080,
"fps": 25,
"bitrate": 6000000,
"isRtspEnabled": True,
"rtspUris": ["rtsp://host-a:7447/uuid_rtspchannel_0"],
},
{
"id": "1",
"width": 1024,
"height": 576,
"fps": 15,
"bitrate": 1200000,
"isRtspEnabled": False,
"rtspUris": ["rtsp://host-a:7447/uuid_rtspchannel_1"],
},
],
}
self.nvr.server_version = (3, 2, 0)
def test_properties(self):
"""Test the properties."""
assert self.name == self.uvc.name
assert self.uvc.is_recording
assert "Ubiquiti" == self.uvc.brand
assert "UVC Fake" == self.uvc.model
assert SUPPORT_STREAM == self.uvc.supported_features
def test_stream(self):
"""Test the RTSP stream URI."""
stream_source = yield from self.uvc.stream_source()
assert stream_source == "rtsp://host-a:7447/uuid_rtspchannel_0"
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClientV320")
def test_login(self, mock_camera, mock_store):
"""Test the login."""
self.uvc._login()
assert mock_camera.call_count == 1
assert mock_camera.call_args == mock.call("host-a", "admin", "seekret")
assert mock_camera.return_value.login.call_count == 1
assert mock_camera.return_value.login.call_args == mock.call()
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClient")
def test_login_v31x(self, mock_camera, mock_store):
"""Test login with v3.1.x server."""
self.nvr.server_version = (3, 1, 3)
self.uvc._login()
assert mock_camera.call_count == 1
assert mock_camera.call_args == mock.call("host-a", "admin", "seekret")
assert mock_camera.return_value.login.call_count == 1
assert mock_camera.return_value.login.call_args == mock.call()
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClientV320")
def test_login_tries_both_addrs_and_caches(self, mock_camera, mock_store):
"""Test the login tries."""
responses = [0]
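        # Sentinel list: the first login attempt pops it and raises
        # socket.error (simulating "host-a" being unreachable); the retry
        # finds the list empty and succeeds, so "host-b" ends up cached.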
def mock_login(*a):
"""Mock login."""
try:
responses.pop(0)
raise socket.error
except IndexError:
pass
mock_store.return_value.get_camera_password.return_value = None
mock_camera.return_value.login.side_effect = mock_login
self.uvc._login()
assert 2 == mock_camera.call_count
assert "host-b" == self.uvc._connect_addr
mock_camera.reset_mock()
self.uvc._login()
assert mock_camera.call_count == 1
assert mock_camera.call_args == mock.call("host-b", "admin", "seekret")
assert mock_camera.return_value.login.call_count == 1
assert mock_camera.return_value.login.call_args == mock.call()
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClientV320")
def test_login_fails_both_properly(self, mock_camera, mock_store):
"""Test if login fails properly."""
mock_camera.return_value.login.side_effect = socket.error
assert self.uvc._login() is None
assert self.uvc._connect_addr is None
def test_camera_image_tries_login_bails_on_failure(self):
"""Test retrieving failure."""
with mock.patch.object(self.uvc, "_login") as mock_login:
mock_login.return_value = False
assert self.uvc.camera_image() is None
assert mock_login.call_count == 1
assert mock_login.call_args == mock.call()
def test_camera_image_logged_in(self):
"""Test the login state."""
self.uvc._camera = mock.MagicMock()
assert self.uvc._camera.get_snapshot.return_value == self.uvc.camera_image()
def test_camera_image_error(self):
"""Test the camera image error."""
self.uvc._camera = mock.MagicMock()
self.uvc._camera.get_snapshot.side_effect = camera.CameraConnectError
assert self.uvc.camera_image() is None
def test_camera_image_reauths(self):
"""Test the re-authentication."""
responses = [0]
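        # Sentinel list: the first snapshot raises CameraAuthError, which
        # should trigger exactly one re-login; the retry then returns "image".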
def mock_snapshot():
"""Mock snapshot."""
try:
responses.pop()
raise camera.CameraAuthError()
except IndexError:
pass
return "image"
self.uvc._camera = mock.MagicMock()
self.uvc._camera.get_snapshot.side_effect = mock_snapshot
with mock.patch.object(self.uvc, "_login") as mock_login:
assert "image" == self.uvc.camera_image()
assert mock_login.call_count == 1
assert mock_login.call_args == mock.call()
assert [] == responses
def test_camera_image_reauths_only_once(self):
"""Test if the re-authentication only happens once."""
self.uvc._camera = mock.MagicMock()
self.uvc._camera.get_snapshot.side_effect = camera.CameraAuthError
with mock.patch.object(self.uvc, "_login") as mock_login:
with pytest.raises(camera.CameraAuthError):
self.uvc.camera_image()
assert mock_login.call_count == 1
assert mock_login.call_args == mock.call()
| |
import argparse
import mongoengine
import redis
import sys
import rmc.models as m
import rmc.shared.constants as c
import rmc.shared.facebook as facebook
import rmc.shared.util as rmc_util
import rmc.data.crawler as rmc_crawler
import rmc.data.processor as rmc_processor
# TODO(mack): remove duplication of fields throughout code
# TODO(mack): deprecate overall rating
r = redis.StrictRedis(host=c.REDIS_HOST, port=c.REDIS_PORT, db=c.REDIS_DB)
PROFESSOR_RATING_FIELDS = [
'easiness',
'clarity',
'passion',
]
COURSE_RATING_FIELDS = [
'easiness',
'interest',
'usefulness',
'overall',
]
def increment_ratings(courses, get_rating_fn, get_fields_fn, ucs):
for uc in ucs:
ratings = get_rating_fn(courses, uc)
if not ratings:
continue
for field_key, field_value in get_fields_fn(uc):
if field_value is not None:
ratings[field_key].add_rating(field_value)
def increment_aggregate_ratings(courses, get_rating_fn, get_fields_fn, ucs):
for uc in ucs:
ratings = get_rating_fn(courses, uc)
if not ratings:
continue
for field_key, field_value in get_fields_fn(uc):
if field_value is not None:
ratings[field_key].add_aggregate_rating(field_value)
def update_mongo_course_rating():
# course => ratings
def get_rating_fn(courses, uc):
if uc.course_id not in courses:
obj = {}
for field in COURSE_RATING_FIELDS:
obj[field] = m.AggregateRating()
courses[uc.course_id] = obj
return courses[uc.course_id]
def get_fields_fn(uc):
easiness = uc.course_review.easiness
interest = uc.course_review.interest
usefulness = uc.course_review.usefulness
if easiness and interest:
overall = (easiness + interest) / 2
elif easiness:
overall = easiness
else:
overall = interest
return [
('easiness', easiness),
('interest', interest),
('overall', overall),
('usefulness', usefulness),
]
def get_aggregate_fields_fn(uc):
easiness = uc.easiness
interest = uc.interest
# TODO(mack): add usefulness metric
def calculate_overall_rating(e, i):
return ((e.count * e.rating + i.count * i.rating) /
max(1, (e.count + i.count)))
        # Heuristic for the overall rating:
        # 1. the count is the max of the counts of the two attributes
        # 2. the rating is the count-weighted average of their ratings
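        # Worked example (illustrative numbers): easiness(count=10, rating=3.0)
        # and interest(count=4, rating=4.0) give count=max(10, 4)=10 and
        # rating=(10*3.0 + 4*4.0) / (10 + 4) = 46/14 ~= 3.29.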
overall = m.AggregateRating(
count=max(easiness.count, interest.count),
rating=calculate_overall_rating(easiness, interest),
)
return [
('easiness', easiness),
('interest', interest),
('overall', overall),
]
courses = {}
args = [courses, get_rating_fn]
menlo_ucs = m.MenloCourse.get_publicly_visible(rmc_util.MIN_NUM_RATINGS)
flow_ucs = m.UserCourse.get_publicly_visible(rmc_util.MIN_NUM_RATINGS)
increment_ratings(*(args + [get_fields_fn, menlo_ucs]))
increment_ratings(*(args + [get_fields_fn, flow_ucs]))
count = [0]
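    # One-element list so the nested function below can mutate the counter
    # (Python 2 has no 'nonlocal').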
def set_course_ratings_in_mongo(courses):
for course_id, ratings in courses.items():
course = m.Course.objects.with_id(course_id)
if not course:
print 'could not find course %s in mongo' % course_id
continue
course.easiness = ratings['easiness']
course.interest = ratings['interest']
course.usefulness = ratings['usefulness']
course.overall = ratings['overall']
course.save()
count[0] += 1
set_course_ratings_in_mongo(courses)
print 'saved ratings for %d courses in mongodb' % count[0]
def update_mongo_course_professors():
count = 0
for course in m.Course.objects.only('professor_ids'):
def get_professor_ids(course, coll):
course_prof_ids_only = (coll.objects(course_id=course.id)
.only('professor_id'))
return set(
[x.professor_id for x in course_prof_ids_only
if x.professor_id]
)
professor_ids = get_professor_ids(course, m.UserCourse).union(
get_professor_ids(course, m.MenloCourse))
        # TODO(mack): add_to_set doesn't appear to validate that each item in
        # the list meets the schema, since it seemed to let me write lists
        # that contained None. Investigate whether that is actually the case
        # (an illustrative guard is sketched after this function).
course.update(add_to_set__professor_ids=list(professor_ids))
count += 1
print 'added professors for %d courses in mongodb' % count
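# Illustrative only (not called above): a minimal guard against the None
# entries mentioned in the TODO inside update_mongo_course_professors. The
# helper name is hypothetical; it simply drops None ids before they would
# reach add_to_set.
def _non_null_professor_ids(professor_ids):
    return [pid for pid in professor_ids if pid is not None]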
def update_redis_course_professor_rating():
# course => professors => ratings
def get_rating_fn(courses, uc):
if uc.professor_id is None:
return None
if uc.course_id not in courses:
courses[uc.course_id] = {}
professors = courses[uc.course_id]
if uc.professor_id not in professors:
obj = {}
for field in PROFESSOR_RATING_FIELDS:
obj[field] = m.AggregateRating()
professors[uc.professor_id] = obj
return professors[uc.professor_id]
def get_fields_fn(uc):
return [
('easiness', uc.course_review.easiness),
('clarity', uc.professor_review.clarity),
('passion', uc.professor_review.passion),
]
def get_aggregate_fields_fn(uc):
return [
('easiness', uc.easiness),
('clarity', uc.clarity),
('passion', uc.passion),
]
courses = {}
args = [courses, get_rating_fn]
menlo_ucs = m.MenloCourse.get_publicly_visible(rmc_util.MIN_NUM_RATINGS)
flow_ucs = m.UserCourse.get_publicly_visible(rmc_util.MIN_NUM_RATINGS)
increment_ratings(*(args + [get_fields_fn, menlo_ucs]))
increment_ratings(*(args + [get_fields_fn, flow_ucs]))
count = [0]
def set_course_professor_ratings_in_redis(courses):
for course_id, professors in courses.items():
for professor_id, ratings in professors.items():
if professor_id is None:
continue
professor = m.Professor.objects.with_id(professor_id)
if not professor:
continue
for rating_type, aggregate_rating in ratings.items():
professor.set_course_rating_in_redis(
course_id, rating_type, aggregate_rating)
count[0] += 1
set_course_professor_ratings_in_redis(courses)
print 'set %d course professor rating keys in redis' % count[0]
def update_all_fb_friend_list():
for user in m.User.objects():
# TODO(Sandy): Batch requests for performance
if user.fbid and not user.is_fb_token_expired:
try:
user.update_fb_friends(
facebook.get_friend_list(user.fb_access_token))
user.save()
except facebook.FacebookOAuthException as e:
user.fb_access_token_invalid = True
user.save()
except Exception as e:
print "get_friend_list failed for %s with: %s" % (user.id,
e.message)
# TODO(mack): test it when we get data to test with
# TODO(mack): currently sort of duplicate logic in
# User.cache_mutual_course_ids()
def update_redis_friend_mutual_courses():
    # TODO(Sandy): Switch to friend real-time updates once available. There's
    # a fb updates branch for this, pending on:
# branch for this, pending on:
# https://developers.facebook.com/bugs/374296595988186?browse=search_50990ddb8a19d9316431973
# Rate limit is 600 calls / 600 seconds / token:
# http://stackoverflow.com/questions/8805316/facebook-graph-api-rate-limit-and-batch-requests
update_all_fb_friend_list()
courses_by_user = {}
for user in m.User.objects.only('friend_ids', 'course_history'):
friend_ids = [str(friend_id) for friend_id in user.friend_ids]
ucs = (m.UserCourse.objects(id__in=user.course_history)
.only('course_id'))
course_ids = [uc.course_id for uc in ucs]
courses_by_user[str(user.id)] = [friend_ids, set(course_ids)]
count = 0
user_pair = set()
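    # Canonicalize each friendship as (smaller_id, larger_id) so every pair is
    # processed once and the redis key is the same regardless of direction.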
for user_id, (friend_ids, courses) in courses_by_user.iteritems():
for friend_id in friend_ids:
if user_id < friend_id:
first_id = user_id
second_id = friend_id
else:
first_id = friend_id
second_id = user_id
if (first_id, second_id) in user_pair:
continue
friend_courses = courses_by_user[friend_id][1]
mutual_courses = courses.intersection(friend_courses)
if mutual_courses:
count += 1
redis_key = m.User.cls_mutual_courses_redis_key(
first_id, second_id)
r.sadd(redis_key, *list(mutual_courses))
user_pair.add((first_id, second_id))
print 'set %d friend pair keys in redis' % count
def update_mongo_points():
total_points = 0
num_course_comments = 0
num_course_ratings = 0
num_course_shares = 0
num_professor_comments = 0
num_professor_ratings = 0
num_professor_shares = 0
num_invites = 0
for user in m.User.objects.only(
'num_invites', 'course_history', 'num_points'):
num_points = 0
if user.num_invites:
num_points += m.PointSource.FIRST_INVITE
num_invites += 1
for uc in m.UserCourse.objects(id__in=user.course_history):
num_points += uc.num_points
if uc.course_review.has_commented:
num_course_comments += 1
if uc.course_review.has_been_rated:
num_course_ratings += 1
if uc.course_review.has_shared:
num_course_shares += 1
if uc.professor_review.has_commented:
num_professor_comments += 1
if uc.professor_review.has_been_rated:
num_professor_ratings += 1
if uc.professor_review.has_shared:
num_professor_shares += 1
user.update(set__num_points=num_points)
total_points += num_points
r.set('total_points', total_points)
    print '=== update_mongo_points ==='
print 'num_course_comments', num_course_comments
print 'num_course_ratings', num_course_ratings
print 'num_course_shares', num_course_shares
print 'num_professor_comments', num_professor_comments
print 'num_professor_ratings', num_professor_ratings
print 'num_professor_shares', num_professor_shares
print 'num_invites', num_invites
def update_exam_schedule():
# Crawl data and store on disk
rmc_crawler.get_opendata_exam_schedule()
# Process the data on disk
errors = rmc_processor.import_opendata_exam_schedules()
print "%d exam schedule items found" % m.Exam.objects().count()
print "%d exam schedule items skipped" % len(errors)
def update_sections():
# Fetch data from OpenData API and cache to files.
rmc_crawler.get_opendata_sections()
# Import from files to DB.
rmc_processor.import_opendata_sections()
# Send push notifications about seat openings.
num_sent = m.GcmCourseAlert.send_eligible_alerts()
num_expired = m.GcmCourseAlert.delete_expired()
print 'Sent %s push notifications and expired %s' % (num_sent, num_expired)
def update_courses():
# First get an up to date list of departments and write to a text file
print "Fetching departments"
rmc_crawler.get_departments()
# Import any departments we don't already have into Mongo
print "Loading departments into Mongo"
rmc_processor.import_departments()
# Hit the endpoints of the OpenData API for each department
print "Fetching courses"
rmc_crawler.get_opendata2_courses()
# Load the data into Mongo
print "Loading courses into Mongo"
rmc_processor.import_courses()
def update_professors_departments():
"""Update the departments_taught field for each professor in Mongo"""
for prof in m.Professor.objects():
prof.departments_taught = prof.get_departments_taught()
prof.save()
def update_scholarships():
"""Update the scholarships available in Mongo"""
print "Fetching scholarships"
rmc_crawler.get_scholarships()
print "Loading scholarships into Mongo"
rmc_processor.import_scholarships()
if __name__ == '__main__':
mongoengine.connect(c.MONGO_DB_RMC)
parser = argparse.ArgumentParser()
mode_mapping = {
'redis_course_professor_rating': update_redis_course_professor_rating,
'redis_friend_mutual_courses': update_redis_friend_mutual_courses,
'mongo_course_rating': update_mongo_course_rating,
'mongo_course_professors': update_mongo_course_professors,
'mongo_points': update_mongo_points,
'exam_schedule': update_exam_schedule,
'sections': update_sections,
'courses': update_courses,
'prof_departments': update_professors_departments,
'scholarships': update_scholarships
}
parser.add_argument('mode',
help='one of %s' % ','.join(mode_mapping.keys() + ['daily']))
args = parser.parse_args()
if args.mode == 'daily':
daily_functions = [
update_redis_course_professor_rating,
update_redis_friend_mutual_courses,
update_mongo_course_rating,
update_mongo_course_professors,
update_mongo_points,
update_exam_schedule,
update_professors_departments
]
for func in daily_functions:
try:
func()
except Exception as exp:
print "aggregator.py: function %s threw an exception" % (func)
print exp
elif args.mode in mode_mapping:
func = mode_mapping[args.mode]
func()
else:
sys.exit('The mode %s is not supported' % args.mode)
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for coefficient-wise operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x ** y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
_NEG = lambda x: -x
_ABS = abs
_LT = lambda x, y: x < y
_LE = lambda x, y: x <= y
_GT = lambda x, y: x > y
_GE = lambda x, y: x >= y
_AND = lambda x, y: x & y
_OR = lambda x, y: x | y
_XOR = lambda x, y: x ^ y
_INV = lambda x: ~x
class UnaryOpTest(tf.test.TestCase):
def _compareCpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with self.test_session(use_gpu=False):
inx = tf.convert_to_tensor(x)
y = tf_func(inx)
tf_cpu = y.eval()
self.assertShapeEqual(np_ans, y)
self.assertAllClose(np_ans, tf_cpu)
# TODO(ebrevdo): consider adding polygamma function
if tf_func in (tf.digamma,):
return # Return early
if x.dtype == np.float32:
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
y,
s,
x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
y,
s,
x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with self.test_session(use_gpu=True):
result = tf_func(tf.convert_to_tensor(x))
tf_gpu = result.eval()
self.assertShapeEqual(np_ans, result)
self.assertAllClose(np_ans, tf_gpu)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareBoth(self, x, np_func, tf_func):
self._compareCpu(x, np_func, tf_func)
self._compareGpu(x, np_func, tf_func)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _sigmoid(self, x):
return 1.0 / (1.0 + np.exp(-x))
def _replace_domain_error_with_inf(self, fn):
def func(x):
try:
return fn(x)
except ValueError as e:
if "domain error" in str(e):
return np.inf * np.ones_like(x)
else:
raise e
return func
def testFloatBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
y = (x + .5).astype(np.float32) # no zero
z = (x + 15.5).astype(np.float32) # all positive
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(z, np.sqrt, tf.sqrt)
self._compareBoth(z, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(z, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(y, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
tf.lgamma)
self._compareBoth(x, np.vectorize(math.erf), tf.erf)
self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
def testFloatTanhEdge(self):
x = np.arange(40, 40 + 6).reshape(6).astype(np.float32)
self._compareBoth(x, np.tanh, tf.tanh)
x = np.arange(-40, -40 + 6).reshape(6).astype(np.float32)
self._compareBoth(x, np.tanh, tf.tanh)
def testFloatEmpty(self):
x = np.empty((2, 0, 5), dtype=np.float32)
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(x, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(x, np.sqrt, tf.sqrt)
self._compareBoth(x, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(x, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(x, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
    # np.vectorize fails on size-0 inputs (unless otypes is given), so just
    # use some arbitrary function; for an empty array only shape and dtype
    # handling is exercised.
self._compareBoth(x, np.sign, tf.lgamma)
self._compareBoth(x, np.sign, tf.erf)
self._compareBoth(x, np.sign, tf.erfc)
def testDoubleBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
y = (x + .5).astype(np.float64) # no zero
z = (x + 15.5).astype(np.float64) # all positive
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(z, np.sqrt, tf.sqrt)
self._compareBoth(z, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(z, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(y, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
tf.lgamma)
self._compareBoth(x, np.vectorize(math.erf), tf.erf)
self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
def testInt32Basic(self):
x = np.arange(-6, 6, 2).reshape(1, 3, 2).astype(np.int32)
self._compareCpu(x, np.abs, tf.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(x, np.square, tf.square)
self._compareCpu(x, np.sign, tf.sign)
def testInt64Basic(self):
x = np.arange(
-6 << 40, 6 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
self._compareCpu(x, np.abs, tf.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareCpu(x, np.negative, tf.neg)
self._compareCpu(x, np.negative, _NEG)
self._compareCpu(x, np.square, tf.square)
self._compareCpu(x, np.sign, tf.sign)
def testComplex64Basic(self):
x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
np.complex64)
y = x + 0.5 # no zeros
self._compareCpu(x, np.abs, tf.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareCpu(x, np.negative, tf.neg)
self._compareCpu(x, np.negative, _NEG)
self._compareCpu(y, self._inv, tf.inv)
self._compareCpu(x, np.square, tf.square)
self._compareCpu(x, np.sqrt, tf.sqrt)
self._compareCpu(y, self._rsqrt, tf.rsqrt)
self._compareCpu(x, np.exp, tf.exp)
self._compareCpu(y, np.log, tf.log)
self._compareCpu(x, np.tanh, tf.tanh)
self._compareCpu(x, self._sigmoid, tf.sigmoid)
self._compareCpu(x, np.sin, tf.sin)
self._compareCpu(x, np.cos, tf.cos)
self._compareCpu(x, np.angle, tf.arg)
class BinaryOpTest(tf.test.TestCase):
def _compareCpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=False):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = out.eval()
# Test that the op takes precedence over numpy operators.
np_left = tf_func(x, iny).eval()
np_right = tf_func(inx, y).eval()
if np_ans.dtype != np.object:
self.assertAllClose(np_ans, tf_cpu)
self.assertAllClose(np_ans, np_left)
self.assertAllClose(np_ans, np_right)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, x, y, np_func, tf_func):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = tf.test.compute_gradient(inx,
xs,
out,
zs,
x_init_value=x)
if x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, x, y, np_func, tf_func):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
ys,
out,
zs,
x_init_value=y)
if x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareBoth(self, x, y, np_func, tf_func):
self._compareCpu(x, y, np_func, tf_func)
if x.dtype in (np.float32, np.float64):
if tf_func not in (_FLOORDIV, tf.floordiv):
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
def testFloatBasic(self):
x = np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(np.float32)
y = np.linspace(20, -20, 6).reshape(1, 3, 2).astype(np.float32)
self._compareBoth(x, y, np.add, tf.add)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
def testFloatDifferentShapes(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)
y = np.array([1, 2]).reshape(2, 1).astype(np.float32)
with self.test_session() as sess:
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
s = tf.reduce_sum(inx * iny)
gx, gy = sess.run(tf.gradients(s, [inx, iny]))
# gx is simply the broadcasted y
self.assertAllEqual(gx, np.array([1, 1, 2, 2])
.reshape(2, 2).astype(np.float32))
      # gy is x summed along the broadcast axis (axis 1)
self.assertAllEqual(gy, np.array([3, 7]).
reshape(2, 1).astype(np.float32))
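      # In detail: s = sum_ij x_ij * y_i (y broadcast across columns), so
      # ds/dx_ij = y_i and ds/dy_i = sum_j x_ij, i.e. [1 + 2, 3 + 4] = [3, 7].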
def testDoubleBasic(self):
x = np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(np.float64)
y = np.linspace(20, -20, 6).reshape(1, 3, 2).astype(np.float64)
self._compareBoth(x, y, np.add, tf.add)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
def testInt8Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.multiply, _MUL)
def testInt16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.multiply, _MUL)
def testInt32Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
self._compareBoth(x, y, np.add, tf.add)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.true_divide, tf.truediv)
self._compareBoth(x, y, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.mod, tf.mod)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
def testInt64Basic(self):
x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.true_divide, tf.truediv)
self._compareBoth(x, y, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.mod, tf.mod)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
def testComplex64Basic(self):
x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex64)
y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex64)
self._compareCpu(x, y, np.add, tf.add)
self._compareCpu(x, y, np.subtract, tf.sub)
self._compareCpu(x, y, np.multiply, tf.mul)
self._compareCpu(x, y + 0.1, np.true_divide, tf.truediv)
self._compareCpu(x, y, np.add, _ADD)
self._compareCpu(x, y, np.subtract, _SUB)
self._compareCpu(x, y, np.multiply, _MUL)
self._compareCpu(x, y + 0.1, np.true_divide, _TRUEDIV)
def testStringComparison(self):
x = np.array([["abc", "bh"], ["c", ""]])
y = np.array([["abc", "bh"], ["def", "hi"]])
with self.test_session(use_gpu=False) as sess:
cmp_eq = tf.equal(x, y)
cmp_not_eq = tf.not_equal(x, y)
values = sess.run([cmp_eq, cmp_not_eq])
self.assertAllEqual([[True, True], [False, False]], values[0])
self.assertAllEqual([[False, False], [True, True]], values[1])
def testString(self):
x = np.array([["x_0_0", "x_0_1", "x_0_2"],
["x_1_0", "x_1_1", "x_1_2"],
["x_2_0", "x_2_1", "x_2_2"]], dtype=np.object)
y = np.array([["y_0_0", "y_0_1", "y_0_2"],
["y_1_0", "y_1_1", "y_1_2"],
["y_2_0", "y_2_1", "y_2_2"]], dtype=np.object)
z = np.array([["z_0", "z_1", "z_2"]], dtype=np.object)
w = np.array("w", dtype=np.object)
self._compareCpu(x, y, _ADD, _ADD)
self._compareCpu(x, z, _ADD, _ADD)
self._compareCpu(x, w, _ADD, _ADD)
self._compareCpu(z, w, _ADD, _ADD)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)
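    # The +1 offset keeps every element >= 1, so the truediv/floordiv cases
    # never divide by zero.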
self._compareCpu(x, y, np_func, tf_func)
if x.dtype in (np.float32, np.float64):
if tf_func not in (_FLOORDIV, tf.floordiv):
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
# TODO(josh11b,vrv): Refactor this to use parameterized tests.
def _testBCastByFunc(self, funcs, xs, ys):
dtypes = [
np.float32,
np.float64,
np.int32,
np.int64,
np.complex64
]
for dtype in dtypes:
for (np_func, tf_func) in funcs:
if dtype == np.complex64 and tf_func in (_FLOORDIV, tf.floordiv):
continue # floordiv makes no sense for complex numbers
self._compareBCast(xs, ys, dtype, np_func, tf_func)
self._compareBCast(ys, xs, dtype, np_func, tf_func)
def _testBCastA(self, xs, ys):
funcs = [
(np.add, tf.add),
(np.add, _ADD),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastB(self, xs, ys):
funcs = [
(np.subtract, tf.sub),
(np.subtract, _SUB),
(np.power, tf.pow),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastC(self, xs, ys):
funcs = [
(np.multiply, tf.mul),
(np.multiply, _MUL),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastD(self, xs, ys):
funcs = [
(np.true_divide, tf.truediv),
(np.floor_divide, tf.floordiv),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
self._testBCastByFunc(funcs, xs, ys)
def testBCast_0A(self):
self._testBCastA([1, 3, 2], [1])
def testBCast_0B(self):
self._testBCastB([1, 3, 2], [1])
def testBCast_0C(self):
self._testBCastC([1, 3, 2], [1])
def testBCast_0D(self):
self._testBCastD([1, 3, 2], [1])
def testBCast_1A(self):
self._testBCastA([1, 3, 2], [2])
def testBCast_1B(self):
self._testBCastB([1, 3, 2], [2])
def testBCast_1C(self):
self._testBCastC([1, 3, 2], [2])
def testBCast_1D(self):
self._testBCastD([1, 3, 2], [2])
def testBCast_2A(self):
self._testBCastA([1, 3, 2], [3, 2])
def testBCast_2B(self):
self._testBCastB([1, 3, 2], [3, 2])
def testBCast_2C(self):
self._testBCastC([1, 3, 2], [3, 2])
def testBCast_2D(self):
self._testBCastD([1, 3, 2], [3, 2])
def testBCast_3A(self):
self._testBCastA([1, 3, 2], [3, 1])
def testBCast_3B(self):
self._testBCastB([1, 3, 2], [3, 1])
def testBCast_3C(self):
self._testBCastC([1, 3, 2], [3, 1])
def testBCast_3D(self):
self._testBCastD([1, 3, 2], [3, 1])
def testBCast_4A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
def testBCast_4B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
def testBCast_4C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
def testBCast_4D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
def testBCast_5A(self):
self._testBCastA([1, 3, 2], [2, 3, 1])
def testBCast_5B(self):
self._testBCastB([1, 3, 2], [2, 3, 1])
def testBCast_5C(self):
self._testBCastC([1, 3, 2], [2, 3, 1])
def testBCast_5D(self):
self._testBCastD([1, 3, 2], [2, 3, 1])
def testBCast_6A(self):
self._testBCastA([1, 3, 2], [2, 1, 1])
def testBCast_6B(self):
self._testBCastB([1, 3, 2], [2, 1, 1])
def testBCast_6C(self):
self._testBCastC([1, 3, 2], [2, 1, 1])
def testBCast_6D(self):
self._testBCastD([1, 3, 2], [2, 1, 1])
def testBCast_7A(self):
self._testBCastA([1, 3, 2], [1, 3, 1])
def testBCast_7B(self):
self._testBCastB([1, 3, 2], [1, 3, 1])
def testBCast_7C(self):
self._testBCastC([1, 3, 2], [1, 3, 1])
def testBCast_7D(self):
self._testBCastD([1, 3, 2], [1, 3, 1])
def testBCast_8A(self):
self._testBCastA([2, 1, 5], [2, 3, 1])
def testBCast_8B(self):
self._testBCastB([2, 1, 5], [2, 3, 1])
def testBCast_8C(self):
self._testBCastC([2, 1, 5], [2, 3, 1])
def testBCast_8D(self):
self._testBCastD([2, 1, 5], [2, 3, 1])
def testBCast_9A(self):
self._testBCastA([2, 0, 5], [2, 0, 1])
def testBCast_9B(self):
self._testBCastB([2, 0, 5], [2, 0, 1])
def testBCast_9C(self):
self._testBCastC([2, 0, 5], [2, 0, 1])
def testBCast_9D(self):
self._testBCastD([2, 0, 5], [2, 0, 1])
def testBCast_10A(self):
self._testBCastA([2, 3, 0], [2, 3, 1])
def testBCast_10B(self):
self._testBCastB([2, 3, 0], [2, 3, 1])
def testBCast_10C(self):
self._testBCastC([2, 3, 0], [2, 3, 1])
def testBCast_10D(self):
self._testBCastD([2, 3, 0], [2, 3, 1])
def testBCast_11A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
def testBCast_11B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
def testBCast_11C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
def testBCast_11D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
def testBCast_12A(self):
self._testBCastA([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12B(self):
self._testBCastB([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12C(self):
self._testBCastC([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12D(self):
self._testBCastD([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_13A(self):
self._testBCastA([1, 3, 2, 1, 1], [1])
def testBCast_13B(self):
self._testBCastB([1, 3, 2, 1, 1], [1])
def testBCast_13C(self):
self._testBCastC([1, 3, 2, 1, 1], [1])
def testBCast_13D(self):
self._testBCastD([1, 3, 2, 1, 1], [1])
def testBCast_14A(self):
self._testBCastA([2, 3, 1, 1, 5], [1])
def testBCast_14B(self):
self._testBCastB([2, 3, 1, 1, 5], [1])
def testBCast_14C(self):
self._testBCastC([2, 3, 1, 1, 5], [1])
def testBCast_14D(self):
self._testBCastD([2, 3, 1, 1, 5], [1])
def testBCast_15A(self):
self._testBCastA([10, 3, 1, 2], [3, 1, 2])
def testBCast_15B(self):
self._testBCastB([10, 3, 1, 2], [3, 1, 2])
def testBCast_15C(self):
self._testBCastC([10, 3, 1, 2], [3, 1, 2])
def testBCast_15D(self):
self._testBCastD([10, 3, 1, 2], [3, 1, 2])
def testMismatchedDimensions(self):
for func in [tf.add, tf.sub, tf.mul, tf.div, _ADD, _SUB, _MUL, _TRUEDIV,
_FLOORDIV]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Incompatible shapes" in str(e)):
func(tf.convert_to_tensor([10.0, 20.0, 30.0]),
tf.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
class ComparisonOpTest(tf.test.TestCase):
def _compare(self, func, x, y, dtype):
with self.test_session(use_gpu=False):
out = func(tf.convert_to_tensor(np.array([x]).astype(dtype)),
tf.convert_to_tensor(np.array([y]).astype(dtype)))
ret = out.eval()
return ret[0]
def testScalarCompareScalar(self):
dtypes = [np.float32, np.float64, np.int32, np.int64]
data = [-1, 0, 1]
for t in dtypes:
for x in data:
for y in data:
self.assertEqual(self._compare(tf.less, x, y, t),
x < y)
self.assertEqual(self._compare(tf.less_equal, x, y, t),
x <= y)
self.assertEqual(self._compare(tf.greater, x, y, t),
x > y)
self.assertEqual(self._compare(tf.greater_equal, x, y, t),
x >= y)
self.assertEqual(self._compare(tf.equal, x, y, t),
x == y)
self.assertEqual(self._compare(tf.not_equal, x, y, t),
x != y)
def _compareCpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=False):
out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
tf_cpu = out.eval()
self.assertAllEqual(np_ans, tf_cpu)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
tf_gpu = out.eval()
self.assertAllEqual(np_ans, tf_gpu)
def _compareBoth(self, x, y, np_func, tf_func):
self._compareCpu(x, y, np_func, tf_func)
if x.dtype == np.float32 or x.dtype == np.float64:
self._compareGpu(x, y, np_func, tf_func)
def testTensorCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
y = np.linspace(20, -10, 6).reshape(1, 3, 2)
for t in [np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
self._compareBoth(xt, yt, np.less, tf.less)
self._compareBoth(xt, yt, np.less_equal, tf.less_equal)
self._compareBoth(xt, yt, np.greater, tf.greater)
self._compareBoth(xt, yt, np.greater_equal, tf.greater_equal)
self._compareBoth(xt, yt, np.equal, tf.equal)
self._compareBoth(xt, yt, np.not_equal, tf.not_equal)
# TODO(zhifengc): complex64 doesn't work on GPU yet.
self._compareCpu(x.astype(np.complex64), y.astype(np.complex64),
np.equal, tf.equal)
self._compareCpu(x.astype(np.complex64), y.astype(np.complex64),
np.not_equal, tf.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
self._compareCpu(x, y, np_func, tf_func)
self._compareCpu(y, x, np_func, tf_func)
if x.dtype == np.float32 or x.dtype == np.float64:
self._compareGpu(x, y, np_func, tf_func)
self._compareGpu(y, x, np_func, tf_func)
def _testBCastByFunc(self, np_func, tf_func):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
dtypes = [
np.float32,
np.float64,
np.int32,
np.int64,
]
for (xs, ys) in shapes:
for dtype in dtypes:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
self._testBCastByFunc(np.less, tf.less)
def testBCastLessEqual(self):
self._testBCastByFunc(np.less_equal, tf.less_equal)
def testBCastGreater(self):
self._testBCastByFunc(np.greater, tf.greater)
def testBCastGreaterEqual(self):
self._testBCastByFunc(np.greater_equal, tf.greater_equal)
def testBCastEqual(self):
self._testBCastByFunc(np.equal, tf.equal)
def testBCastNotEqual(self):
self._testBCastByFunc(np.not_equal, tf.not_equal)
def testShapeMismatch(self):
dtypes = [np.float32, np.float64, np.int32, np.int64]
funcs = [tf.less, tf.less_equal, tf.greater,
tf.greater_equal, tf.equal, tf.not_equal]
x = np.arange(0, 10).reshape([2, 5])
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
for f in funcs:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Incompatible shapes" in str(e)):
f(x.astype(t), y.astype(t))
class LogicalOpTest(tf.test.TestCase):
def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
np_ans = np_func(x, y)
with self.test_session(use_gpu=use_gpu):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_val = out.eval()
self.assertEqual(out.dtype, tf.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def _not(self, x, use_gpu=False):
np_ans = np.logical_not(x)
with self.test_session(use_gpu=use_gpu):
out = tf.logical_not(tf.convert_to_tensor(x))
tf_val = out.eval()
self.assertEqual(out.dtype, tf.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def testScalar(self):
data = [np.array([True]), np.array([False])]
for use_gpu in [True, False]:
for x in data:
self._not(x, use_gpu)
for x in data:
for y in data:
self._compareBinary(
x, y, np.logical_and, tf.logical_and, use_gpu)
self._compareBinary(
x, y, np.logical_or, tf.logical_or, use_gpu)
self._compareBinary(
x, y, np.logical_xor, tf.logical_xor, use_gpu)
def testTensor(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for use_gpu in [True, False]:
self._not(x, use_gpu)
self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)
def testBCast(self):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
for (xs, ys) in shapes:
x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)
y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)
for use_gpu in [True, False]:
self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)
def testShapeMismatch(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)
for f in [tf.logical_and, tf.logical_or, tf.logical_xor]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Incompatible shapes" in str(e)):
f(x, y)
class SelectOpTest(tf.test.TestCase):
def _compare(self, c, x, y, use_gpu):
np_ans = np.where(c, x, y)
with self.test_session(use_gpu=use_gpu):
out = tf.select(c, x, y)
tf_ans = out.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(c))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
out,
s,
x_init_value=x)
if x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, c, x, y):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(c))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
s,
out,
s,
x_init_value=y)
if x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testBasic(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [np.float32, np.float64, np.int32, np.int64, np.complex64]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
if t in [np.float32, np.float64]:
self._compare(c, xt, yt, use_gpu=True)
def testGradients(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
self._compareGradientX(c, xt, yt)
self._compareGradientY(c, xt, yt)
def testShapeMismatch(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(2, 5, 3) * 100
for t in [np.float32, np.float64, np.int32, np.int64, np.complex64]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
tf.select(c, xt, yt)
class BatchSelectOpTest(tf.test.TestCase):
"""Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+."""
def _compare(self, c, x, y, use_gpu):
np_ans = np.dstack(
[x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(
[2, 0, 1])
with self.test_session(use_gpu=use_gpu):
out = tf.select(c, x, y)
tf_ans = out.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
out,
s,
x_init_value=x)
if x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, c, x, y):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
s,
out,
s,
x_init_value=y)
if x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testBasic(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [np.float32, np.float64, np.int32, np.int64, np.complex64]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
if t in [np.float32, np.float64]:
self._compare(c, xt, yt, use_gpu=True)
def testGradients(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
self._compareGradientX(c, xt, yt)
self._compareGradientY(c, xt, yt)
def testShapeMismatch(self):
c = np.random.randint(0, 2, 8).astype(np.bool)
x = np.random.rand(16, 3, 2) * 100
y = np.random.rand(16, 3, 2) * 100
for t in [np.float32, np.float64, np.int32, np.int64, np.complex64]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
tf.select(c, xt, yt)
class MinMaxOpTest(tf.test.TestCase):
def _compare(self, x, y, use_gpu):
np_min, np_max = np.minimum(x, y), np.maximum(x, y)
with self.test_session(use_gpu=use_gpu) as sess:
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
omin, omax = tf.minimum(inx, iny), tf.maximum(inx, iny)
tf_min, tf_max = sess.run([omin, omax])
self.assertAllEqual(np_min, tf_min)
self.assertAllEqual(np_max, tf_max)
def testBasic(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1, 3, 2) * 100.
for t in [np.float32, np.float64, np.int32, np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testDifferentShapes(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(2) * 100. # should broadcast
for t in [np.float32, np.float64, np.int32, np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testScalar(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.asscalar(np.random.rand(1) * 100.) # should broadcast
# dropped np.float64, int64 because TF automatically converts to 32 bit
for t in [np.float32, np.int32]:
self._compare(x.astype(t), t(y), use_gpu=False)
self._compare(x.astype(t), t(y), use_gpu=True)
def _compareGradientX(self, func, x, y):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
out,
s,
x_init_value=x)
if x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, func, x, y):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
s,
out,
s,
x_init_value=y)
if x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testGradients(self):
x = np.random.rand(1, 3, 2) * 100.
# ensure x != y
y = x + (np.random.randint(2, size=x.shape) - .5) * 2 # -1 or +1
self._compareGradientX(tf.maximum, x, y)
self._compareGradientY(tf.maximum, x, y)
self._compareGradientX(tf.minimum, x, y)
self._compareGradientY(tf.minimum, x, y)
class MathOpsOverloadTest(tf.test.TestCase):
def _computeTensorAndLiteral(self, x, y, dtype, func):
with self.test_session(use_gpu=False):
inx = tf.convert_to_tensor(x, dtype=dtype)
z = func(inx, y) # Should use __add__, __sub__, etc.
return z.eval()
def _computeLiteralAndTensor(self, x, y, dtype, func):
with self.test_session(use_gpu=False):
iny = tf.convert_to_tensor(y, dtype=dtype)
z = func(x, iny) # Should use __radd__, __rsub__, etc.
return z.eval()
def _compareBinary(self, x, y, dtype, np_func, tf_func):
np_ans = np_func(x, y)
self.assertAllClose(np_ans, self._computeTensorAndLiteral(
x, y, dtype, tf_func))
self.assertAllClose(np_ans, self._computeLiteralAndTensor(
x, y, dtype, tf_func))
def _compareUnary(self, x, dtype, np_func, tf_func):
np_ans = np_func(x)
with self.test_session(use_gpu=False):
self.assertAllClose(np_ans, tf_func(tf.convert_to_tensor(x, dtype=dtype)).eval())
def testOverload(self):
dtypes = [
tf.float32,
tf.float64,
tf.int32,
tf.int64,
tf.complex64,
]
funcs = [
(np.add, _ADD),
(np.subtract, _SUB),
(np.multiply, _MUL),
(np.power, _POW),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
if dtype == tf.complex64 and tf_func == _FLOORDIV:
continue # floordiv makes no sense for complex
self._compareBinary(10, 5, dtype, np_func, tf_func)
# Mod only works for int32 and int64.
for dtype in [tf.int32, tf.int64]:
self._compareBinary(10, 3, dtype, np.mod, _MOD)
def testOverloadComparisons(self):
dtypes = [
tf.float32,
tf.float64,
tf.int32,
tf.int64,
]
funcs = [
(np.less, _LT),
(np.less_equal, _LE),
(np.greater, _GT),
(np.greater_equal, _GE),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
self._compareBinary(10, 5, dtype, np_func, tf_func)
logical_funcs = [
(np.logical_and, _AND),
(np.logical_or, _OR),
(np.logical_xor, _XOR),
]
for np_func, tf_func in logical_funcs:
self._compareBinary(True, False, tf.bool, np_func, tf_func)
self._compareBinary(True, True, tf.bool, np_func, tf_func)
self._compareBinary(False, False, tf.bool, np_func, tf_func)
self._compareBinary(False, True, tf.bool, np_func, tf_func)
self._compareBinary([True, True, False, False],
[True, False, True, False],
tf.bool, np_func, tf_func)
self._compareUnary(True, tf.bool, np.logical_not, _INV)
self._compareUnary(False, tf.bool, np.logical_not, _INV)
self._compareUnary([True, False], tf.bool, np.logical_not, _INV)
class IsFiniteInfNanTest(tf.test.TestCase):
def _compare(self, x, use_gpu):
np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
with self.test_session(use_gpu=use_gpu) as sess:
inx = tf.convert_to_tensor(x)
ofinite, oinf, onan = tf.is_finite(inx), tf.is_inf(
inx), tf.is_nan(inx)
tf_finite, tf_inf, tf_nan = sess.run([ofinite, oinf, onan])
self.assertAllEqual(np_inf, tf_inf)
self.assertAllEqual(np_nan, tf_nan)
self.assertAllEqual(np_finite, tf_finite)
self.assertShapeEqual(np_inf, oinf)
self.assertShapeEqual(np_nan, onan)
self.assertShapeEqual(np_finite, ofinite)
def _testDtype(self, dtype):
fi = np.finfo(dtype)
data = np.array([0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max,
-np.inf, np.inf, np.nan]).astype(dtype)
self._compare(data, use_gpu=False)
self._compare(data, use_gpu=True)
def testFloat(self):
self._testDtype(np.float32)
def testDouble(self):
self._testDtype(np.float64)
class RoundingTest(tf.test.TestCase):
def _compare(self, x, use_gpu):
np_floor, np_ceil = np.floor(x), np.ceil(x)
with self.test_session(use_gpu=use_gpu) as sess:
inx = tf.convert_to_tensor(x)
ofloor, oceil = tf.floor(inx), tf.ceil(inx)
tf_floor, tf_ceil = sess.run([ofloor, oceil])
self.assertAllEqual(np_floor, tf_floor)
self.assertAllEqual(np_ceil, tf_ceil)
self.assertShapeEqual(np_floor, ofloor)
self.assertShapeEqual(np_ceil, oceil)
def _testDtype(self, dtype):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
    self._compare(data, use_gpu=False)
    self._compare(data, use_gpu=True)
def testTypes(self):
for dtype in [np.float32, np.float64]:
self._testDtype(dtype)
class ComplexMakeRealImagTest(tf.test.TestCase):
def _compareMake(self, real, imag, use_gpu):
np_ans = real + (1j) * imag
with self.test_session(use_gpu=use_gpu):
real = tf.convert_to_tensor(real)
imag = tf.convert_to_tensor(imag)
tf_ans = tf.complex(real, imag)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def testMake(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
for use_gpu in [False, True]:
self._compareMake(real, imag, use_gpu)
self._compareMake(real, 12.0, use_gpu)
self._compareMake(23.0, imag, use_gpu)
def _compareRealImag(self, cplx, use_gpu):
np_real, np_imag = np.real(cplx), np.imag(cplx)
with self.test_session(use_gpu=use_gpu) as sess:
inx = tf.convert_to_tensor(cplx)
tf_real = tf.real(inx)
tf_imag = tf.imag(inx)
tf_real_val, tf_imag_val = sess.run([tf_real, tf_imag])
self.assertAllEqual(np_real, tf_real_val)
self.assertAllEqual(np_imag, tf_imag_val)
self.assertShapeEqual(np_real, tf_real)
self.assertShapeEqual(np_imag, tf_imag)
def testRealImag(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + (1j) * imag
self._compareRealImag(cplx, use_gpu=False)
self._compareRealImag(cplx, use_gpu=True)
def _compareConj(self, cplx, use_gpu):
np_ans = np.conj(cplx)
with self.test_session(use_gpu=use_gpu):
inx = tf.convert_to_tensor(cplx)
tf_conj = tf.conj(inx)
tf_ans = tf_conj.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, tf_conj)
def testConj(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + (1j) * imag
self._compareConj(cplx, use_gpu=False)
self._compareConj(cplx, use_gpu=True)
def _compareGradient(self, x):
# x[:, 0] is real, x[:, 1] is imag. We combine real and imag into
# complex numbers. Then, we extract real and imag parts and
    # compute the squared sum. This is obviously the same as sum(real
# * real) + sum(imag * imag). We just want to make sure the
# gradient function is checked.
with self.test_session():
inx = tf.convert_to_tensor(x)
real, imag = tf.split(1, 2, inx)
real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])
cplx = tf.complex(real, imag)
cplx = tf.conj(cplx)
loss = tf.reduce_sum(
tf.square(tf.real(cplx))) + tf.reduce_sum(
tf.square(tf.imag(cplx)))
epsilon = 1e-3
jacob_t, jacob_n = tf.test.compute_gradient(inx,
list(x.shape),
loss,
[1],
x_init_value=x,
delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
def testGradient(self):
data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)
self._compareGradient(data)
def _compareMulGradient(self, data):
# data is a float matrix of shape [n, 4]. data[:, 0], data[:, 1],
# data[:, 2], data[:, 3] are real parts of x, imaginary parts of
# x, real parts of y and imaginary parts of y.
with self.test_session():
inp = tf.convert_to_tensor(data)
xr, xi, yr, yi = tf.split(1, 4, inp)
def vec(x): # Reshape to a vector
return tf.reshape(x, [-1])
xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)
def cplx(r, i): # Combine to a complex vector
return tf.complex(r, i)
x, y = cplx(xr, xi), cplx(yr, yi)
# z is x times y in complex plane.
z = x * y
# Defines the loss function as the sum of all coefficients of z.
loss = tf.reduce_sum(tf.real(z) + tf.imag(z))
epsilon = 0.005
jacob_t, jacob_n = tf.test.compute_gradient(inp,
list(data.shape),
loss,
[1],
x_init_value=data,
delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
def testMulGradient(self):
data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)
self._compareMulGradient(data)
class AccumulateTest(tf.test.TestCase):
def testSimple(self):
with self.test_session():
random_arrays = [np.random.rand(16, 16, 16, 16).astype(np.float32)
for _ in range(20)]
random_tensors = [tf.convert_to_tensor(x, dtype=tf.float32)
for x in random_arrays]
tf_val = tf.accumulate_n(random_tensors)
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
self.assertAllClose(np_val, tf_val.eval())
def testZeroArgs(self):
with self.test_session():
with self.assertRaises(ValueError):
tf_val = tf.accumulate_n([])
tf_val.eval()
if __name__ == "__main__":
tf.test.main()
| |
"""
Analyze lux sensor data and use it to predict future behavior of the light.
"""
import math
from numpy.core.numeric import NaN
from collections import deque
from sklearn.preprocessing import StandardScaler
from thingflow.base import OutputThing, FunctionFilter,\
SensorEvent, filtermethod
from thingflow.filters.transducer import SensorSlidingMean
from lux_time_utils import get_sunrise_sunset, time_of_day_to_zone,\
NUM_ZONES, dt_to_minutes, minutes_to_time
MAX_TIME_INTERVAL = 60*4
EXPECTED_TIME_INTERVAL = 60
@filtermethod(OutputThing)
def fill_in_missing_times(this):
def on_next(self, x):
if (self.last_time is not None) and \
(x.ts - self.last_time)>MAX_TIME_INTERVAL:
ts = self.last_time + EXPECTED_TIME_INTERVAL
missing = 0
while (x.ts-ts)>EXPECTED_TIME_INTERVAL:
if missing==0:
self._dispatch_next(SensorEvent(sensor_id=x.sensor_id,
ts=ts, val=NaN))
ts += EXPECTED_TIME_INTERVAL
missing += 1
print("Found %s missing samples" % missing)
self.last_time = x.ts
self._dispatch_next(x)
f = FunctionFilter(this, on_next=on_next,
name="fill_in_missing_times()")
setattr(f, 'last_time', None)
return f
class SensorSlidingMeanPassNaNs(SensorSlidingMean):
"""Variant of SensorSlidingMean that passes on NaN-valued events without
including them in the history. We clear the sliding window each time
    a NaN is found.
"""
def __init__(self, history_samples):
super().__init__(history_samples)
def step(self, event):
if math.isnan(event.val):
if len(self.history)>0:
#self.state -= self.history.popleft().val
self.state = None
self.history = deque(maxlen=self.history_samples)
return event
else:
return SensorSlidingMean.step(self, event)
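# The helper below is a hypothetical wiring sketch, not part of the original
# module: it shows how the gap-marking filter above could feed the NaN-aware
# sliding mean. It assumes `sensor_output` is a thingflow OutputThing emitting
# SensorEvents and that thingflow's transduce() filter is available via
# thingflow.filters.transducer; the exact hookup may differ between versions.
def _demo_smoothing_pipeline(sensor_output, window=5):
    filtered = sensor_output.fill_in_missing_times()
    # Smooth with a sliding mean that passes NaN gap markers straight through.
    return filtered.transduce(SensorSlidingMeanPassNaNs(window))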
class CaptureNaNIndexes:
"""AntEvents subscriber that watches for values that are
NaN and tracks those indexes.
"""
def __init__(self):
self.idx = 0
self.nan_indexes = []
def on_next(self, x):
if math.isnan(x.val):
self.nan_indexes.append(self.idx)
self.idx += 1
def on_error(self, e):
pass
def on_completed(self):
pass
def replace_nans(self, array, val):
for idx in self.nan_indexes:
print("replacing index %s with %s" % (idx, val))
array[idx] = val
def new_array_replace_nans(self, array, replace_value):
result = []
for (idx, val) in enumerate(array):
if idx in self.nan_indexes:
result.append(replace_value)
else:
result.append(val)
return result
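# Hypothetical usage sketch (not in the original module): feed a list of
# SensorEvents through CaptureNaNIndexes by hand, then blank out the
# corresponding entries of a parallel value array.
def _demo_capture_nans(events, values, replacement=0.0):
    capture = CaptureNaNIndexes()
    for evt in events:
        capture.on_next(evt)
    capture.on_completed()
    # Return a copy of `values` with the NaN positions replaced.
    return capture.new_array_replace_nans(values, replacement)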
class HmmScanner:
"""Scan through a set of samples in sequential order and
build up sequences for each time zone that can be passed to
the HMM learn. For each zone, we need to pass the fit() method
the concatenated sequence of samples and the lengths of each
subsequence. We break the samples into multiple subsequences
whenever we encounter a time gap (as indicated via a NaN value)
or when we cross between zones.
"""
def __init__(self):
self.length = None
self.zone = None
self.samples_by_zone = [[] for zone in range(NUM_ZONES)]
self.lengths_by_zone = [[] for zone in range(NUM_ZONES)]
def _start_sequence(self, zone, s):
self.length = 1
self.zone = zone
self.samples_by_zone[zone].append(s)
def _complete_sequence(self):
if self.length is not None:
assert self.zone is not None
self.lengths_by_zone[self.zone].append(self.length)
self.zone = None
self.length = None
def process_samples(self, samples, timestamps):
for (s, t) in zip(samples, timestamps):
s = int(s) if not math.isnan(s) else NaN
(sunrise, sunset) = get_sunrise_sunset(t.year, t.month, t.day)
current_zone = time_of_day_to_zone(dt_to_minutes(t), sunrise,
sunset)
if self.length is None:
if math.isnan(s):
continue
else:
self._start_sequence(current_zone, s)
elif math.isnan(s):
self._complete_sequence()
elif self.zone != current_zone:
self._complete_sequence()
self._start_sequence(current_zone, s)
else: # just extend the current sequence
self.length += 1
self.samples_by_zone[self.zone].append(s)
# see if there was an in-progress sequence for which we need to add
# the length
self._complete_sequence()
# sanity check
for zone in range(NUM_ZONES):
assert sum(self.lengths_by_zone[zone])==len(self.samples_by_zone[zone])
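# Hypothetical helper (not part of the original module): run HmmScanner over a
# series and fit one hmmlearn GaussianHMM per zone, skipping zones that
# collected no sequences. hmmlearn and the two-state model are illustrative
# assumptions, not the original author's choices.
def _fit_hmms_by_zone(samples, timestamps, n_components=2):
    import numpy as np
    from hmmlearn import hmm
    scanner = HmmScanner()
    scanner.process_samples(samples, timestamps)
    models = [None for _ in range(NUM_ZONES)]
    for zone in range(NUM_ZONES):
        lengths = scanner.lengths_by_zone[zone]
        if len(lengths) == 0:
            continue
        # hmmlearn expects a 2-D observation array plus per-sequence lengths.
        obs = np.array(scanner.samples_by_zone[zone], dtype=float).reshape(-1, 1)
        models[zone] = hmm.GaussianHMM(n_components=n_components).fit(obs, lengths)
    return models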
BACKCHECK_LENGTH = 10
class ScanState:
"""A state machine for finding sequences of on or off samples
"""
WAITING_FOR_TRANSITION = 0
RECORDING_LENGTH = 1
NAN_STATE = 2
    STATE_NAMES = ['WAITING', 'RECORDING', 'NAN']
def __init__(self):
self.state = None
self.prev_sample = None
self.start_zone = None
self.start_time = None
self.prev_zone = None
self.length = None
self.recorded_events = None
# The backcheck queue maintains a queue of older samples.
# This is used to include the sample which is BACKCHECK_LENGTH
        # samples back, which is helpful for some machine learning algorithms.
self.backcheck_queue = []
def add_sample(self, s, t):
s = int(s) if not math.isnan(s) else NaN
(sunrise, sunset) = get_sunrise_sunset(t.year, t.month, t.day)
current_zone = time_of_day_to_zone(dt_to_minutes(t), sunrise,
sunset)
        if self.state is None:
if math.isnan(s):
self.state = ScanState.NAN_STATE
else:
self.state = ScanState.WAITING_FOR_TRANSITION
print("initial state is %s" % ScanState.STATE_NAMES[self.state])
elif math.isnan(s):
if self.state!=ScanState.NAN_STATE:
print("changing to NAN_STATE at %s" % t)
self.state = ScanState.NAN_STATE
self.length = None
self.start_zone = None
self.start_time = None
self.prev_zone = None
self.recorded_events = None
self.backcheck_queue = []
elif self.state==ScanState.NAN_STATE:
print("changing to WAITING_STATE at %s" % t)
self.state = ScanState.WAITING_FOR_TRANSITION # got a value
elif self.state==ScanState.WAITING_FOR_TRANSITION and (s!=self.prev_sample):
print("changing to RECORDING_STATE(%s) at %s" % (s,t))
self.state = ScanState.RECORDING_LENGTH
self.start_zone = current_zone
self.start_time = t
self.length = 1
back = self.backcheck_queue[0] if len(self.backcheck_queue)>0 else s
self.recorded_events = [(s, t, current_zone, current_zone, 1, back),]
elif self.state==ScanState.RECORDING_LENGTH:
if s==self.prev_sample:
self.length += 1
back = self.backcheck_queue[0] if len(self.backcheck_queue)>0 else s
self.recorded_events.append((s, t, self.start_zone, current_zone,
self.length, back),)
else:
if self.prev_sample==0:
print("OFF sequence zone %s, length %d" % (self.start_zone,
self.length))
self.record_off_sequence(self.start_zone, self.start_time,
self.prev_zone,
self.length)
else:
print("ON sequence zone %s, length %d" % (self.start_zone,
self.length))
self.record_on_sequence(self.start_zone, self.start_time,
self.prev_zone,
self.length)
# we know this was valid, so call record_event() for each event
for (evt_s, evt_dt, evt_start_zone, evt_current_zone,
evt_length, evt_back) in self.recorded_events:
self.record_event(evt_s, evt_dt, evt_start_zone, evt_current_zone,
evt_length, evt_back)
# reset for the new value of s
self.length = 1
self.start_zone = current_zone
self.start_time = t
back = self.backcheck_queue[0] if len(self.backcheck_queue)>0 else s
self.recorded_events = [(s, t, current_zone, current_zone, 1, back),]
self.prev_sample = s
self.prev_zone = current_zone
if not math.isnan(s):
self.backcheck_queue.append(s)
if len(self.backcheck_queue)>BACKCHECK_LENGTH:
self.backcheck_queue.pop(0) # remove the oldest
def record_event(self, s, dt, start_zone, current_zone, current_length, back_event):
"""Template method that is called when we have a valid sample we
        can use. A sample is valid if it is preceded by zero or more samples
        of the same value and one or more samples of the other value. This
        means we can provide a correct value for the length. If the samples of
        the current value were preceded by a NaN, we don't know for certain
how long that value was present.
"""
pass
def record_on_sequence(self, start_zone, start_time, end_zone, length):
"""Template method that is called when we have a sequence of on samples
we can use. A sequence is valid if it has one or more samples of the
        same value, both preceded and succeeded by one or more samples of the
        opposite value. If there is a NaN on either side, we cannot
conclusively determine the length.
"""
pass
def record_off_sequence(self, start_zone, start_time, end_zone, length):
"""Template method that is called when we have a sequence of off samples
we can use.
"""
pass
class LengthHistogramState(ScanState):
"""Build lists of on/off lengths that can be used to compute
histograms.
"""
def __init__(self):
super().__init__()
self.on_lengths = [[] for i in range(NUM_ZONES)]
self.off_lengths = [[] for i in range(NUM_ZONES)]
def record_on_sequence(self, start_zone, start_time, end_zone, length):
self.on_lengths[start_zone].append(length)
def record_off_sequence(self, start_zone, start_time, end_zone, length):
self.off_lengths[start_zone].append(length)
def build_length_histogram_data(samples, timestamps):
"""Given a series of samples and timestamps, find on and off
sequences and build lists of the sequence lengths per zone. A
    'sequence' is a series of samples of the same value preceded by at
    least one sample of the opposite value (as opposed to a break in the
readings). Returns a LengthHistogramState object containing on_lengths
and off_lengths members.
"""
state = LengthHistogramState()
for (s, t) in zip(samples, timestamps):
state.add_sample(s, t)
for zone in range(NUM_ZONES):
state.on_lengths[zone].sort()
state.off_lengths[zone].sort()
return state
class LightPredictionStates(ScanState):
def __init__(self, trainer):
super().__init__()
self.trainer = trainer
def record_on_sequence(self, start_zone, start_time, end_zone, length):
self.trainer.on_lengths[start_zone].append(length)
self.trainer.on_lengths_with_start.append((dt_to_minutes(start_time), length),)
def record_off_sequence(self, start_zone, start_time, end_zone, length):
self.trainer.off_lengths[start_zone].append(length)
self.trainer.off_lengths_with_start.append((dt_to_minutes(start_time), length),)
def record_event(self, s, dt, start_zone, current_zone, current_length, back_event):
length_bucket = int(round(current_length/BACKCHECK_LENGTH))+1
if s==1:
f = (dt.hour, current_zone, 0, length_bucket, back_event)
else:
f = (dt.hour, current_zone, length_bucket, 0, back_event)
self.trainer.training_features.append(self.trainer.feature_filter(f))
self.trainer.training_targets.append(s)
self.trainer.obs_by_zone[current_zone].append(s)
class LightPredictionTrainer:
"""This class preprocessed the data to create features for machine
learning.
"""
def __init__(self, feature_filter=lambda x: x):
"""The feature filter is a function that can remove elements from
the feature tuple. By default the tuple is
(hour, zone, len_off, len_on, back_event).
"""
self.on_lengths = [[] for i in range(NUM_ZONES)]
self.off_lengths = [[] for i in range(NUM_ZONES)]
self.on_lengths_with_start = []
self.off_lengths_with_start = []
self.training_features = []
self.training_targets = []
self.scaled_features = None
self.scaler = None
self.feature_filter = feature_filter
self.obs_by_zone = [[] for z in range(NUM_ZONES)]
def _compute_lengths(self, samples, timestamps):
state_machine = LightPredictionStates(self)
for (s, t) in zip(samples, timestamps):
state_machine.add_sample(s, t)
print("self.on_lengths")
print(self.on_lengths)
print("self.off_lengths")
print(self.off_lengths)
def _compute_scaled_features(self):
self.scaler = StandardScaler()
self.scaled_features = self.scaler.fit_transform(self.training_features)
def _compute_zones(self):
"""Lets compute some zones where we have the same number of samples
per zone.
"""
self.on_lengths_with_start.sort()
self.off_lengths_with_start.sort()
samples_per_zone = int(round(len(self.on_lengths_with_start)/4))
print("Compute zones: %d total samples, try for %d samples per zone" %
(len(self.on_lengths_with_start), samples_per_zone))
zone_boundaries = []
prev_btime = 0
for boundary_idx in [samples_per_zone, 2*samples_per_zone, 3*samples_per_zone]:
(btime, _) = self.on_lengths_with_start[boundary_idx]
            while btime==prev_btime:
                # if it has the same time as the previous boundary, keep going
                boundary_idx += 1
                (btime, _) = self.on_lengths_with_start[boundary_idx]
            zone_boundaries.append(btime)
            prev_btime = btime
for i in zone_boundaries:
(hr, mn) = minutes_to_time(i)
print("computed boundary: %d minutes %02d:%02d" % (i, hr, mn))
def compute(self, samples, timestamps):
self._compute_lengths(samples, timestamps)
self._compute_scaled_features()
self._compute_zones()
def features_for_prediction(self, s, dt, length, sample_queue, scaled=True):
minutes = dt_to_minutes(dt)
(sunrise, sunset) = get_sunrise_sunset(dt.year, dt.month, dt.day)
zone = time_of_day_to_zone(minutes, sunrise, sunset)
length_bucket = int(round(length/BACKCHECK_LENGTH))+1
hist_value = sample_queue[0] if len(sample_queue)>0 else s
if len(sample_queue)>=BACKCHECK_LENGTH:
sample_queue.pop(0)
if s==1:
raw = self.feature_filter((dt.hour, zone, 0, length_bucket, hist_value),)
else:
raw = self.feature_filter((dt.hour, zone, length_bucket, 0, hist_value),)
if scaled:
return self.scaler.transform([raw,])
else:
return [raw,]
def sort_lengths(self):
for lengths in [self.on_lengths, self.off_lengths]:
for zone in range(NUM_ZONES):
lengths[zone].sort()
print("Sorted On Lengths: %s" % self.on_lengths)
print("Sorted Off Lengths: %s" % self.off_lengths)
def lengths_to_histogram(lengths):
sl = sorted(lengths)
if len(sl)==0:
return []
max_len = sl[-1]
hist = [0 for i in range(max_len+1)]
for l in sl:
hist[l] += 1
return hist
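# Illustrative usage (not in the original module): build the per-zone length
# lists for a sample series and print histograms for zone 0.
def _demo_length_histograms(samples, timestamps):
    state = build_length_histogram_data(samples, timestamps)
    print("zone 0 ON lengths: %s" % lengths_to_histogram(state.on_lengths[0]))
    print("zone 0 OFF lengths: %s" % lengths_to_histogram(state.off_lengths[0]))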
#compute_lengths(kmeans_lux, smoothed_series_writer.index)
#ON_PROBS = compute_probs(kmeans_lux, smoothed_series_writer.index)
import random
random.seed()
class LightPredictorLengthOnly:
"""Use the length data to determine when the light should go on and off
and how long each time.
"""
def __init__(self, initial_state, trainer):
self.current_state = initial_state
self.current_length = None
self.current_zone = None
self.trainer = trainer
def _choose_length(self, zone, state):
if state==0:
lengths = self.trainer.off_lengths
else:
lengths = self.trainer.on_lengths
if len(lengths[zone])==0:
orig_zone = zone
while True:
zone = (zone+1)%NUM_ZONES
assert zone!=orig_zone
if len(lengths[zone])>0:
print("No lengths for %s available for zone %s, using value from zone %s" %
('ON' if state else 'Off', orig_zone, zone))
break
base_length = random.choice(lengths[zone])
# add some noise to the length, then subtract 1 since this step is already the first sample
length = max(int(round(random.gauss(base_length, 0.1*base_length))), 1) - 1
print("chose %s length %s, randomized to %s" %
('ON' if state else 'OFF', base_length, length+1))
return length
def predict(self, dt):
(sunrise, sunset) = get_sunrise_sunset(dt.year, dt.month, dt.day)
new_zone = time_of_day_to_zone(dt_to_minutes(dt), sunrise, sunset)
        if self.current_zone is None:
self.current_zone = new_zone
        if self.current_length is None:
self.current_length = self._choose_length(self.current_zone, self.current_state)
elif self.current_length==0:
self.current_state = 0 if self.current_state else 1
self.current_length = self._choose_length(new_zone, self.current_state)
else:
self.current_length -= 1
self.current_zone = new_zone
return self.current_state
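# Hypothetical simulation sketch (not part of the original module): roll the
# length-only predictor forward one step per minute (the expected sample
# interval) and collect the predicted on/off states.
def _simulate_light(trainer, start_dt, num_minutes=60, initial_state=0):
    import datetime
    predictor = LightPredictorLengthOnly(initial_state, trainer)
    states = []
    dt = start_dt
    for _ in range(num_minutes):
        states.append(predictor.predict(dt))
        dt = dt + datetime.timedelta(minutes=1)
    return states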
| |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""High level ssh library.
Usage examples:
Execute command and get output:
    ssh = sshclient.SSH('root', 'example.com', port=33)
status, stdout, stderr = ssh.execute('ps ax')
if status:
raise Exception('Command failed with non-zero status.')
    print(stdout.splitlines())
Execute command with huge output:
class PseudoFile(object):
def write(chunk):
if 'error' in chunk:
email_admin(chunk)
    ssh = sshclient.SSH('root', 'example.com')
ssh.run('tail -f /var/log/syslog', stdout=PseudoFile(), timeout=False)
Execute local script on remote side:
    ssh = sshclient.SSH('user', 'example.com')
status, out, err = ssh.execute('/bin/sh -s arg1 arg2',
stdin=open('~/myscript.sh', 'r'))
Upload file:
    ssh = sshclient.SSH('user', 'example.com')
ssh.run('cat > ~/upload/file.gz', stdin=open('/store/file.gz', 'rb'))
Eventlet:
eventlet.monkey_patch(select=True, time=True)
or
eventlet.monkey_patch()
or
    sshclient = eventlet.import_patched("openstack.common.sshclient")
"""
import select
import socket
import time
import paramiko
import six
class SSHError(Exception):
pass
class SSHTimeout(SSHError):
pass
class SSH(object):
"""Represent ssh connection."""
def __init__(self, user, host, port=22, pkey=None,
key_filename=None, password=None):
"""Initialize ssh client.
:param user: ssh username
:param host: hostname or ip address of remote ssh server
:param port: remote ssh port
:param pkey: RSA or DSS private key string or file object
:param key_filename: private key filename
:param password: password
"""
self.user = user
self.host = host
self.port = port
self.pkey = self._get_pkey(pkey) if pkey else None
self.password = password
self.key_filename = key_filename
self._client = False
def _get_pkey(self, key):
if isinstance(key, six.string_types):
key = six.moves.StringIO(key)
errors = []
for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
try:
return key_class.from_private_key(key)
except paramiko.SSHException as e:
errors.append(e)
raise SSHError('Invalid pkey: %s' % (errors))
def _get_client(self):
if self._client:
return self._client
try:
self._client = paramiko.SSHClient()
self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self._client.connect(self.host, username=self.user,
port=self.port, pkey=self.pkey,
key_filename=self.key_filename,
password=self.password, timeout=1)
return self._client
except Exception as e:
message = ("Exception %(exception_type)s was raised "
"during connect. Exception value is: %(exception)r")
self._client = False
raise SSHError(message % {'exception': e,
'exception_type': type(e)})
def close(self):
self._client.close()
self._client = False
def run(self, cmd, stdin=None, stdout=None, stderr=None,
raise_on_error=True, timeout=3600):
"""Execute specified command on the server.
:param cmd: Command to be executed.
:param stdin: Open file or string to pass to stdin.
:param stdout: Open file to connect to stdout.
:param stderr: Open file to connect to stderr.
        :param raise_on_error: If False then the exit code is returned. If True
                               then an exception is raised on a non-zero code.
:param timeout: Timeout in seconds for command execution.
Default 1 hour. No timeout if set to 0.
"""
client = self._get_client()
if isinstance(stdin, six.string_types):
stdin = six.moves.StringIO(stdin)
return self._run(client, cmd, stdin=stdin, stdout=stdout,
stderr=stderr, raise_on_error=raise_on_error,
timeout=timeout)
def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
raise_on_error=True, timeout=3600):
transport = client.get_transport()
session = transport.open_session()
session.exec_command(cmd)
start_time = time.time()
data_to_send = ''
stderr_data = None
# If we have data to be sent to stdin then `select' should also
# check for stdin availability.
if stdin and not stdin.closed:
writes = [session]
else:
writes = []
while True:
# Block until data can be read/write.
r, w, e = select.select([session], writes, [session], 1)
if session.recv_ready():
data = session.recv(4096)
#LOG.debug('stdout: %r' % data)
if stdout is not None:
stdout.write(data)
continue
if session.recv_stderr_ready():
stderr_data = session.recv_stderr(4096)
#LOG.debug('stderr: %r' % stderr_data)
if stderr is not None:
stderr.write(stderr_data)
continue
if session.send_ready():
if stdin is not None and not stdin.closed:
if not data_to_send:
data_to_send = stdin.read(4096)
if not data_to_send:
stdin.close()
session.shutdown_write()
writes = []
continue
sent_bytes = session.send(data_to_send)
# hack to workaround unicode decode error in logging
# when trying to upload binary file
'''
try:
LOG.debug('sent: %s' % data_to_send[:sent_bytes])
except UnicodeDecodeError:
LOG.debug('sent: <UnicodeDecodeError>')
'''
data_to_send = data_to_send[sent_bytes:]
if session.exit_status_ready():
break
if timeout and (time.time() - timeout) > start_time:
args = {'cmd': cmd, 'host': self.host}
raise SSHTimeout(('Timeout executing command '
'"%(cmd)s" on host %(host)s') % args)
if e:
return None
#raise SSHError('Socket error.')
exit_status = session.recv_exit_status()
if 0 != exit_status and raise_on_error:
fmt = ('Command "%(cmd)s" failed with exit_status %(status)d.')
details = fmt % {'cmd': cmd, 'status': exit_status}
if stderr_data:
details += (' Last stderr data: "%s".') % stderr_data
raise SSHError(details)
return exit_status
def execute(self, cmd, stdin=None, timeout=3600):
"""Execute the specified command on the server.
:param cmd: Command to be executed.
:param stdin: Open file to be sent on process stdin.
:param timeout: Timeout for execution of the command.
:returns: tuple (exit_status, stdout, stderr)
"""
stdout = six.moves.StringIO()
stderr = six.moves.StringIO()
exit_status = self.run(cmd, stderr=stderr,
stdout=stdout, stdin=stdin,
timeout=timeout, raise_on_error=False)
stdout.seek(0)
stderr.seek(0)
return (exit_status, stdout.read(), stderr.read())
def wait(self, timeout=240, interval=1):
"""Wait for the host will be available via ssh."""
start_time = time.time()
while True:
try:
return self.execute('uname')
except (socket.error, SSHError) as e:
#LOG.debug('Ssh is still unavailable: %r' % e)
time.sleep(interval)
if time.time() > (start_time + timeout):
raise SSHTimeout(('Timeout waiting for "%s"') % self.host)
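# Hypothetical usage sketch (host and credentials are placeholders, not part
# of the original module): block until the server accepts ssh, then run a
# command and fail loudly on a non-zero exit status.
def _demo_wait_and_execute(host, user='root'):
    ssh = SSH(user, host)
    ssh.wait(timeout=120)
    status, out, err = ssh.execute('uname -a')
    if status:
        raise SSHError('uname failed with status %d: %s' % (status, err))
    return out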
| |
#!/usr/bin/env python3
import pytest
from collections import OrderedDict
import logging
import logging.handlers
import os
import os.path
import re
import sys
sys.path.append(os.getcwd())
from suapp.logdecorator import *
"""
Tests the logdecorator loguse.
"""
# Some classes and functions we're testing with.
class MyClass:
"""Test class for the logdecorator"""
name = "APP"
@loguse(1) # Don't log the variable with index 1 (i.e. name)
def __init__(self, name):
"""Initialize the name of the test class."""
self.name = name
@loguse
def lock(self):
"""Locks the test class."""
logging.getLogger(__name__).info("Locking %s" % (self.name))
@loguse
def unlock(self):
"""Unlocks the test class."""
logging.getLogger(__name__).info("Unlocking %s" % (self.name))
@loguse
def close(self):
"""Closes the test class."""
logging.getLogger(__name__).info("Closing %s" % (self.name))
class SubMyClass(MyClass):
@loguse
def __init__(self, name):
"""Initialize the name of the test class."""
super(SubMyClass, self).__init__(name)
@loguse
def close(self):
"""Closes the test class."""
logging.getLogger(__name__).info("Closing %s" % (self.name))
@loguse
def my_function1(message):
logging.getLogger(__name__).warning("Starting %s" % (message))
return SubMyClass(message)
@loguse(1) # Don't log the variable with index 1 (i.e. two)
def my_function2(one, two):
logging.getLogger(__name__).warning(
"The previous line didn't log 'two', but did log 'one'"
)
return "three"
@loguse("a") # Don't log the named argument 'a'
def my_function3(a, b, g):
logging.getLogger(__name__).warning(
"The previous line didn't log 'a', but did log 'b' and 'g'."
)
return "abg"
@loguse("@") # Don't log the return value.
def my_function4(one, two):
logging.getLogger(__name__).warning(
"The previous line logged 'one' and 'two' but the next will not log the return value."
)
return "three"
def test_timings():
""" Testing the two timings functions. """
from suapp.logdecorator import add_timing
init_timings()
add_timing("f1", 3)
add_timing("f2", 4.234)
add_timing("f1", 6)
add_timing("f3", 6.2524)
add_timing("f2", 7.5234)
add_timing("f2", 8.70987)
add_timing("f2", 9.78034970)
timings = get_timings()
print(timings)
report = timings_report()
print(report)
assert report == OrderedDict(
[
("f2", (4, 30.2476197, 7.561904925)),
("f3", (1, 6.2524, 6.2524)),
("f1", (2, 9, 4.5)),
]
)
def test_timings_disabled():
""" Testing the two timings functions when they should do nothing. """
from suapp.logdecorator import add_timing
disable_timings()
add_timing("f1", 3)
add_timing("f2", 4.234)
add_timing("f1", 6)
add_timing("f3", 6.2524)
add_timing("f2", 7.5234)
add_timing("f2", 8.70987)
add_timing("f2", 9.78034970)
timings = get_timings()
print(timings)
report = timings_report()
print(report)
    assert report is None
@pytest.fixture
def memory_logger():
""" Test fixture to set the handler to a memory buffer """
# Huge capacity and NEVER flush.
capacity = 10 ** 6
flushLevel = 255
if sys.version_info >= (3, 6):
handler = logging.handlers.MemoryHandler(
capacity, flushLevel, target=None, flushOnClose=False
)
else:
handler = logging.handlers.MemoryHandler(capacity, flushLevel, target=None)
logger = logging.getLogger()
# Cleaning up, just to be sure.
for h in logger.handlers:
logger.removeHandler(h)
logger.addHandler(handler)
return (logger, handler)
def test_logging_info(memory_logger):
""" Test the logging with loglevel=INFO """
(logger, handler) = memory_logger
logger.setLevel(logging.INFO)
x = my_function1("Hello World!")
x.lock()
x.unlock()
x.close()
my_function2("First variable", "Second variable")
my_function3(a="alpha", b="beta", g="gamma")
my_function4(one=1, two=2)
# fields = ['created', 'exc_info', 'exc_text', 'filename', 'funcName', 'getMessage', 'levelname', 'levelno', 'lineno', 'module', 'msecs', 'msg', 'name', 'pathname', 'process', 'processName', 'relativeCreated', 'stack_info', 'thread', 'threadName']
fields = ["levelno", "levelname", "name", "filename", "module", "funcName", "msg"]
expected = [
"%s WARNING %s test_logdecorator.py test_logdecorator my_function1 Starting Hello World!"
% (logging.WARNING, __name__),
"%s INFO %s test_logdecorator.py test_logdecorator lock Locking Hello World!"
% (logging.INFO, __name__),
"%s INFO %s test_logdecorator.py test_logdecorator unlock Unlocking Hello World!"
% (logging.INFO, __name__),
"%s INFO %s test_logdecorator.py test_logdecorator close Closing Hello World!"
% (logging.INFO, __name__),
"%s WARNING %s test_logdecorator.py test_logdecorator my_function2 The previous line didn't log 'two', but did log 'one'"
% (logging.WARNING, __name__),
"%s WARNING %s test_logdecorator.py test_logdecorator my_function3 The previous line didn't log 'a', but did log 'b' and 'g'."
% (logging.WARNING, __name__),
"%s WARNING %s test_logdecorator.py test_logdecorator my_function4 The previous line logged 'one' and 'two' but the next will not log the return value."
% (logging.WARNING, __name__),
]
assert len(handler.buffer) == len(expected)
for logline, expected in zip(handler.buffer, expected):
line = []
for field in fields:
# We substitute any hex strings with 0x000000000000
line.append(
re.sub("0x[0-9a-f]*", "0x000000000000", str(getattr(logline, field)))
)
assert " ".join(line) == expected
def test_logging_debug(memory_logger):
""" Test the logging with loglevel=DEBUG """
(logger, handler) = memory_logger
logger.setLevel(logging.DEBUG)
x = my_function1("The end of the World is near!")
x.lock()
x.unlock()
x.close()
my_function2("First variable", "Second variable")
my_function3(a="alpha", b="beta", g="gamma")
my_function4(one=1, two=2)
fields = ["levelno", "levelname", "name", "filename", "module", "funcName"]
expected = [
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator > my_function1(('The end of the World is near!',), {})"
% (logging.DEBUG),
"%s WARNING %s test_logdecorator.py test_logdecorator my_function1 Starting The end of the World is near!"
% (logging.WARNING, __name__),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator > SubMyClass.__init__((<test_logdecorator.SubMyClass object at 0x000000000000>, 'The end of the World is near!'), {})"
% (logging.DEBUG),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator > MyClass.__init__((<test_logdecorator.SubMyClass object at 0x000000000000>,), {})"
% (logging.DEBUG),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator < MyClass.__init__: None"
% (logging.DEBUG),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator < SubMyClass.__init__: None"
% (logging.DEBUG),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator < my_function1: <test_logdecorator.SubMyClass object at 0x000000000000>"
% (logging.DEBUG),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator > MyClass.lock((<test_logdecorator.SubMyClass object at 0x000000000000>,), {})"
% (logging.DEBUG),
"%s INFO %s test_logdecorator.py test_logdecorator lock Locking The end of the World is near!"
% (logging.INFO, __name__),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator < MyClass.lock: None"
% (logging.DEBUG),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator > MyClass.unlock((<test_logdecorator.SubMyClass object at 0x000000000000>,), {})"
% (logging.DEBUG),
"%s INFO %s test_logdecorator.py test_logdecorator unlock Unlocking The end of the World is near!"
% (logging.INFO, __name__),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator < MyClass.unlock: None"
% (logging.DEBUG),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator > SubMyClass.close((<test_logdecorator.SubMyClass object at 0x000000000000>,), {})"
% (logging.DEBUG),
"%s INFO %s test_logdecorator.py test_logdecorator close Closing The end of the World is near!"
% (logging.INFO, __name__),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator < SubMyClass.close: None"
% (logging.DEBUG),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator > my_function2(('First variable',), {})"
% (logging.DEBUG),
"%s WARNING %s test_logdecorator.py test_logdecorator my_function2 The previous line didn't log 'two', but did log 'one'"
% (logging.WARNING, __name__),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator < my_function2: 'three'"
% (logging.DEBUG),
(
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator > my_function3((), {'g': 'gamma', 'b': 'beta'})"
% (logging.DEBUG),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator > my_function3((), {'b': 'beta', 'g': 'gamma'})"
% (logging.DEBUG),
),
"%s WARNING %s test_logdecorator.py test_logdecorator my_function3 The previous line didn't log 'a', but did log 'b' and 'g'."
% (logging.WARNING, __name__),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator < my_function3: 'abg'"
% (logging.DEBUG),
(
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator > my_function4((), {'one': 1, 'two': 2})"
% (logging.DEBUG),
"%s DEBUG test_logdecorator logdecorator.py logdecorator decorator > my_function4((), {'two': 2, 'one': 1})"
% (logging.DEBUG),
),
"%s WARNING %s test_logdecorator.py test_logdecorator my_function4 The previous line logged 'one' and 'two' but the next will not log the return value."
% (logging.WARNING, __name__),
"%s DEBUG %s logdecorator.py logdecorator decorator < my_function4"
% (logging.DEBUG, __name__),
]
assert len(handler.buffer) == len(expected)
for logline, expected in zip(handler.buffer, expected):
line = []
for field in fields:
# We substitute any hex strings with 0x000000000000
line.append(
re.sub("0x[0-9a-f]*", "0x000000000000", str(getattr(logline, field)))
)
line.append(re.sub("0x[0-9a-f]*", "0x000000000000", logline.getMessage()))
if isinstance(expected, tuple):
assert " ".join(line) in expected
else:
assert " ".join(line) == expected
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.gather."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
_TEST_TYPES = (dtypes.int64, dtypes.float32,
dtypes.complex64, dtypes.complex128)
class GatherTest(test.TestCase, parameterized.TestCase):
def _buildParams(self, data, dtype):
data = data.astype(dtype.as_numpy_dtype)
# For complex types, add an index-dependent imaginary component so we can
# tell we got the right value.
if dtype.is_complex:
return data + 10j * data
return data
def testScalar1D(self):
with self.cached_session(use_gpu=True):
data = np.array([0, 1, 2, 3, 7, 5])
for dtype in _TEST_TYPES:
for indices in 4, [1, 2, 2, 4, 5]:
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.gather(params, indices_tf)
gather_val = self.evaluate(gather_t)
np_val = params_np[indices]
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
def testScalar2D(self):
with self.session(use_gpu=True):
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
for dtype in _TEST_TYPES:
for axis in range(data.ndim):
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices = constant_op.constant(2)
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np.take(params_np, 2, axis=axis), gather_val)
expected_shape = data.shape[:axis] + data.shape[axis + 1:]
self.assertEqual(expected_shape, gather_t.get_shape())
def testSimpleTwoD32(self):
with self.session(use_gpu=True):
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
for dtype in _TEST_TYPES:
for axis in range(data.ndim):
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
# The indices must be in bounds for any axis.
indices = constant_op.constant([0, 1, 0, 2])
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np.take(params_np, [0, 1, 0, 2], axis=axis),
gather_val)
expected_shape = data.shape[:axis] + (4,) + data.shape[axis + 1:]
self.assertEqual(expected_shape, gather_t.get_shape())
@test_util.run_deprecated_v1
def testHigherRank(self):
# We check that scalar and empty indices shapes work as well
shape = (2, 1, 3, 2)
for indices_shape in (), (0,), (2, 0), (2, 3):
for dtype in _TEST_TYPES:
for axis in range(len(shape)):
params = self._buildParams(np.random.randn(*shape), dtype)
indices = np.random.randint(shape[axis], size=indices_shape)
with self.cached_session(use_gpu=True) as sess:
tf_params = constant_op.constant(params)
tf_indices = constant_op.constant(indices)
# Check that both positive and negative indices for axis work.
tf_axis = constant_op.constant(axis)
tf_negative_axis = constant_op.constant(-len(shape) + axis)
gather = array_ops.gather(tf_params, tf_indices, axis=tf_axis)
gather_negative_axis = array_ops.gather(
tf_params, tf_indices, axis=tf_negative_axis)
gather_value, gather_negative_axis_value = sess.run(
[gather, gather_negative_axis])
gather_np = np.take(params, indices, axis)
self.assertAllEqual(gather_np, gather_value)
self.assertAllEqual(gather_np, gather_negative_axis_value)
expected_shape = (params.shape[:axis] + indices.shape +
params.shape[axis + 1:])
self.assertEqual(expected_shape, gather.shape)
self.assertEqual(expected_shape, gather_negative_axis.shape)
# Test gradients
gather_grad = np.random.randn(
*gather.get_shape().as_list()).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
gather_grad -= 1j * gather_grad
params_grad, indices_grad, axis_grad = gradients_impl.gradients(
gather, [tf_params, tf_indices, tf_axis], gather_grad)
self.assertEqual(indices_grad, None)
self.assertEqual(axis_grad, None)
if dtype.is_integer:
self.assertEqual(params_grad, None)
continue
# For axis 0, we are able to create an efficient IndexedSlices for
# the gradient.
if axis == 0:
self.assertEqual(type(params_grad), ops.IndexedSlices)
params_grad = ops.convert_to_tensor(params_grad)
correct_params_grad = np.zeros(shape).astype(dtype.as_numpy_dtype)
outer_dims = axis
inner_dims = len(shape) - axis - 1
gather_grad = gather_grad.reshape(
shape[:axis] + (indices.size,) + shape[axis + 1:])
for source_index, dest_index in enumerate(indices.flat):
dest_slice = ((slice(None),) * outer_dims + (dest_index,) +
(slice(None),) * inner_dims)
source_slice = ((slice(None),) * outer_dims + (source_index,) +
(slice(None),) * inner_dims)
correct_params_grad[dest_slice] += gather_grad[source_slice]
self.assertAllClose(
correct_params_grad,
self.evaluate(params_grad),
atol=2e-6,
rtol=2e-6)
@test_util.run_deprecated_v1
def testString(self):
params = np.array([[b"asdf", b"zxcv"], [b"qwer", b"uiop"]])
with self.cached_session():
self.assertAllEqual([b"qwer", b"uiop"],
array_ops.gather(params, 1, axis=0).eval())
self.assertAllEqual([b"asdf", b"qwer"],
array_ops.gather(params, 0, axis=1).eval())
@test_util.run_deprecated_v1
def testUInt32AndUInt64(self):
for unsigned_type in (dtypes.uint32, dtypes.uint64):
params = self._buildParams(
np.array([[1, 2, 3], [7, 8, 9]]), unsigned_type)
with self.cached_session():
self.assertAllEqual([7, 8, 9],
array_ops.gather(params, 1, axis=0).eval())
self.assertAllEqual([1, 7], array_ops.gather(params, 0, axis=1).eval())
@test_util.run_deprecated_v1
def testUnknownIndices(self):
params = constant_op.constant([[0, 1, 2]])
indices = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices)
self.assertEqual(None, gather_t.get_shape())
@test_util.run_deprecated_v1
def testUnknownAxis(self):
params = constant_op.constant([[0, 1, 2]])
indices = constant_op.constant([[0, 0], [0, 0]])
axis = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices, axis=axis)
# Rank 2 params with rank 2 indices results in a rank 3 shape.
self.assertEqual([None, None, None], gather_t.shape.as_list())
# If indices is also unknown the result rank is unknown.
indices = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices, axis=axis)
self.assertEqual(None, gather_t.shape)
def testBadIndicesCPU(self):
with self.session(use_gpu=False):
params = [[0, 1, 2], [3, 4, 5]]
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
array_ops.gather(params, [[7]], axis=0).eval()
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
array_ops.gather(params, [[7]], axis=1).eval()
def _disabledTestBadIndicesGPU(self):
# TODO disabled due to different behavior on GPU and CPU
# On GPU the bad indices do not raise error but fetch 0 values
if not test.is_gpu_available():
return
with self.session(use_gpu=True):
params = [[0, 1, 2], [3, 4, 5]]
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
array_ops.gather(params, [[7]], axis=0).eval()
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
array_ops.gather(params, [[7]], axis=1).eval()
@test_util.run_deprecated_v1
@test_util.disable_xla(
"This test never passed for XLA") # Different error message.
def testBadAxis(self):
with self.session(use_gpu=True):
params = [0, 1, 2]
params_ph = array_ops.placeholder(dtypes.int32)
indices = 0
for bad_axis in (1, 2, -2):
# Shape inference can validate axis for known params rank.
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be at least rank . but is rank 1"):
array_ops.gather(params, indices, axis=bad_axis)
# If params rank is unknown, an op error occurs.
with self.assertRaisesOpError(
r"Expected axis in the range \[-1, 1\), but got %s" % bad_axis):
array_ops.gather(params_ph, indices, axis=bad_axis).eval(
feed_dict={params_ph: params})
@test_util.run_deprecated_v1
def testEmptySlices(self):
with self.session(use_gpu=True):
for dtype in _TEST_TYPES:
for itype in np.int32, np.int64:
# Leading axis gather.
params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
indices = np.array([3, 4], dtype=itype)
gather = array_ops.gather(params, indices, axis=0)
self.assertAllEqual(gather.eval(), np.zeros((2, 0, 0)))
# Middle axis gather.
params = np.zeros((0, 7, 0), dtype=dtype.as_numpy_dtype)
gather = array_ops.gather(params, indices, axis=1)
self.assertAllEqual(gather.eval(), np.zeros((0, 2, 0)))
# Trailing axis gather.
params = np.zeros((0, 0, 7), dtype=dtype.as_numpy_dtype)
gather = array_ops.gather(params, indices, axis=2)
self.assertAllEqual(gather.eval(), np.zeros((0, 0, 2)))
@parameterized.parameters([
# batch_dims=0 (equivalent to tf.gather)
dict( # 2D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[2, 1], [0, 3]],
expected=[[8, 7], [6, 9]]),
dict( # 3D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[9, 7], [8, 6]], [[6, 9], [8, 8]]]),
dict( # 4D indices
batch_dims=0,
params=[8, 9],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[8, 9], [9, 8]], [[8, 8], [9, 9]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
# batch_dims=indices.shape.ndims - 1 (equivalent to tf.batch_gather)
dict( # 2D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=2,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
dict( # 2D indices (1 batch dim)
batch_dims=-1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=-1,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
# 0 < batch_dims < indices.shape.ndims - 1
dict( # 3D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[13, 11], [12, 10]], [[20, 23], [22, 22]]]),
dict( # 4D indices (1 batch dim)
batch_dims=1,
params=[[6, 7], [8, 9]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[6, 7], [7, 6]], [[6, 6], [7, 7]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
dict( # 4D indices (2 batch dims)
batch_dims=2,
params=[[[2, 3], [4, 5]], [[6, 7], [8, 9]]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[2, 3], [3, 2]], [[4, 4], [5, 5]]],
[[[7, 7], [6, 6]], [[8, 9], [9, 8]]]]),
# axis > 0
dict( # 3D indices, batch_dims=1, axis=2
# params.shape = [I1, J1, J2] = [2, 2, 3]
# indices.shape = [I1, K1, K2] = [2, 1, 5]
# result.shape = [I1, J1, K1, K2] = [2, 2, 1, 5]
batch_dims=1,
axis=2,
params=[[[10, 11, 12], [13, 14, 15]], [[20, 21, 22], [23, 24, 25]]],
indices=[[[0, 1, 2, 1, 0]], [[0, 1, 2, 1, 0]]],
expected=[[[[10, 11, 12, 11, 10]], [[13, 14, 15, 14, 13]]],
[[[20, 21, 22, 21, 20]], [[23, 24, 25, 24, 23]]]]),
dict( # 3D indices, batch_dims=None, axis=1
batch_dims=None,
axis=1,
params=[[10, 11, 12], [13, 14, 15]],
indices=[1, 0],
expected=[[11, 10], [14, 13]]),
])
@test_util.run_in_graph_and_eager_modes
def testBatchDims(self, params, indices, batch_dims, expected=None,
axis=None):
result = array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=2,
output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
# = params.shape[:2] + indices.shape[2:] + params.shape[3:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=3,
output_shape=[2, 3, 4, 8, 9, 10, 6, 7]
# = params.shape[:3] + indices.shape[2:] + params.shape[4:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=4,
output_shape=[2, 3, 4, 5, 8, 9, 10, 7]
# = params.shape[:4] + indices.shape[2:] + params.shape[5:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=5,
output_shape=[2, 3, 4, 5, 6, 8, 9, 10]
# = params.shape[:5] + indices.shape[2:] + params.shape[6:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=-4,
output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
# = params.shape[:2] + indices.shape[2:] + params.shape[3:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=-3,
output_shape=[2, 3, 4, 8, 9, 10, 6, 7]
# = params.shape[:3] + indices.shape[2:] + params.shape[4:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=-2,
output_shape=[2, 3, 4, 5, 8, 9, 10, 7]
# = params.shape[:4] + indices.shape[2:] + params.shape[5:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=-1,
output_shape=[2, 3, 4, 5, 6, 8, 9, 10]
# = params.shape[:5] + indices.shape[2:] + params.shape[6:]
),
])
@test_util.run_in_graph_and_eager_modes
def testBatchDimsMatchesPythonBatching(self, params_shape, indices_shape,
batch_dims, axis, output_shape):
"""Checks that batch_dims matches multiple calls to tf.gather()."""
# Generate a `params` tensor with the indicated shape.
params_size = np.prod(params_shape)
params = np.reshape(np.arange(params_size), params_shape)
# Generate an `indices` tensor with the indicated shape, where each index
# is within the appropriate range.
indices_size = np.prod(indices_shape)
indices = np.reshape(np.arange(indices_size), indices_shape)
indices = indices % params_shape[axis]
# Perform repeated (batched) gather operations with numpy, to find the
# expected result.
expected = self._batchNumpyGather(params, indices, axis, batch_dims)
# On Windows, we get an exception if we pass in the transformed numpy
# arrays ("Failed to convert numpy ndarray to a Tensor (Unsupported
# feed type)."); so convert them back to lists before calling tf.gather.
params = params.tolist()
indices = indices.tolist()
result = array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims)
self.assertAllEqual(output_shape, result.shape.as_list())
self.assertAllEqual(expected, result)
def _batchNumpyGather(self, params, indices, axis, batch_dims):
"""Performs a batch gather by making recursive calls to np.take().
This is used by testBatchDims() to construct the expected value.
Args:
params: A numpy array
indices: A numpy array
axis: An integer
batch_dims: An integer
Returns:
A numpy array
"""
if batch_dims == 0:
return np.take(params, indices, axis=axis)
self.assertEqual(params.shape[0], indices.shape[0])
if axis > 0:
axis -= 1
return np.stack([
self._batchNumpyGather(params[i], indices[i], axis, batch_dims - 1)
for i in range(params.shape[0])
])
def testSkipEagerErrors(self):
if context.executing_eagerly():
return
with self.assertRaisesRegexp(ValueError, r"tf\.gather does not allow.*"):
array_ops.gather(
params=[1, 2],
batch_dims=1,
indices=array_ops.placeholder(dtypes.int32))
@test_util.run_in_graph_and_eager_modes
def testErrors(self):
with self.assertRaisesRegexp(
ValueError, r"batch_dims = 2 must be less than ndims\(indices\) = 2"):
array_ops.gather(
params=[[1, 2], [3, 4]], indices=[[1, 2], [3, 4]], batch_dims=2)
with self.assertRaisesRegexp(
ValueError, r"batch_dims = 1 must be less than ndims\(params\) = 1"):
array_ops.gather(
params=[1, 2, 3, 4], indices=[[1, 2], [3, 4]], batch_dims=1)
with self.assertRaisesRegexp(
ValueError, r"batch_dims = 1 must be less than or equal to axis = 0"):
array_ops.gather(
params=[[1, 2], [3, 4]],
indices=[[1, 2], [3, 4]],
batch_dims=1,
axis=0)
one = array_ops.ones((), dtypes.int32)
with self.assertRaisesRegexp(TypeError, "batch_dims must be an int"):
array_ops.gather(params=[[1]], indices=[[1]], batch_dims=one)
if __name__ == "__main__":
test.main()
| |
"""
File: model.py
Creates a Gaussian smoothed image of a jet.
"""
import matplotlib as mpl
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] = 'cm'
mpl.rcParams['font.size'] = '22'
import matplotlib.pyplot as plt
import numpy as np
import math
import sys
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.ndimage.filters import gaussian_filter as gf
i = 40.0 # degrees
i *= np.pi/180.0 # radians
beta = 0.9 # jet speed in units of c
d = 100.0 # Mpc; Angular distance between jet and observer
d *= 1.0e3 # kpc
psi0 = 2.0*np.pi/180.0 # radians
M = 1.0e10 # Msun; total mass of the equal-mass binary
Mdot = 1.0 # Eddington units
a0 = 8.3e-3*(M*1.0e-8)**(3./4.)*(Mdot**(-0.25)) # pc
pcto_10to16cm = 0.003241
a0 /= pcto_10to16cm # 1.0e16 cm
coeff = -2.56e5/(M*1.0e-8)**3
output_filename = 'jet_i%2d_beta%3.2f_mdot%3.2f_image' % (i*180.0/np.pi, beta, Mdot)
save_pdf = True
# case = 1, 2 zooms into the central region of the jet
case = int(sys.argv[1])
c = 3.0e5 # km/s; speed of light
yrbys = 3.154e7
kpcbykm = 3.086e16
def binary_separation_gw(t):
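    # Integrates da/dt = 1/(coeff*a**3) (gravitational-wave-driven shrinkage;
    # coeff < 0), giving a(t)**4 = a0**4 + 4*(t - t0)/coeff.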
a = (4.0/coeff) * (t - t0 + coeff*a0**4/4.0)
a = a**(1./4.)
return a
def binary_orbital_period(a_16):
t = 1.72*(a_16**1.5)/np.sqrt(M*1.0e-8) # yr
return t
def half_opening_angle_intrinsic(a_16):
angle = np.arcsin(np.sin(psi0)*a0/a_16)
return angle #*180.0/np.pi # degrees
if case == 0:
t = np.logspace(-2.0,2.0,10000000)
output_filename += '_full'
elif case == 1:
t = np.logspace(-2.0,2.0,10000000)
t = t[6000000:9500000]
output_filename += '_zoom1'
elif case == 2:
t = np.linspace(1.0,2.0,10000)
output_filename += '_zoom2'
t0 = t[0]
def t_binary(time):
t_merge=abs(coeff)*a0**4/4.0
return np.abs(time-t_merge) # yr
def Omega(time):
# Angular velocity times time for jet precession
period = binary_orbital_period(binary_separation_gw(t_binary(time))) # yr
return 2.0*np.pi*time/period # radians
def vel(time):
# Geometry of figure 4 of paper.
psi = half_opening_angle_intrinsic(binary_separation_gw(t_binary(time)))
vx = beta*c*(np.sin(psi)*np.sin(i)*np.cos(Omega(time)) + np.cos(psi)*np.cos(i))
vy = beta*c*np.sin(psi)*np.sin(Omega(time))
vz = beta*c*(np.cos(psi)*np.sin(i)-np.sin(psi)*np.cos(i)*np.cos(Omega(time)))
return sign*vx, sign*vy, sign*vz # km/s
sign = 1 # forward jet
norm_sep = False
velx, vely, velz = vel(t)
y = vely*t*yrbys/kpcbykm # kpc
z = velz*t*yrbys/kpcbykm # kpc
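# Observed (apparent) positions: dividing by (1 - v_x/c) accounts for the
# light-travel-time compression of the apparent transverse motion (the usual
# superluminal-motion factor), with x along the line of sight.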
y_obs = y/(1.0-velx/c)
z_obs = z/(1.0-velx/c)
phi_y_obs = y_obs/d * 180.0/np.pi * 3600.0 # arcsec
phi_z_obs = z_obs/d * 180.0/np.pi * 3600.0 # arcsec
alpha = 1.0
delta = 1.0
t_observed = t/(1.0-velx/c)
gamma = 1.0/np.sqrt(1.0-beta**2)
doppler = gamma*(1.0-beta)
intensity = doppler**(3.0+alpha-delta) * (t_observed**(-delta))
intensity = np.array([0.0 if x!=x else x for x in intensity])
if norm_sep:
max_intensity = intensity.max()
intensity /= max_intensity
intensity = np.array([np.log10(x) if x>0.0 else -2.0 for x in intensity])
phi_y_obs *= 1.0e3 # mas
phi_z_obs *= 1.0e3 # mas
sign = -1 # backward jet
velx, vely, velz = vel(t)
yb = vely*t*yrbys/kpcbykm # kpc
zb = velz*t*yrbys/kpcbykm # kpc
y_obsb = yb/(1.0-velx/c)
z_obsb = zb/(1.0-velx/c)
phi_y_obsb = y_obsb/d * 180.0/np.pi * 3600.0 # arcsec
phi_z_obsb = z_obsb/d * 180.0/np.pi * 3600.0 # arcsec
phi_y_obsb *= 1.0e3 # mas
phi_z_obsb *= 1.0e3 # mas
alpha = 1.0
delta = 1.0
t_observed = t/(1.0-velx/c)
gamma = 1.0/np.sqrt(1.0-beta**2)
doppler = gamma*(1.0-beta)
intensity_b = doppler**(3.0+alpha-delta) * (t_observed**(-delta))
intensity_b = np.array([0.0 if x!=x else x for x in intensity_b])
if norm_sep:
max_intensity_b = intensity_b.max()
intensity_b /= max_intensity_b
intensity_b = np.array([np.log10(x) if x>0.0 else -2.0 for x in intensity_b])
phi_z = np.concatenate([phi_z_obsb, phi_z_obs])
phi_y = np.concatenate([phi_y_obsb, phi_y_obs])
intensity = np.concatenate([intensity_b, intensity])
max_intensity = intensity.max()
intensity /= max_intensity
#intensity = np.array([np.log10(x) if x>0.0 else -2.0 for x in intensity])
nc = 1000
a = np.zeros((nc,nc),dtype=np.float32)
zl = phi_z.min()-5.0
zu = phi_z.max()+5.0
yl = phi_y.min()-5.0
yu = phi_y.max()+5.0
zl, zu = -10.0, 10.0
yl, yu = -5.0, 5.0
print(zl, zu)
print(yl, yu)
lz = zu - zl
ly = yu - yl
dy = ly/nc
dz = lz/nc
print(dy, dz)
def zloc(x):
return int((x-zl)/dz)# + 1
def yloc(x):
return int((x-yl)/dy)# + 1
for i in range(phi_z.size):
if phi_z[i] > zu or phi_z[i] < zl: continue
if phi_y[i] > yu or phi_y[i] < yl: continue
zpos = zloc(phi_z[i])
ypos = yloc(phi_y[i])
a[ypos, zpos] += abs(intensity[i])
fig = plt.figure(figsize=(7,7), dpi=100)
ax = fig.add_subplot(1, 1, 1)
a2 = gf(a, 10.0)
ylabels = range(int(yl),int(yu))
ylocs = [yloc(x) for x in ylabels]
ylabels = ['$'+str(x).strip()+'$' for x in ylabels]
zlabels = range(int(zl),int(zu),3)
zlocs = [zloc(x) for x in zlabels]
zlabels = ['$'+str(x).strip()+'$' for x in zlabels]
a2 /= a2.max()
s = plt.imshow(a2, cmap=cm.jet)
plt.yticks(ylocs, ylabels)
plt.xticks(zlocs, zlabels)
plt.xlabel('mas')
plt.ylabel('mas')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", "5%", pad="3%")
cb = plt.colorbar(s, cax=cax)
cb.set_label('intensity', labelpad=20)
cb.solids.set_edgecolor("face")
plt.savefig('jet_2mas.pdf',bbox_inches='tight')
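# --- Hedged sanity check (illustrative, not part of the original analysis) ---
# The observed transverse position above uses y_obs = y/(1 - v_x/c), which for
# a blob moving at speed beta*c at angle theta to the line of sight implies an
# apparent transverse speed beta_app = beta*sin(theta)/(1 - beta*cos(theta)).
# The values below are placeholders chosen only to illustrate the effect.
_beta_demo = 0.9
_theta_demo = np.deg2rad(10.0)
_beta_app_demo = _beta_demo*np.sin(_theta_demo)/(1.0 - _beta_demo*np.cos(_theta_demo))
# _beta_app_demo is ~1.4 here, i.e. apparent superluminal motion, consistent
# with the Doppler-boosted forward jet plotted above.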
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 NTT corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command-line flag library.
Emulates gflags by wrapping cfg.ConfigOpts.
The idea is to move fully to cfg eventually, and this wrapper is a
stepping stone.
"""
import socket
from oslo_config import cfg
from oslo_log import log as logging
from oslo_middleware import cors
from oslo_utils import netutils
CONF = cfg.CONF
logging.register_options(CONF)
core_opts = [
cfg.StrOpt('state_path',
default='/var/lib/cinder',
deprecated_name='pybasedir',
help="Top-level directory for maintaining cinder's state"), ]
CONF.register_cli_opts(core_opts)
global_opts = [
cfg.HostAddressOpt('my_ip',
default=netutils.get_my_ipv4(),
help='IP address of this host'),
cfg.ListOpt('glance_api_servers',
default=None,
help='A list of the URLs of glance API servers available to '
'cinder ([http[s]://][hostname|ip]:port). If protocol '
'is not specified it defaults to http.'),
cfg.IntOpt('glance_api_version',
default=2,
deprecated_for_removal=True,
deprecated_since="11.0.0",
deprecated_reason='Glance v1 support will be removed in Queens',
help='Version of the glance API to use'),
cfg.IntOpt('glance_num_retries',
min=0,
default=0,
               help='Number of retries when downloading an image from glance'),
cfg.BoolOpt('glance_api_insecure',
default=False,
                help='Allow performing insecure SSL (https) requests to '
'glance (https will be used but cert validation will '
'not be performed).'),
cfg.BoolOpt('glance_api_ssl_compression',
default=False,
help='Enables or disables negotiation of SSL layer '
'compression. In some cases disabling compression '
'can improve data throughput, such as when high '
'network bandwidth is available and you use '
'compressed image formats like qcow2.'),
cfg.StrOpt('glance_ca_certificates_file',
help='Location of ca certificates file to use for glance '
'client requests.'),
cfg.IntOpt('glance_request_timeout',
help='http/https timeout value for glance operations. If no '
'value (None) is supplied here, the glanceclient default '
'value is used.'),
cfg.BoolOpt('enable_v2_api',
default=True,
deprecated_for_removal=True,
help="DEPRECATED: Deploy v2 of the Cinder API."),
cfg.BoolOpt('enable_v3_api',
default=True,
help="Deploy v3 of the Cinder API."),
cfg.BoolOpt('api_rate_limit',
default=True,
help='Enables or disables rate limit of the API.'),
cfg.ListOpt('osapi_volume_ext_list',
default=[],
help='Specify list of extensions to load when using osapi_'
'volume_extension option with cinder.api.contrib.'
'select_extensions'),
cfg.MultiStrOpt('osapi_volume_extension',
default=['cinder.api.contrib.standard_extensions'],
help='osapi volume extension to load'),
cfg.StrOpt('volume_manager',
default='cinder.volume.manager.VolumeManager',
help='Full class name for the Manager for volume'),
cfg.StrOpt('backup_manager',
default='cinder.backup.manager.BackupManager',
help='Full class name for the Manager for volume backup'),
cfg.StrOpt('scheduler_manager',
default='cinder.scheduler.manager.SchedulerManager',
help='Full class name for the Manager for scheduler'),
cfg.HostAddressOpt('host',
default=socket.gethostname(),
help='Name of this node. This can be an opaque '
'identifier. It is not necessarily a host name, '
'FQDN, or IP address.'),
# NOTE(vish): default to nova for compatibility with nova installs
cfg.StrOpt('storage_availability_zone',
default='nova',
help='Availability zone of this node. Can be overridden per '
'volume backend with the option '
'"backend_availability_zone".'),
cfg.StrOpt('default_availability_zone',
help='Default availability zone for new volumes. If not set, '
'the storage_availability_zone option value is used as '
'the default for new volumes.'),
cfg.BoolOpt('allow_availability_zone_fallback',
default=False,
help='If the requested Cinder availability zone is '
'unavailable, fall back to the value of '
'default_availability_zone, then '
'storage_availability_zone, instead of failing.'),
cfg.StrOpt('default_volume_type',
help='Default volume type to use'),
cfg.StrOpt('default_group_type',
help='Default group type to use'),
cfg.StrOpt('volume_usage_audit_period',
default='month',
help='Time period for which to generate volume usages. '
'The options are hour, day, month, or year.'),
cfg.StrOpt('rootwrap_config',
default='/etc/cinder/rootwrap.conf',
help='Path to the rootwrap configuration file to use for '
'running commands as root'),
cfg.BoolOpt('monkey_patch',
default=False,
help='Enable monkey patching'),
cfg.ListOpt('monkey_patch_modules',
default=[],
help='List of modules/decorators to monkey patch'),
cfg.IntOpt('service_down_time',
default=60,
help='Maximum time since last check-in for a service to be '
'considered up'),
cfg.StrOpt('volume_api_class',
default='cinder.volume.api.API',
help='The full class name of the volume API class to use'),
cfg.StrOpt('backup_api_class',
default='cinder.backup.api.API',
help='The full class name of the volume backup API class'),
cfg.StrOpt('auth_strategy',
default='keystone',
choices=['noauth', 'keystone'],
help='The strategy to use for auth. Supports noauth or '
'keystone.'),
cfg.ListOpt('enabled_backends',
help='A list of backend names to use. These backend names '
'should be backed by a unique [CONFIG] group '
'with its options'),
cfg.BoolOpt('no_snapshot_gb_quota',
default=False,
help='Whether snapshots count against gigabyte quota'),
cfg.StrOpt('transfer_api_class',
default='cinder.transfer.api.API',
help='The full class name of the volume transfer API class'),
cfg.StrOpt('consistencygroup_api_class',
default='cinder.consistencygroup.api.API',
help='The full class name of the consistencygroup API class'),
cfg.StrOpt('group_api_class',
default='cinder.group.api.API',
help='The full class name of the group API class'),
cfg.StrOpt('os_privileged_user_name',
help='OpenStack privileged account username. Used for requests '
'to other services (such as Nova) that require an account '
'with special rights.',
deprecated_for_removal=True,
deprecated_since="11.0.0",
deprecated_reason='Use the [nova] section for configuring '
'Keystone authentication for a privileged user.'),
cfg.StrOpt('os_privileged_user_password',
help='Password associated with the OpenStack privileged '
'account.',
deprecated_for_removal=True,
deprecated_since="11.0.0",
deprecated_reason='Use the [nova] section to configure '
'Keystone authentication for a privileged user.',
secret=True),
cfg.StrOpt('os_privileged_user_tenant',
help='Tenant name associated with the OpenStack privileged '
'account.',
deprecated_for_removal=True,
deprecated_since="11.0.0",
deprecated_reason='Use the [nova] section to configure '
'Keystone authentication for a privileged user.'),
cfg.URIOpt('os_privileged_user_auth_url',
help='Auth URL associated with the OpenStack privileged '
'account.',
deprecated_for_removal=True,
deprecated_since="11.0.0",
deprecated_reason='Use the [nova] section to configure '
'Keystone authentication for a privileged user.')
]
CONF.register_opts(core_opts)
CONF.register_opts(global_opts)
def set_middleware_defaults():
"""Update default configuration options for oslo.middleware."""
cors.set_defaults(
allow_headers=['X-Auth-Token',
'X-Identity-Status',
'X-Roles',
'X-Service-Catalog',
'X-User-Id',
'X-Tenant-Id',
'X-OpenStack-Request-ID',
'X-Trace-Info',
'X-Trace-HMAC',
'OpenStack-API-Version'],
expose_headers=['X-Auth-Token',
'X-Subject-Token',
'X-Service-Token',
'X-OpenStack-Request-ID',
'OpenStack-API-Version'],
allow_methods=['GET',
'PUT',
'POST',
'DELETE',
'PATCH',
'HEAD']
)
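# A minimal usage sketch (not part of the original module): once the options
# above are registered on CONF they can be parsed and then read as attributes.
# Guarded so that importing this module is unaffected; the values shown are the
# registered defaults.
if __name__ == '__main__':
    CONF([], project='cinder')
    print('state_path = %s' % CONF.state_path)                  # /var/lib/cinder
    print('glance_api_version = %d' % CONF.glance_api_version)  # 2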
| |
"""
Support for Z-Wave.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zwave/
"""
import logging
import os.path
import time
from pprint import pprint
from homeassistant import bootstrap
from homeassistant.const import (
ATTR_BATTERY_LEVEL, ATTR_DISCOVERED, ATTR_ENTITY_ID, ATTR_LOCATION,
ATTR_SERVICE, CONF_CUSTOMIZE, EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP, EVENT_PLATFORM_DISCOVERED)
from homeassistant.util import convert, slugify
DOMAIN = "zwave"
REQUIREMENTS = ['pydispatcher==2.0.5']
CONF_USB_STICK_PATH = "usb_path"
DEFAULT_CONF_USB_STICK_PATH = "/zwaveusbstick"
CONF_DEBUG = "debug"
CONF_POLLING_INTERVAL = "polling_interval"
CONF_POLLING_INTENSITY = "polling_intensity"
# How long to wait for the zwave network to be ready.
NETWORK_READY_WAIT_SECS = 30
SERVICE_ADD_NODE = "add_node"
SERVICE_REMOVE_NODE = "remove_node"
SERVICE_HEAL_NETWORK = "heal_network"
SERVICE_SOFT_RESET = "soft_reset"
SERVICE_TEST_NETWORK = "test_network"
DISCOVER_SENSORS = "zwave.sensors"
DISCOVER_SWITCHES = "zwave.switch"
DISCOVER_LIGHTS = "zwave.light"
DISCOVER_BINARY_SENSORS = 'zwave.binary_sensor'
DISCOVER_THERMOSTATS = 'zwave.thermostat'
DISCOVER_HVAC = 'zwave.hvac'
DISCOVER_LOCKS = 'zwave.lock'
EVENT_SCENE_ACTIVATED = "zwave.scene_activated"
COMMAND_CLASS_SWITCH_MULTILEVEL = 38
COMMAND_CLASS_DOOR_LOCK = 98
COMMAND_CLASS_SWITCH_BINARY = 37
COMMAND_CLASS_SENSOR_BINARY = 48
COMMAND_CLASS_SENSOR_MULTILEVEL = 49
COMMAND_CLASS_METER = 50
COMMAND_CLASS_BATTERY = 128
COMMAND_CLASS_ALARM = 113 # 0x71
COMMAND_CLASS_THERMOSTAT_SETPOINT = 67 # 0x43
COMMAND_CLASS_THERMOSTAT_FAN_MODE = 68 # 0x44
GENRE_WHATEVER = None
GENRE_USER = "User"
TYPE_WHATEVER = None
TYPE_BYTE = "Byte"
TYPE_BOOL = "Bool"
TYPE_DECIMAL = "Decimal"
# List of tuples (DOMAIN, discovered service, supported command classes,
# value type, value genre).
DISCOVERY_COMPONENTS = [
('sensor',
DISCOVER_SENSORS,
[COMMAND_CLASS_SENSOR_MULTILEVEL,
COMMAND_CLASS_METER,
COMMAND_CLASS_ALARM],
TYPE_WHATEVER,
GENRE_USER),
('light',
DISCOVER_LIGHTS,
[COMMAND_CLASS_SWITCH_MULTILEVEL],
TYPE_BYTE,
GENRE_USER),
('switch',
DISCOVER_SWITCHES,
[COMMAND_CLASS_SWITCH_BINARY],
TYPE_BOOL,
GENRE_USER),
('binary_sensor',
DISCOVER_BINARY_SENSORS,
[COMMAND_CLASS_SENSOR_BINARY],
TYPE_BOOL,
GENRE_USER),
('thermostat',
DISCOVER_THERMOSTATS,
[COMMAND_CLASS_THERMOSTAT_SETPOINT],
TYPE_WHATEVER,
GENRE_WHATEVER),
('hvac',
DISCOVER_HVAC,
[COMMAND_CLASS_THERMOSTAT_FAN_MODE],
TYPE_WHATEVER,
GENRE_WHATEVER),
('lock',
DISCOVER_LOCKS,
[COMMAND_CLASS_DOOR_LOCK],
TYPE_BOOL,
GENRE_USER),
]
ATTR_NODE_ID = "node_id"
ATTR_VALUE_ID = "value_id"
ATTR_SCENE_ID = "scene_id"
NETWORK = None
_LOGGER = logging.getLogger(__name__)
def _obj_to_dict(obj):
"""Convert an object into a hash for debug."""
return {key: getattr(obj, key) for key
in dir(obj)
if key[0] != '_' and not hasattr(getattr(obj, key), '__call__')}
def _node_name(node):
"""Return the name of the node."""
return node.name or "{} {}".format(
node.manufacturer_name, node.product_name)
def _value_name(value):
"""Return the name of the value."""
return "{} {}".format(_node_name(value.node), value.label)
def _object_id(value):
"""Return the object_id of the device value.
    The object_id contains the node_id and the value instance id
    so it does not collide with other entity_ids.
"""
object_id = "{}_{}".format(slugify(_value_name(value)),
value.node.node_id)
# Add the instance id if there is more than one instance for the value
if value.instance > 1:
return "{}_{}".format(object_id, value.instance)
return object_id
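# Example with hypothetical values: a value labelled "Temperature" on node 5
# of an "Aeotec MultiSensor" yields "aeotec_multisensor_temperature_5", or
# "aeotec_multisensor_temperature_5_2" if it is value instance 2.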
def nice_print_node(node):
"""Print a nice formatted node to the output (debug method)."""
node_dict = _obj_to_dict(node)
node_dict['values'] = {value_id: _obj_to_dict(value)
for value_id, value in node.values.items()}
print("\n\n\n")
print("FOUND NODE", node.product_name)
pprint(node_dict)
print("\n\n\n")
def get_config_value(node, value_index):
"""Return the current configuration value for a specific index."""
try:
for value in node.values.values():
# 112 == config command class
if value.command_class == 112 and value.index == value_index:
return value.data
except RuntimeError:
        # If we get a runtime error, the dict changed while we were
        # iterating over the values; just try again.
return get_config_value(node, value_index)
# pylint: disable=R0914
def setup(hass, config):
"""Setup Z-Wave.
Will automatically load components to support devices found on the network.
"""
# pylint: disable=global-statement, import-error
global NETWORK
try:
import libopenzwave
except ImportError:
_LOGGER.error("You are missing required dependency Python Open "
"Z-Wave. Please follow instructions at: "
"https://home-assistant.io/components/zwave/")
return False
from pydispatch import dispatcher
from openzwave.option import ZWaveOption
from openzwave.network import ZWaveNetwork
default_zwave_config_path = os.path.join(os.path.dirname(
libopenzwave.__file__), 'config')
# Load configuration
use_debug = str(config[DOMAIN].get(CONF_DEBUG)) == '1'
customize = config[DOMAIN].get(CONF_CUSTOMIZE, {})
# Setup options
options = ZWaveOption(
config[DOMAIN].get(CONF_USB_STICK_PATH, DEFAULT_CONF_USB_STICK_PATH),
user_path=hass.config.config_dir,
config_path=config[DOMAIN].get('config_path',
default_zwave_config_path),)
options.set_console_output(use_debug)
options.lock()
NETWORK = ZWaveNetwork(options, autostart=False)
if use_debug:
def log_all(signal, value=None):
"""Log all the signals."""
print("")
print("SIGNAL *****", signal)
if value and signal in (ZWaveNetwork.SIGNAL_VALUE_CHANGED,
ZWaveNetwork.SIGNAL_VALUE_ADDED):
pprint(_obj_to_dict(value))
print("")
dispatcher.connect(log_all, weak=False)
def value_added(node, value):
"""Called when a value is added to a node on the network."""
for (component,
discovery_service,
command_ids,
value_type,
value_genre) in DISCOVERY_COMPONENTS:
if value.command_class not in command_ids:
continue
if value_type is not None and value_type != value.type:
continue
if value_genre is not None and value_genre != value.genre:
continue
# Ensure component is loaded
bootstrap.setup_component(hass, component, config)
# Configure node
name = "{}.{}".format(component, _object_id(value))
node_config = customize.get(name, {})
polling_intensity = convert(
node_config.get(CONF_POLLING_INTENSITY), int)
if polling_intensity:
value.enable_poll(polling_intensity)
else:
value.disable_poll()
# Fire discovery event
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: discovery_service,
ATTR_DISCOVERED: {
ATTR_NODE_ID: node.node_id,
ATTR_VALUE_ID: value.value_id,
}
})
def scene_activated(node, scene_id):
"""Called when a scene is activated on any node in the network."""
name = _node_name(node)
object_id = "{}_{}".format(slugify(name), node.node_id)
hass.bus.fire(EVENT_SCENE_ACTIVATED, {
ATTR_ENTITY_ID: object_id,
ATTR_SCENE_ID: scene_id
})
dispatcher.connect(
value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED, weak=False)
dispatcher.connect(
scene_activated, ZWaveNetwork.SIGNAL_SCENE_EVENT, weak=False)
def add_node(event):
"""Switch into inclusion mode."""
NETWORK.controller.begin_command_add_device()
def remove_node(event):
"""Switch into exclusion mode."""
NETWORK.controller.begin_command_remove_device()
def heal_network(event):
"""Heal the network."""
NETWORK.heal()
def soft_reset(event):
"""Soft reset the controller."""
NETWORK.controller.soft_reset()
def test_network(event):
"""Test the network by sending commands to all the nodes."""
NETWORK.test()
def stop_zwave(event):
"""Stop Z-Wave."""
NETWORK.stop()
def start_zwave(event):
"""Startup Z-Wave."""
NETWORK.start()
# Need to be in STATE_AWAKED before talking to nodes.
# Wait up to NETWORK_READY_WAIT_SECS seconds for the zwave network
# to be ready.
for i in range(NETWORK_READY_WAIT_SECS):
_LOGGER.info(
"network state: %d %s", NETWORK.state, NETWORK.state_str)
if NETWORK.state >= NETWORK.STATE_AWAKED:
_LOGGER.info("zwave ready after %d seconds", i)
break
time.sleep(1)
else:
_LOGGER.warning(
"zwave not ready after %d seconds, continuing anyway",
NETWORK_READY_WAIT_SECS)
_LOGGER.info(
"final network state: %d %s", NETWORK.state, NETWORK.state_str)
polling_interval = convert(
config[DOMAIN].get(CONF_POLLING_INTERVAL), int)
if polling_interval is not None:
NETWORK.set_poll_interval(polling_interval, False)
poll_interval = NETWORK.get_poll_interval()
_LOGGER.info("zwave polling interval set to %d ms", poll_interval)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_zwave)
# Register add / remove node services for Z-Wave sticks without
# hardware inclusion button
hass.services.register(DOMAIN, SERVICE_ADD_NODE, add_node)
hass.services.register(DOMAIN, SERVICE_REMOVE_NODE, remove_node)
hass.services.register(DOMAIN, SERVICE_HEAL_NETWORK, heal_network)
hass.services.register(DOMAIN, SERVICE_SOFT_RESET, soft_reset)
hass.services.register(DOMAIN, SERVICE_TEST_NETWORK, test_network)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_zwave)
return True
class ZWaveDeviceEntity:
"""Representation of a Z-Wave node entity."""
def __init__(self, value, domain):
"""Initialize the z-Wave device."""
self._value = value
self.entity_id = "{}.{}".format(domain, self._object_id())
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Return an unique ID."""
return "ZWAVE-{}-{}".format(self._value.node.node_id,
self._value.object_id)
@property
def name(self):
"""Return the name of the device."""
return _value_name(self._value)
def _object_id(self):
"""Return the object_id of the device value.
        The object_id contains the node_id and the value instance id so it
        does not collide with other entity_ids.
"""
return _object_id(self._value)
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attrs = {
ATTR_NODE_ID: self._value.node.node_id,
}
battery_level = self._value.node.get_battery_level()
if battery_level is not None:
attrs[ATTR_BATTERY_LEVEL] = battery_level
location = self._value.node.location
if location:
attrs[ATTR_LOCATION] = location
return attrs
| |
import os
import sys
sys.setrecursionlimit(200)
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import tensorflow.contrib.distributions as distributions
from tensorflow.examples.tutorials.mnist import input_data
from svhn_class import svhn, SVHN_Class, SVHN
from mnist_number import MNIST_Number, full_mnist, AllMnist
from lifelong_vae import VAE
from vanilla_vae import VanillaVAE
from encoders import DenseEncoder, CNNEncoder
from decoders import CNNDecoder
from utils import *
flags = tf.flags
flags.DEFINE_bool("sequential", 0, "sequential or not")
flags.DEFINE_integer("latent_size", 20, "Number of latent variables.")
flags.DEFINE_integer("epochs", 100, "Maximum number of epochs [for non sequential].")
flags.DEFINE_integer("batch_size", 100, "Mini-batch size for data subsampling.")
flags.DEFINE_integer("min_interval", 3000, "Minimum interval for specific dataset.")
flags.DEFINE_integer("max_dist_swaps", 32, "Maximum number of different distributions to sample from.")
flags.DEFINE_string("device", "/gpu:0", "Compute device.")
flags.DEFINE_boolean("allow_soft_placement", True, "Soft device placement.")
flags.DEFINE_float("device_percentage", "0.3", "Amount of memory to use on device.")
flags.DEFINE_boolean("use_ln", False, "use layer norm")
flags.DEFINE_boolean("use_bn", False, "use batch norm")
flags.DEFINE_string("reparam_type", "continuous", "reparameterization type for vanilla VAE")
flags.DEFINE_float("learning_rate", 1e-3, "learning rate")
flags.DEFINE_float("mutual_info_reg", 0.0, "coefficient of mutual information [0 disables]")
flags.DEFINE_string("base_dir", ".", "base dir to store experiments")
flags.DEFINE_bool("rotate_svhn", 0, "if true adds 10x+1 rotated versions of SVHN [for seq only]")
flags.DEFINE_bool("compress_rotations", 0, "if true doesn't add a new class for rotations")
FLAGS = flags.FLAGS
# Global variables
GLOBAL_ITER = 0 # keeps track of the iteration ACROSS models
TRAIN_ITER = 0 # the iteration of the current model
TEST_SET_SVHN = svhn.test
TEST_SET_MNIST = input_data.read_data_sets('MNIST_data', one_hot=True).test
TEST_SET_MNIST._images = TEST_SET_MNIST._images.reshape([-1, 28, 28])
TEST_SET_MNIST._images = MNIST_Number.resize_images(TEST_SET_MNIST._images, [32, 32])
TEST_SET_MNIST._images = MNIST_Number.bw_to_rgb(TEST_SET_MNIST._images)
def _build_latest_base_dir(base_name):
current_index = _find_latest_experiment_number(base_name) + 1
experiment_name = base_name + "_%d" % current_index
os.makedirs(experiment_name)
return experiment_name
def _find_latest_experiment_number(base_name):
current_index = 0
while os.path.isdir(base_name + "_%d" % current_index):
current_index += 1
return -1 if current_index == 0 else current_index - 1
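# Example: with directories experiment_0 and experiment_1 on disk (and no
# experiment_2), _find_latest_experiment_number("experiment") returns 1;
# if no such directory exists it returns -1.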
def build_Nd_vae(sess, source, input_shape, latent_size,
batch_size, epochs=100):
base_name = os.path.join(FLAGS.base_dir, "experiment")
print 'base_name = ', base_name
current_model = _find_latest_experiment_number(base_name)
if current_model != -1:
print "\nWARNING: old experiment found, but restoring is currently bugged, training new..\n"
base_name = base_name + "_%d" % (current_model + 1)
        latest_model = (None, 0)
# base_name = base_name + "_%d" % current_model
# latest_model = find_latest_file("%s/models" % base_name, "vae(\d+)")
else:
base_name = _build_latest_base_dir(base_name)
latest_model = (None, 0)
print 'base name: ', base_name, '| latest model = ', latest_model
# our placeholders are generated externally
is_training = tf.placeholder(tf.bool)
x = tf.placeholder(tf.float32, shape=[FLAGS.batch_size] + list(input_shape),
name="input_placeholder")
# build encoder and decoder models
# note: these can be externally built
# as long as it works with forward()
latent_size = 2*FLAGS.latent_size + 1 if FLAGS.sequential \
else 2*FLAGS.latent_size
encoder = CNNEncoder(sess, latent_size,
is_training,
use_ln=FLAGS.use_ln,
use_bn=FLAGS.use_bn)
# decoder_latent_size = FLAGS.latent_size + 1 if FLAGS.sequential \
# else FLAGS.latent_size
decoder = CNNDecoder(sess,
input_size=input_shape,
is_training=is_training,
double_channels=False,
use_ln=FLAGS.use_ln,
use_bn=FLAGS.use_bn)
print 'encoder = ', encoder.get_info()
print 'decoder = ', decoder.get_info()
# build the vae object
VAEObj = VAE if FLAGS.sequential else VanillaVAE
vae = VAEObj(sess, x, input_size=input_shape,
batch_size=FLAGS.batch_size,
latent_size=FLAGS.latent_size,
discrete_size=1,
p_x_given_z_func=distributions.Bernoulli,
encoder=encoder, decoder=decoder,
is_training=is_training,
learning_rate=FLAGS.learning_rate,
submodel=latest_model[1],
img_shape=[32, 32, 3],
vae_tm1=None, base_dir=base_name,
mutual_info_reg=FLAGS.mutual_info_reg)
model_filename = "%s/models/%s" % (base_name, latest_model[0])
is_forked = False
if os.path.isfile(model_filename):
vae.restore()
else:
sess.run([tf.global_variables_initializer(),
tf.local_variables_initializer()])
# contain all the losses for runs
mean_loss_mnist = []
mean_elbo_mnist = []
mean_recon_mnist = []
mean_latent_mnist = []
mean_loss_svhn = []
mean_elbo_svhn = []
mean_recon_svhn = []
mean_latent_svhn = []
try:
if not FLAGS.sequential:
vae.train(source[0], batch_size, display_step=1,
training_epochs=epochs)
# eval MNIST
mean_t, mean_elbo_t, mean_recon_t, mean_latent_t, \
_, _, _, _\
= evaluate_test_losses(sess, vae,
batch_size, TEST_SET_MNIST)
mean_loss_mnist += [mean_t]
mean_latent_mnist += [mean_latent_t]
mean_recon_mnist += [mean_recon_t]
mean_elbo_mnist += [mean_elbo_t]
# eval SVHN
mean_t, mean_elbo_t, mean_recon_t, mean_latent_t, \
_, _, _, _\
= evaluate_test_losses(sess, vae,
batch_size, TEST_SET_SVHN)
mean_loss_svhn += [mean_t]
mean_latent_svhn += [mean_latent_t]
mean_recon_svhn += [mean_recon_t]
mean_elbo_svhn += [mean_elbo_t]
else:
current_model = 0
total_iter = 0
all_models = [(current_model, source[current_model].number)]
while True:
# fork if we get a new model
prev_model = current_model
                # test our model every 200 iterations
if total_iter % 200 == 0:
vae.test(TEST_SET_SVHN, batch_size)
vae.test(TEST_SET_MNIST, batch_size)
inputs, outputs, indexes, current_model \
= generate_train_data(source,
batch_size,
batch_size,
current_model)
# Distribution shift Swapping logic
if prev_model != current_model:
# save away the current test set loss
mean_t, mean_elbo_t, mean_recon_t, mean_latent_t, \
_, _, _, _\
= evaluate_test_losses(sess, vae,
batch_size,
TEST_SET_MNIST)
mean_loss_mnist += [mean_t]
mean_elbo_mnist += [mean_elbo_t]
mean_latent_mnist += [mean_latent_t]
mean_recon_mnist += [mean_recon_t]
mean_t, mean_elbo_t, mean_recon_t, mean_latent_t, \
_, _, _, _\
= evaluate_test_losses(sess, vae,
batch_size,
TEST_SET_SVHN)
mean_loss_svhn += [mean_t]
mean_elbo_svhn += [mean_elbo_t]
mean_latent_svhn += [mean_latent_t]
mean_recon_svhn += [mean_recon_t]
# for the purposes of this experiment we end
# if we reach max_dist_swaps
if len(all_models) >= FLAGS.max_dist_swaps:
print '\ntrained %d models, exiting\n' \
% FLAGS.max_dist_swaps
break
# add a new discrete index if we haven't seen this distr yet
# if we compress, we just check to see if the "true" number is in the set
if FLAGS.compress_rotations:
# current_model_perms = set([current_model] + [i for i in range(len(source))])
all_true_models = [i[1] for i in all_models]
num_new_class = 1 if source[current_model].number \
not in all_true_models else 0
print 'detected %s, prev = %s, num_new_class = %d'\
% (str(source[current_model].number),
str(all_true_models), num_new_class)
else:
                        # just don't add duplicates based on the number
all_models_index = [i[0] for i in all_models]
num_new_class = 1 if current_model not in all_models_index else 0
vae = vae.fork(num_new_class)
                    is_forked = True  # marks that the first fork has been done [spawn student]
# keep track of all models (and the TRUE model)
# this is separated because the true model
# might not be the same (eg: rotations)
all_models.append((current_model,
source[current_model].number))
for start, end in zip(range(0, len(inputs) + 1, batch_size),
range(batch_size, len(inputs) + 1, batch_size)):
x = inputs[start:end]
loss, elbo, rloss, lloss = vae.partial_fit(x, is_forked=is_forked)
print 'loss[total_iter=%d][iter=%d][model=%d] = %f, elbo loss = %f, latent loss = %f, reconstr loss = %f' \
% (total_iter, vae.iteration, current_model, loss, elbo, lloss,
rloss if rloss is not None else 0.0)
total_iter += 1
except KeyboardInterrupt:
print "caught keyboard exception..."
vae.save()
if FLAGS.sequential:
np.savetxt("%s/models/class_list.csv" % vae.base_dir,
all_models,
delimiter=",")
print 'All seen models: ', all_models
write_all_losses(vae.base_dir, mean_loss_svhn,
mean_elbo_svhn, mean_recon_svhn,
mean_latent_svhn, prefix="svhn_")
write_all_losses(vae.base_dir, mean_loss_mnist,
mean_elbo_mnist, mean_recon_mnist,
mean_latent_mnist, prefix="mnist_")
return vae
# show clustering in 2d
def plot_2d_vae(sess, x_sample, y_sample, vae, batch_size):
x_sample = np.asarray(x_sample)
y_sample = np.asarray(y_sample)
print 'xs = ', x_sample.shape, ' | ys = ', y_sample.shape
z_mu = []
for start, end in zip(range(0, y_sample.shape[0] + 1, batch_size), \
range(batch_size, y_sample.shape[0] + 1, batch_size)):
z_mu.append(vae.transform(x_sample[start:end]))
z_mu = np.vstack(z_mu)
# z_mu, c = reject_outliers(np.vstack(z_mu), np.argmax(y_sample, 1))
# print 'zmus = ', z_mu.shape, ' c = ', c.shape
plt.figure(figsize=(8, 6))
# plt.ylim(-0.25, 0.25)
# plt.xlim(-0.25, 0.25)
# plt.scatter(z_mu[:, 0], z_mu[:, 1], c=c) # for reject_outliers
c = np.argmax(y_sample, 1) if len(y_sample.shape) > 1 else y_sample
plt.scatter(z_mu[:, 0], z_mu[:, 1], c=c)
plt.colorbar()
plt.savefig("%s/imgs/2d_cluster_%s.png" % (vae.base_dir, vae.get_name()),
bbox_inches='tight')
plt.show()
def _write_images(x_sample, x_reconstruct, vae_name, filename,
num_print=5, sup_title=None):
fig = plt.figure(figsize=(8, 12))
if sup_title:
fig.suptitle(sup_title)
for i in range(num_print):
if x_sample is not None:
plt.subplot(num_print, 2, 2*i + 1)
plt.imshow(x_sample[i].reshape(32, 32, 3))#, vmin=0, vmax=1)
plt.title("Test input")
plt.colorbar()
plt.subplot(num_print, 2, 2*i + 2)
plt.imshow(x_reconstruct[i].reshape(32, 32, 3))#, vmin=0, vmax=1)
plt.title("Reconstruction")
plt.colorbar()
plt.savefig(filename, bbox_inches='tight')
plt.close()
def generate_random_categorical(num_targets, batch_size):
indices = np.random.randint(0, high=num_targets, size=batch_size)
return one_hot(num_targets, indices)
def plot_ND_vae_consistency(sess, vae, batch_size, num_write=3):
disc = one_hot(vae.num_discrete, np.arange(vae.num_discrete))
for row in disc:
rnd_normal = np.random.normal(size=[vae.batch_size,
vae.latent_size])
z = np.hstack([rnd_normal,
np.tile(row, (vae.batch_size, 1))])
generated = vae.generate(z=z)
for i in range(num_write):
current_gen_str = 'discrete_index' + str(np.argmax(row))
plt.figure()
plt.title(current_gen_str)
plt.imshow(generated[i].reshape(32, 32, 3))#, vmin=0, vmax=1)
plt.colorbar()
plt.savefig("%s/imgs/vae_%d_consistency_%s_num%d.png"
% (vae.base_dir,
vae.submodel,
current_gen_str,
i),
bbox_inches='tight')
plt.close()
def plot_ND_vae_inference(sess, vae, batch_size, num_write=10):
z_generated = generate_random_categorical(FLAGS.latent_size, batch_size)
vae_i = vae
current_vae = 0
while vae_i is not None: # do this for all the forked VAE's
x_reconstruct = vae_i.generate(z_mu=z_generated)
for x, z in zip(x_reconstruct[0:num_write], z_generated[0:num_write]):
# current_pred_str = '_'.join(map(str, index_of_generation))
current_pred_str = '_atindex' + str(np.argwhere(z)[0][0])
plt.figure()
plt.title(current_pred_str)
plt.imshow(x.reshape(32, 32, 3))#, vmin=0, vmax=1)
plt.colorbar()
plt.savefig("%s/imgs/vae_%d_inference_%s.png" % (vae_i.base_dir,
current_vae,
current_pred_str),
bbox_inches='tight')
print 'z_generated[vae# %d] = %s' % (current_vae, current_pred_str)
vae_i = vae_i.vae_tm1
current_vae += 1
def smooth_interpolate_latent_space(sess, vae, prefix=""):
nx = ny = 20
x_values = np.linspace(-3, 3, nx)
y_values = np.linspace(-3, 3, ny)
for current_disc in xrange(vae.num_discrete):
canvas = np.empty((32*ny, 32*nx, 3))
for i, yi in enumerate(x_values):
for j, xi in enumerate(y_values):
z_mu = np.array([[xi, yi]]*vae.batch_size)
z_disc = one_hot(vae.num_discrete, [current_disc]*vae.batch_size)
z = np.hstack([z_mu, z_disc])
x_mean = vae.generate(z)
canvas[(nx-i-1)*32:(nx-i)*32, j*32:(j+1)*32, :] = x_mean[0].reshape(32, 32, 3)
plt.figure(figsize=(8, 10))
Xi, Yi = np.meshgrid(x_values, y_values)
plt.imshow(canvas, origin="upper", cmap="gray")
plt.tight_layout()
plt.savefig("%s/imgs/%sinterpolation_discrete%d.png" % (vae.base_dir,
prefix,
current_disc))
plt.close()
def write_csv(arr, base_dir, filename):
with open("%s/%s" % (base_dir, filename), 'a') as f:
np.savetxt(f, arr, delimiter=",")
def evaluate_test_losses(sess, vae, batch_size, test_set):
num_test = test_set.num_examples
num_batches = 0.
loss_t = []
elbo_t = []
recon_loss_t = []
latent_loss_t = []
# run over our batch size and accumulate the error
for begin, end in zip(xrange(0, num_test, batch_size),
xrange(batch_size, num_test+1, batch_size)):
minibatch = test_set.images[begin:end]
_, _, recon_loss_mean, \
_, latent_kl_mean, \
_, cost_mean, elbo_mean \
= vae.reconstruct(minibatch,
return_losses=True)
recon_loss_t.append(recon_loss_mean)
latent_loss_t.append(latent_kl_mean)
elbo_t.append(elbo_mean)
loss_t.append(cost_mean)
num_batches += 1
# average over the number of minibatches
loss_t = np.squeeze(np.asarray(loss_t))
elbo_t = np.squeeze(np.asarray(elbo_t))
recon_loss_t = np.squeeze(np.asarray(recon_loss_t))
latent_loss_t = np.squeeze(np.asarray(latent_loss_t))
mean_loss = np.sum(loss_t) * (1.0 / num_batches)
mean_elbo = np.sum(elbo_t) * (1.0 / num_batches)
mean_recon_loss = np.sum(recon_loss_t) * (1.0 / num_batches)
mean_latent_loss = np.sum(latent_loss_t) * (1.0 / num_batches)
submodel = vae.submodel if FLAGS.sequential else 0
print 'Mean losses [VAE %d] = Loss: %f | ELBO: %f | Reconstruction: %f | LatentKL: %f'\
% (submodel, mean_loss, mean_elbo, mean_recon_loss, mean_latent_loss)
return mean_loss, mean_elbo, mean_recon_loss, mean_latent_loss,\
loss_t, elbo_t, recon_loss_t, latent_loss_t
def write_all_losses(base_dir, loss_t, elbo_t, recon_loss_t,
latent_loss_t, prefix="mnist_"):
# write_csv(np.array([mean_loss]),
# base_dir,
# "models/test_loss_mean.csv")
# write_csv(np.array([mean_recon_loss]),
# base_dir,
# "models/test_recon_loss_mean.csv")
# write_csv(np.array([mean_latent_loss]),
# base_dir,
# "models/test_latent_loss_mean.csv")
write_csv(loss_t, base_dir, "models/%stest_loss.csv" % prefix)
write_csv(elbo_t, base_dir, "models/%stest_elbo.csv" % prefix)
write_csv(recon_loss_t, base_dir, "models/%stest_recon_loss.csv" % prefix)
write_csv(latent_loss_t, base_dir, "models/%stest_latent_loss.csv" % prefix)
def plot_Nd_vae(sess, source, vae, batch_size, test_set, prefix="mnist_"):
if not FLAGS.sequential:
x_sample = source[0].test.next_batch(batch_size)[0]
x_reconstruct = vae.reconstruct(x_sample)
elif FLAGS.sequential:
x_sample = test_set.next_batch(batch_size)[0]
x_reconstruct = vae.reconstruct(x_sample)
x_reconstruct_tm1 = []
vae_tm1 = vae.vae_tm1
while vae_tm1 is not None:
x_reconstruct_tm1.append([vae_tm1.reconstruct(x_sample),
vae_tm1.get_name()])
vae_tm1 = vae_tm1.vae_tm1
# write base
_write_images(x_sample, x_reconstruct, vae.get_name(),
filename="%s/imgs/%s_reconstr_%s.png" % (vae.base_dir,
prefix,
vae.get_name()))
# write all recursive
if FLAGS.sequential:
for x_r_tm1, name_tm1 in x_reconstruct_tm1:
_write_images(x_sample, x_r_tm1, name_tm1,
filename="%s/imgs/%s_reconstr_%s.png"
% (vae.base_dir, prefix, name_tm1))
def create_indexes(num_train, num_models, current_model):
global TRAIN_ITER
global GLOBAL_ITER
if np.random.randint(0, FLAGS.batch_size * 13) == 2 \
            and TRAIN_ITER > FLAGS.min_interval:  # gated by FLAGS.min_interval (default 3000)
#current_model = np.random.randint(0, num_models)
current_model = current_model + 1 if current_model < num_models - 1 else 0
TRAIN_ITER = 0
GLOBAL_ITER += 1
TRAIN_ITER += 1
return current_model, [current_model] * num_train
def _generate_from_index(generators, gen_indexes):
try:
full_data = [generators[t].get_batch_iter(1) for t in gen_indexes]
inputs = np.vstack([t[0] for t in full_data])
outputs = np.vstack([t[1] for t in full_data])
return inputs, outputs, gen_indexes
except Exception as e:
print 'caught exception in gen_from_index: ', e
print 'len generators = %d | t = %d' % (len(generators), t)
def generate_train_data(generators, num_train, batch_size, current_model):
current_model, indexes = create_indexes(num_train, len(generators), current_model)
num_batches = int(np.floor(len(indexes) / batch_size))
indexes = indexes[0:num_batches * batch_size] # dump extra data
inputs, outputs, _ = _generate_from_index(generators, indexes)
return inputs, outputs, indexes, current_model
def generate_test_data(generators, num_train, batch_size):
indexes = list(np.arange(len(generators))) * num_train
num_batches = int(np.floor(len(indexes) / batch_size))
indexes = indexes[0:num_batches * batch_size] # dump extra data
return _generate_from_index(generators, indexes)
def evaluate_running_hist(vae):
vae_t = vae
current_vae = 0
while vae_t is not None:
print 'histogram[vae# %d]' % current_vae, vae_t.running_hist_host
vae_t = vae_t.vae_tm1
current_vae += 1
def rotate_svhn(generators):
    ''' Rotates SVHN to the angles specified below; each original
    distribution gains 10 rotated copies (the "10x + 1" total in the flag help).'''
rotated = []
for n in xrange(len(generators)):
for t in [30, 45, 70, 90, 130, 165, 200, 250, 295, 335]:
number = SVHN_Class(n, svhn)
number.mnist = SVHN_Class.rotate_all_sets(number.classes, n, t)
rotated.append(number)
generators = generators + rotated
print 'rotated generators length = ', len(generators)
return generators
def main():
if FLAGS.sequential:
all_mnist = AllMnist(one_hot=True,
is_flat=False,
resize_dims=[32, 32],
convert_to_rgb=True)
generators = [SVHN(one_hot=True), all_mnist] # [SVHN_Class(0, svhn)]
else:
generators = [SVHN(one_hot=True)]
print 'there are %d generators' % len(generators)
# rotate mnist if specified
if FLAGS.rotate_svhn:
generators = rotate_svhn(generators)
input_shape = TEST_SET_SVHN.images.shape[1:]
with tf.device(FLAGS.device):
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.device_percentage)
sess_cfg = tf.ConfigProto(allow_soft_placement=FLAGS.allow_soft_placement,
gpu_options=gpu_options)
with tf.Session(config=sess_cfg) as sess:
vae = build_Nd_vae(sess, generators,
input_shape,
FLAGS.latent_size,
FLAGS.batch_size,
epochs=FLAGS.epochs)
# run a test inference and verify
if FLAGS.sequential:
print '\n############### Testing consistency #####################'
plot_ND_vae_consistency(sess, vae,
FLAGS.batch_size,
num_write=3)
print '.......done [see imgs/vae_consistency_*]'
print '###########################################################'
# evaluate the reconstruction loss under the test set
print 'MNIST: ',
evaluate_test_losses(sess,
vae,
FLAGS.batch_size,
TEST_SET_MNIST)
print 'SVHN: ',
evaluate_test_losses(sess,
vae,
FLAGS.batch_size,
TEST_SET_SVHN)
# 2d plot shows a cluster plot vs. a reconstruction plot
if FLAGS.latent_size == 2:
if not FLAGS.sequential:
x_sample, y_sample = generators[0].test.next_batch(10000)
elif FLAGS.sequential:
x_sample, y_sample \
= svhn.test.next_batch(10000)
if len(x_sample.shape) == 2:
# TODO: fix this later [broken for rgb imgs]
plot_2d_vae(sess, x_sample, y_sample,
vae, FLAGS.batch_size)
smooth_interpolate_latent_space(sess, vae)
else:
plot_Nd_vae(sess, generators, vae, FLAGS.batch_size,
TEST_SET_SVHN, prefix="svhn_")
plot_Nd_vae(sess, generators, vae, FLAGS.batch_size,
TEST_SET_MNIST, prefix="mnist_")
if __name__ == "__main__":
main()
| |
# mako/parsetree.py
# Copyright (C) 2006-2013 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""defines the parse tree components for Mako templates."""
from mako import exceptions, ast, util, filters, compat
import re
class Node(object):
"""base class for a Node in the parse tree."""
def __init__(self, source, lineno, pos, filename):
self.source = source
self.lineno = lineno
self.pos = pos
self.filename = filename
@property
def exception_kwargs(self):
return {'source': self.source, 'lineno': self.lineno,
'pos': self.pos, 'filename': self.filename}
def get_children(self):
return []
def accept_visitor(self, visitor):
def traverse(node):
for n in node.get_children():
n.accept_visitor(visitor)
method = getattr(visitor, "visit" + self.__class__.__name__, traverse)
method(self)
class TemplateNode(Node):
"""a 'container' node that stores the overall collection of nodes."""
def __init__(self, filename):
super(TemplateNode, self).__init__('', 0, 0, filename)
self.nodes = []
self.page_attributes = {}
def get_children(self):
return self.nodes
def __repr__(self):
return "TemplateNode(%s, %r)" % (
util.sorted_dict_repr(self.page_attributes),
self.nodes)
class ControlLine(Node):
"""defines a control line, a line-oriented python line or end tag.
e.g.::
% if foo:
(markup)
% endif
"""
has_loop_context = False
def __init__(self, keyword, isend, text, **kwargs):
super(ControlLine, self).__init__(**kwargs)
self.text = text
self.keyword = keyword
self.isend = isend
self.is_primary = keyword in ['for', 'if', 'while', 'try', 'with']
self.nodes = []
if self.isend:
self._declared_identifiers = []
self._undeclared_identifiers = []
else:
code = ast.PythonFragment(text, **self.exception_kwargs)
self._declared_identifiers = code.declared_identifiers
self._undeclared_identifiers = code.undeclared_identifiers
def get_children(self):
return self.nodes
def declared_identifiers(self):
return self._declared_identifiers
def undeclared_identifiers(self):
return self._undeclared_identifiers
def is_ternary(self, keyword):
"""return true if the given keyword is a ternary keyword
for this ControlLine"""
return keyword in {
'if':set(['else', 'elif']),
'try':set(['except', 'finally']),
'for':set(['else'])
}.get(self.keyword, [])
def __repr__(self):
return "ControlLine(%r, %r, %r, %r)" % (
self.keyword,
self.text,
self.isend,
(self.lineno, self.pos)
)
class Text(Node):
"""defines plain text in the template."""
def __init__(self, content, **kwargs):
super(Text, self).__init__(**kwargs)
self.content = content
def __repr__(self):
return "Text(%r, %r)" % (self.content, (self.lineno, self.pos))
class Code(Node):
"""defines a Python code block, either inline or module level.
e.g.::
inline:
<%
x = 12
%>
module level:
<%!
import logger
%>
"""
def __init__(self, text, ismodule, **kwargs):
super(Code, self).__init__(**kwargs)
self.text = text
self.ismodule = ismodule
self.code = ast.PythonCode(text, **self.exception_kwargs)
def declared_identifiers(self):
return self.code.declared_identifiers
def undeclared_identifiers(self):
return self.code.undeclared_identifiers
def __repr__(self):
return "Code(%r, %r, %r)" % (
self.text,
self.ismodule,
(self.lineno, self.pos)
)
class Comment(Node):
"""defines a comment line.
# this is a comment
"""
def __init__(self, text, **kwargs):
super(Comment, self).__init__(**kwargs)
self.text = text
def __repr__(self):
return "Comment(%r, %r)" % (self.text, (self.lineno, self.pos))
class Expression(Node):
"""defines an inline expression.
${x+y}
"""
def __init__(self, text, escapes, **kwargs):
super(Expression, self).__init__(**kwargs)
self.text = text
self.escapes = escapes
self.escapes_code = ast.ArgumentList(escapes, **self.exception_kwargs)
self.code = ast.PythonCode(text, **self.exception_kwargs)
def declared_identifiers(self):
return []
def undeclared_identifiers(self):
# TODO: make the "filter" shortcut list configurable at parse/gen time
return self.code.undeclared_identifiers.union(
self.escapes_code.undeclared_identifiers.difference(
set(filters.DEFAULT_ESCAPES.keys())
)
).difference(self.code.declared_identifiers)
def __repr__(self):
return "Expression(%r, %r, %r)" % (
self.text,
self.escapes_code.args,
(self.lineno, self.pos)
)
class _TagMeta(type):
"""metaclass to allow Tag to produce a subclass according to
its keyword"""
_classmap = {}
def __init__(cls, clsname, bases, dict):
if getattr(cls, '__keyword__', None) is not None:
cls._classmap[cls.__keyword__] = cls
super(_TagMeta, cls).__init__(clsname, bases, dict)
def __call__(cls, keyword, attributes, **kwargs):
if ":" in keyword:
ns, defname = keyword.split(':')
return type.__call__(CallNamespaceTag, ns, defname,
attributes, **kwargs)
try:
cls = _TagMeta._classmap[keyword]
except KeyError:
raise exceptions.CompileException(
"No such tag: '%s'" % keyword,
source=kwargs['source'],
lineno=kwargs['lineno'],
pos=kwargs['pos'],
filename=kwargs['filename']
)
return type.__call__(cls, keyword, attributes, **kwargs)
class Tag(compat.with_metaclass(_TagMeta, Node)):
"""abstract base class for tags.
<%sometag/>
<%someothertag>
stuff
</%someothertag>
"""
__keyword__ = None
def __init__(self, keyword, attributes, expressions,
nonexpressions, required, **kwargs):
"""construct a new Tag instance.
        this constructor is not called directly; it is only called
        by subclasses.
:param keyword: the tag keyword
:param attributes: raw dictionary of attribute key/value pairs
:param expressions: a set of identifiers that are legal attributes,
which can also contain embedded expressions
:param nonexpressions: a set of identifiers that are legal
attributes, which cannot contain embedded expressions
:param \**kwargs:
other arguments passed to the Node superclass (lineno, pos)
"""
super(Tag, self).__init__(**kwargs)
self.keyword = keyword
self.attributes = attributes
self._parse_attributes(expressions, nonexpressions)
missing = [r for r in required if r not in self.parsed_attributes]
if len(missing):
raise exceptions.CompileException(
"Missing attribute(s): %s" %
",".join([repr(m) for m in missing]),
**self.exception_kwargs)
self.parent = None
self.nodes = []
def is_root(self):
return self.parent is None
def get_children(self):
return self.nodes
def _parse_attributes(self, expressions, nonexpressions):
undeclared_identifiers = set()
self.parsed_attributes = {}
for key in self.attributes:
if key in expressions:
expr = []
for x in re.compile(r'(\${.+?})',
re.S).split(self.attributes[key]):
m = re.compile(r'^\${(.+?)}$', re.S).match(x)
if m:
code = ast.PythonCode(m.group(1).rstrip(),
**self.exception_kwargs)
                        # we are not discarding "declared_identifiers" here
                        # (elsewhere we discard them so that variables declared
                        # in list comprehensions are not counted); as yet no
                        # condition has been found that requires doing so here.
undeclared_identifiers = \
undeclared_identifiers.union(
code.undeclared_identifiers)
expr.append('(%s)' % m.group(1))
else:
if x:
expr.append(repr(x))
self.parsed_attributes[key] = " + ".join(expr) or repr('')
elif key in nonexpressions:
if re.search(r'\${.+?}', self.attributes[key]):
raise exceptions.CompileException(
"Attibute '%s' in tag '%s' does not allow embedded "
"expressions" % (key, self.keyword),
**self.exception_kwargs)
self.parsed_attributes[key] = repr(self.attributes[key])
else:
raise exceptions.CompileException(
"Invalid attribute for tag '%s': '%s'" %
(self.keyword, key),
**self.exception_kwargs)
self.expression_undeclared_identifiers = undeclared_identifiers
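        # Illustrative example: an expression attribute such as
        #     file="${context['path']}.html"
        # is parsed into the Python expression string
        #     (context['path']) + '.html'
        # i.e. ${...} segments become parenthesized expressions and literal
        # segments are repr()'d, joined with " + ".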
def declared_identifiers(self):
return []
def undeclared_identifiers(self):
return self.expression_undeclared_identifiers
def __repr__(self):
return "%s(%r, %s, %r, %r)" % (self.__class__.__name__,
self.keyword,
util.sorted_dict_repr(self.attributes),
(self.lineno, self.pos),
self.nodes
)
class IncludeTag(Tag):
__keyword__ = 'include'
def __init__(self, keyword, attributes, **kwargs):
super(IncludeTag, self).__init__(
keyword,
attributes,
('file', 'import', 'args'),
(), ('file',), **kwargs)
self.page_args = ast.PythonCode(
"__DUMMY(%s)" % attributes.get('args', ''),
**self.exception_kwargs)
def declared_identifiers(self):
return []
def undeclared_identifiers(self):
identifiers = self.page_args.undeclared_identifiers.\
difference(set(["__DUMMY"])).\
difference(self.page_args.declared_identifiers)
return identifiers.union(super(IncludeTag, self).
undeclared_identifiers())
class NamespaceTag(Tag):
__keyword__ = 'namespace'
def __init__(self, keyword, attributes, **kwargs):
super(NamespaceTag, self).__init__(
keyword, attributes,
('file',),
('name','inheritable',
'import','module'),
(), **kwargs)
self.name = attributes.get('name', '__anon_%s' % hex(abs(id(self))))
if not 'name' in attributes and not 'import' in attributes:
raise exceptions.CompileException(
"'name' and/or 'import' attributes are required "
"for <%namespace>",
**self.exception_kwargs)
if 'file' in attributes and 'module' in attributes:
raise exceptions.CompileException(
"<%namespace> may only have one of 'file' or 'module'",
**self.exception_kwargs
)
def declared_identifiers(self):
return []
class TextTag(Tag):
__keyword__ = 'text'
def __init__(self, keyword, attributes, **kwargs):
super(TextTag, self).__init__(
keyword,
attributes, (),
            ('filter',), (), **kwargs)
self.filter_args = ast.ArgumentList(
attributes.get('filter', ''),
**self.exception_kwargs)
def undeclared_identifiers(self):
return self.filter_args.\
undeclared_identifiers.\
difference(filters.DEFAULT_ESCAPES.keys()).union(
self.expression_undeclared_identifiers
)
class DefTag(Tag):
__keyword__ = 'def'
def __init__(self, keyword, attributes, **kwargs):
expressions = ['buffered', 'cached'] + [
c for c in attributes if c.startswith('cache_')]
super(DefTag, self).__init__(
keyword,
attributes,
expressions,
('name', 'filter', 'decorator'),
('name',),
**kwargs)
name = attributes['name']
if re.match(r'^[\w_]+$', name):
raise exceptions.CompileException(
"Missing parenthesis in %def",
**self.exception_kwargs)
self.function_decl = ast.FunctionDecl("def " + name + ":pass",
**self.exception_kwargs)
self.name = self.function_decl.funcname
self.decorator = attributes.get('decorator', '')
self.filter_args = ast.ArgumentList(
attributes.get('filter', ''),
**self.exception_kwargs)
is_anonymous = False
is_block = False
@property
def funcname(self):
return self.function_decl.funcname
def get_argument_expressions(self, **kw):
return self.function_decl.get_argument_expressions(**kw)
def declared_identifiers(self):
return self.function_decl.argnames
def undeclared_identifiers(self):
res = []
for c in self.function_decl.defaults:
res += list(ast.PythonCode(c, **self.exception_kwargs).
undeclared_identifiers)
return set(res).union(
self.filter_args.\
undeclared_identifiers.\
difference(filters.DEFAULT_ESCAPES.keys())
).union(
self.expression_undeclared_identifiers
).difference(
self.function_decl.argnames
)
class BlockTag(Tag):
__keyword__ = 'block'
def __init__(self, keyword, attributes, **kwargs):
expressions = ['buffered', 'cached', 'args'] + [
c for c in attributes if c.startswith('cache_')]
super(BlockTag, self).__init__(
keyword,
attributes,
expressions,
('name','filter', 'decorator'),
(),
**kwargs)
name = attributes.get('name')
if name and not re.match(r'^[\w_]+$',name):
raise exceptions.CompileException(
"%block may not specify an argument signature",
**self.exception_kwargs)
if not name and attributes.get('args', None):
raise exceptions.CompileException(
"Only named %blocks may specify args",
**self.exception_kwargs
)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs)
self.name = name
self.decorator = attributes.get('decorator', '')
self.filter_args = ast.ArgumentList(
attributes.get('filter', ''),
**self.exception_kwargs)
is_block = True
@property
def is_anonymous(self):
return self.name is None
@property
def funcname(self):
return self.name or "__M_anon_%d" % (self.lineno, )
def get_argument_expressions(self, **kw):
return self.body_decl.get_argument_expressions(**kw)
def declared_identifiers(self):
return self.body_decl.argnames
def undeclared_identifiers(self):
return (self.filter_args.\
undeclared_identifiers.\
difference(filters.DEFAULT_ESCAPES.keys())
).union(self.expression_undeclared_identifiers)
class CallTag(Tag):
__keyword__ = 'call'
def __init__(self, keyword, attributes, **kwargs):
super(CallTag, self).__init__(keyword, attributes,
                                      ('args',), ('expr',), ('expr',), **kwargs)
self.expression = attributes['expr']
self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs)
def declared_identifiers(self):
return self.code.declared_identifiers.union(self.body_decl.argnames)
def undeclared_identifiers(self):
return self.code.undeclared_identifiers.\
difference(self.code.declared_identifiers)
class CallNamespaceTag(Tag):
def __init__(self, namespace, defname, attributes, **kwargs):
super(CallNamespaceTag, self).__init__(
namespace + ":" + defname,
attributes,
tuple(attributes.keys()) + ('args', ),
(),
(),
**kwargs)
self.expression = "%s.%s(%s)" % (
namespace,
defname,
",".join(["%s=%s" % (k, v) for k, v in
self.parsed_attributes.items()
if k != 'args'])
)
self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
self.body_decl = ast.FunctionArgs(
attributes.get('args', ''),
**self.exception_kwargs)
def declared_identifiers(self):
return self.code.declared_identifiers.union(self.body_decl.argnames)
def undeclared_identifiers(self):
return self.code.undeclared_identifiers.\
difference(self.code.declared_identifiers)
class InheritTag(Tag):
__keyword__ = 'inherit'
def __init__(self, keyword, attributes, **kwargs):
super(InheritTag, self).__init__(
keyword, attributes,
('file',), (), ('file',), **kwargs)
class PageTag(Tag):
__keyword__ = 'page'
def __init__(self, keyword, attributes, **kwargs):
expressions = ['cached', 'args', 'expression_filter', 'enable_loop'] + [
c for c in attributes if c.startswith('cache_')]
super(PageTag, self).__init__(
keyword,
attributes,
expressions,
(),
(),
**kwargs)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs)
self.filter_args = ast.ArgumentList(
attributes.get('expression_filter', ''),
**self.exception_kwargs)
def declared_identifiers(self):
return self.body_decl.argnames
| |
from __future__ import unicode_literals
import fnmatch
import glob
import io
import os
import re
import sys
from functools import total_ordering
from itertools import dropwhile
import django
from django.conf import settings
from django.core.files.temp import NamedTemporaryFile
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import (
find_command, handle_extensions, popen_wrapper,
)
from django.utils._os import upath
from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_str
from django.utils.functional import cached_property
from django.utils.jslex import prepare_js_for_gettext
from django.utils.text import get_text_list
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
STATUS_OK = 0
NO_LOCALE_DIR = object()
def check_programs(*programs):
for program in programs:
if find_command(program) is None:
raise CommandError("Can't find %s. Make sure you have GNU "
"gettext tools 0.15 or newer installed." % program)
@total_ordering
class TranslatableFile(object):
def __init__(self, dirpath, file_name, locale_dir):
self.file = file_name
self.dirpath = dirpath
self.locale_dir = locale_dir
def __repr__(self):
return "<TranslatableFile: %s>" % os.sep.join([self.dirpath, self.file])
def __eq__(self, other):
return self.path == other.path
def __lt__(self, other):
return self.path < other.path
@property
def path(self):
return os.path.join(self.dirpath, self.file)
class BuildFile(object):
"""
Represents the state of a translatable file during the build process.
"""
def __init__(self, command, domain, translatable):
self.command = command
self.domain = domain
self.translatable = translatable
@cached_property
def is_templatized(self):
if self.domain == 'djangojs':
return self.command.gettext_version < (0, 18, 3)
elif self.domain == 'django':
file_ext = os.path.splitext(self.translatable.file)[1]
return file_ext != '.py'
return False
@cached_property
def path(self):
return self.translatable.path
@cached_property
def work_path(self):
"""
Path to a file which is being fed into GNU gettext pipeline. This may
be either a translatable or its preprocessed version.
"""
if not self.is_templatized:
return self.path
extension = {
'djangojs': 'c',
'django': 'py',
}.get(self.domain)
filename = '%s.%s' % (self.translatable.file, extension)
return os.path.join(self.translatable.dirpath, filename)
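        # For example, a "django"-domain template base.html is handed to
        # xgettext as base.html.py; a "djangojs" file app.js becomes app.js.c
        # (only when the installed gettext is older than 0.18.3, per
        # is_templatized above).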
def preprocess(self):
"""
Preprocess (if necessary) a translatable file before passing it to
xgettext GNU gettext utility.
"""
from django.utils.translation import templatize
if not self.is_templatized:
return
with io.open(self.path, 'r', encoding=settings.FILE_CHARSET) as fp:
src_data = fp.read()
if self.domain == 'djangojs':
content = prepare_js_for_gettext(src_data)
elif self.domain == 'django':
content = templatize(src_data, self.path[2:])
with io.open(self.work_path, 'w', encoding='utf-8') as fp:
fp.write(content)
def postprocess_messages(self, msgs):
"""
Postprocess messages generated by xgettext GNU gettext utility.
Transform paths as if these messages were generated from original
translatable files rather than from preprocessed versions.
"""
if not self.is_templatized:
return msgs
# Remove '.py' suffix
if os.name == 'nt':
# Preserve '.\' prefix on Windows to respect gettext behavior
old = '#: ' + self.work_path
new = '#: ' + self.path
else:
old = '#: ' + self.work_path[2:]
new = '#: ' + self.path[2:]
return msgs.replace(old, new)
def cleanup(self):
"""
Remove a preprocessed copy of a translatable file (if any).
"""
if self.is_templatized:
# This check is needed for the case of a symlinked file and its
# source being processed inside a single group (locale dir);
# removing either of those two removes both.
if os.path.exists(self.work_path):
os.unlink(self.work_path)
def write_pot_file(potfile, msgs):
"""
Write the :param potfile: POT file with the :param msgs: contents,
previously making sure its format is valid.
"""
if os.path.exists(potfile):
# Strip the header
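        # dropwhile(len, ...) skips the leading non-empty lines (the header
        # block emitted by xgettext) up to the first blank line, so only
        # the message entries are appended to the existing POT file.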
msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
else:
msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
with io.open(potfile, 'a', encoding='utf-8') as fp:
fp.write(msgs)
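# Added note (not in the original module): the POT file is opened in append
# mode, and for the second and later chunks written to an existing file the
# header block is stripped first (dropwhile(len, ...) discards lines up to the
# first blank one), so several xgettext outputs concatenate into one POT file
# with a single header.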
class Command(BaseCommand):
help = ("Runs over the entire source tree of the current directory and "
"pulls out all strings marked for translation. It creates (or updates) a message "
"file in the conf/locale (in the django tree) or locale (for projects and "
"applications) directory.\n\nYou must run this command with one of either the "
"--locale, --exclude or --all options.")
translatable_file_class = TranslatableFile
build_file_class = BuildFile
requires_system_checks = False
leave_locale_alone = True
msgmerge_options = ['-q', '--previous']
msguniq_options = ['--to-code=utf-8']
msgattrib_options = ['--no-obsolete']
xgettext_options = ['--from-code=UTF-8', '--add-comments=Translators']
def add_arguments(self, parser):
parser.add_argument('--locale', '-l', default=[], dest='locale', action='append',
help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). '
'Can be used multiple times.')
parser.add_argument('--exclude', '-x', default=[], dest='exclude', action='append',
help='Locales to exclude. Default is none. Can be used multiple times.')
parser.add_argument('--domain', '-d', default='django', dest='domain',
help='The domain of the message files (default: "django").')
parser.add_argument('--all', '-a', action='store_true', dest='all',
default=False, help='Updates the message files for all existing locales.')
parser.add_argument('--extension', '-e', dest='extensions',
help='The file extension(s) to examine (default: "html,txt,py", or "js" '
'if the domain is "djangojs"). Separate multiple extensions with '
'commas, or use -e multiple times.',
action='append')
parser.add_argument('--symlinks', '-s', action='store_true', dest='symlinks',
default=False, help='Follows symlinks to directories when examining '
'source code and templates for translation strings.')
parser.add_argument('--ignore', '-i', action='append', dest='ignore_patterns',
default=[], metavar='PATTERN',
help='Ignore files or directories matching this glob-style pattern. '
'Use multiple times to ignore more.')
parser.add_argument('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*', '*~' and '*.pyc'.")
parser.add_argument('--no-wrap', action='store_true', dest='no_wrap',
default=False, help="Don't break long message lines into several lines.")
parser.add_argument('--no-location', action='store_true', dest='no_location',
default=False, help="Don't write '#: filename:line' lines.")
parser.add_argument('--no-obsolete', action='store_true', dest='no_obsolete',
default=False, help="Remove obsolete message strings.")
parser.add_argument('--keep-pot', action='store_true', dest='keep_pot',
default=False, help="Keep .pot file after making messages. Useful when debugging.")
def handle(self, *args, **options):
locale = options['locale']
exclude = options['exclude']
self.domain = options['domain']
self.verbosity = options['verbosity']
process_all = options['all']
extensions = options['extensions']
self.symlinks = options['symlinks']
# Need to ensure that the i18n framework is enabled
if settings.configured:
settings.USE_I18N = True
else:
settings.configure(USE_I18N=True)
ignore_patterns = options['ignore_patterns']
if options['use_default_ignore_patterns']:
ignore_patterns += ['CVS', '.*', '*~', '*.pyc']
self.ignore_patterns = list(set(ignore_patterns))
# Avoid messing with mutable class variables
if options['no_wrap']:
self.msgmerge_options = self.msgmerge_options[:] + ['--no-wrap']
self.msguniq_options = self.msguniq_options[:] + ['--no-wrap']
self.msgattrib_options = self.msgattrib_options[:] + ['--no-wrap']
self.xgettext_options = self.xgettext_options[:] + ['--no-wrap']
if options['no_location']:
self.msgmerge_options = self.msgmerge_options[:] + ['--no-location']
self.msguniq_options = self.msguniq_options[:] + ['--no-location']
self.msgattrib_options = self.msgattrib_options[:] + ['--no-location']
self.xgettext_options = self.xgettext_options[:] + ['--no-location']
self.no_obsolete = options['no_obsolete']
self.keep_pot = options['keep_pot']
if self.domain not in ('django', 'djangojs'):
raise CommandError("currently makemessages only supports domains "
"'django' and 'djangojs'")
if self.domain == 'djangojs':
exts = extensions if extensions else ['js']
else:
exts = extensions if extensions else ['html', 'txt', 'py']
self.extensions = handle_extensions(exts)
        if (not locale and not exclude and not process_all) or self.domain is None:
raise CommandError("Type '%s help %s' for usage information." % (
os.path.basename(sys.argv[0]), sys.argv[1]))
if self.verbosity > 1:
self.stdout.write('examining files with the extensions: %s\n'
% get_text_list(list(self.extensions), 'and'))
self.invoked_for_django = False
self.locale_paths = []
self.default_locale_path = None
if os.path.isdir(os.path.join('conf', 'locale')):
self.locale_paths = [os.path.abspath(os.path.join('conf', 'locale'))]
self.default_locale_path = self.locale_paths[0]
self.invoked_for_django = True
else:
self.locale_paths.extend(settings.LOCALE_PATHS)
# Allow to run makemessages inside an app dir
if os.path.isdir('locale'):
self.locale_paths.append(os.path.abspath('locale'))
if self.locale_paths:
self.default_locale_path = self.locale_paths[0]
if not os.path.exists(self.default_locale_path):
os.makedirs(self.default_locale_path)
# Build locale list
locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % self.default_locale_path))
all_locales = map(os.path.basename, locale_dirs)
# Account for excluded locales
if process_all:
locales = all_locales
else:
locales = locale or all_locales
locales = set(locales) - set(exclude)
if locales:
check_programs('msguniq', 'msgmerge', 'msgattrib')
check_programs('xgettext')
try:
potfiles = self.build_potfiles()
# Build po files for each selected locale
for locale in locales:
if self.verbosity > 0:
self.stdout.write("processing locale %s\n" % locale)
for potfile in potfiles:
self.write_po_file(potfile, locale)
finally:
if not self.keep_pot:
self.remove_potfiles()
@cached_property
def gettext_version(self):
# Gettext tools will output system-encoded bytestrings instead of UTF-8,
# when looking up the version. It's especially a problem on Windows.
out, err, status = popen_wrapper(
['xgettext', '--version'],
stdout_encoding=DEFAULT_LOCALE_ENCODING,
)
m = re.search(r'(\d+)\.(\d+)\.?(\d+)?', out)
if m:
return tuple(int(d) for d in m.groups() if d is not None)
else:
raise CommandError("Unable to get gettext version. Is it installed?")
def build_potfiles(self):
"""
Build pot files and apply msguniq to them.
"""
file_list = self.find_files(".")
self.remove_potfiles()
self.process_files(file_list)
potfiles = []
for path in self.locale_paths:
potfile = os.path.join(path, '%s.pot' % str(self.domain))
if not os.path.exists(potfile):
continue
args = ['msguniq'] + self.msguniq_options + [potfile]
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msguniq\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
with io.open(potfile, 'w', encoding='utf-8') as fp:
fp.write(msgs)
potfiles.append(potfile)
return potfiles
def remove_potfiles(self):
for path in self.locale_paths:
pot_path = os.path.join(path, '%s.pot' % str(self.domain))
if os.path.exists(pot_path):
os.unlink(pot_path)
def find_files(self, root):
"""
Helper method to get all files in the given root. Also check that there
is a matching locale dir for each file.
"""
def is_ignored(path, ignore_patterns):
"""
Check if the given path should be ignored or not.
"""
filename = os.path.basename(path)
def ignore(pattern):
return fnmatch.fnmatchcase(filename, pattern) or fnmatch.fnmatchcase(path, pattern)
return any(ignore(pattern) for pattern in ignore_patterns)
ignore_patterns = [os.path.normcase(p) for p in self.ignore_patterns]
dir_suffixes = {'%s*' % path_sep for path_sep in {'/', os.sep}}
norm_patterns = []
for p in ignore_patterns:
for dir_suffix in dir_suffixes:
if p.endswith(dir_suffix):
norm_patterns.append(p[:-len(dir_suffix)])
break
else:
norm_patterns.append(p)
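        # Added example (hypothetical pattern): an --ignore value of "docs/*"
        # is normalised to "docs" here, so directories named "docs" are pruned
        # from the walk below instead of being matched file by file.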
all_files = []
ignored_roots = [os.path.normpath(p) for p in (settings.MEDIA_ROOT, settings.STATIC_ROOT) if p]
for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=self.symlinks):
for dirname in dirnames[:]:
if (is_ignored(os.path.normpath(os.path.join(dirpath, dirname)), norm_patterns) or
os.path.join(os.path.abspath(dirpath), dirname) in ignored_roots):
dirnames.remove(dirname)
if self.verbosity > 1:
self.stdout.write('ignoring directory %s\n' % dirname)
elif dirname == 'locale':
dirnames.remove(dirname)
self.locale_paths.insert(0, os.path.join(os.path.abspath(dirpath), dirname))
for filename in filenames:
file_path = os.path.normpath(os.path.join(dirpath, filename))
file_ext = os.path.splitext(filename)[1]
if file_ext not in self.extensions or is_ignored(file_path, self.ignore_patterns):
if self.verbosity > 1:
self.stdout.write('ignoring file %s in %s\n' % (filename, dirpath))
else:
locale_dir = None
for path in self.locale_paths:
if os.path.abspath(dirpath).startswith(os.path.dirname(path)):
locale_dir = path
break
if not locale_dir:
locale_dir = self.default_locale_path
if not locale_dir:
locale_dir = NO_LOCALE_DIR
all_files.append(self.translatable_file_class(dirpath, filename, locale_dir))
return sorted(all_files)
def process_files(self, file_list):
"""
Group translatable files by locale directory and run pot file build
process for each group.
"""
file_groups = {}
for translatable in file_list:
file_group = file_groups.setdefault(translatable.locale_dir, [])
file_group.append(translatable)
for locale_dir, files in file_groups.items():
self.process_locale_dir(locale_dir, files)
def process_locale_dir(self, locale_dir, files):
"""
Extract translatable literals from the specified files, creating or
updating the POT file for a given locale directory.
Uses the xgettext GNU gettext utility.
"""
build_files = []
for translatable in files:
if self.verbosity > 1:
self.stdout.write('processing file %s in %s\n' % (
translatable.file, translatable.dirpath
))
if self.domain not in ('djangojs', 'django'):
continue
build_file = self.build_file_class(self, self.domain, translatable)
try:
build_file.preprocess()
except UnicodeDecodeError as e:
self.stdout.write(
'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % (
translatable.file, translatable.dirpath, e,
)
)
continue
build_files.append(build_file)
if self.domain == 'djangojs':
is_templatized = build_file.is_templatized
args = [
'xgettext',
'-d', self.domain,
'--language=%s' % ('C' if is_templatized else 'JavaScript',),
'--keyword=gettext_noop',
'--keyword=gettext_lazy',
'--keyword=ngettext_lazy:1,2',
'--keyword=pgettext:1c,2',
'--keyword=npgettext:1c,2,3',
'--output=-',
]
elif self.domain == 'django':
args = [
'xgettext',
'-d', self.domain,
'--language=Python',
'--keyword=gettext_noop',
'--keyword=gettext_lazy',
'--keyword=ngettext_lazy:1,2',
'--keyword=ugettext_noop',
'--keyword=ugettext_lazy',
'--keyword=ungettext_lazy:1,2',
'--keyword=pgettext:1c,2',
'--keyword=npgettext:1c,2,3',
'--keyword=pgettext_lazy:1c,2',
'--keyword=npgettext_lazy:1c,2,3',
'--output=-',
]
else:
return
input_files = [bf.work_path for bf in build_files]
with NamedTemporaryFile(mode='w+') as input_files_list:
input_files_list.write('\n'.join(input_files))
input_files_list.flush()
args.extend(['--files-from', input_files_list.name])
args.extend(self.xgettext_options)
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
for build_file in build_files:
build_file.cleanup()
raise CommandError(
'errors happened while running xgettext on %s\n%s' %
('\n'.join(input_files), errors)
)
elif self.verbosity > 0:
# Print warnings
self.stdout.write(errors)
if msgs:
if locale_dir is NO_LOCALE_DIR:
file_path = os.path.normpath(build_files[0].path)
raise CommandError(
'Unable to find a locale path to store translations for '
'file %s' % file_path
)
for build_file in build_files:
msgs = build_file.postprocess_messages(msgs)
potfile = os.path.join(locale_dir, '%s.pot' % str(self.domain))
write_pot_file(potfile, msgs)
for build_file in build_files:
build_file.cleanup()
def write_po_file(self, potfile, locale):
"""
Creates or updates the PO file for self.domain and :param locale:.
Uses contents of the existing :param potfile:.
        Uses the msgmerge and msgattrib GNU gettext utilities.
"""
basedir = os.path.join(os.path.dirname(potfile), locale, 'LC_MESSAGES')
if not os.path.isdir(basedir):
os.makedirs(basedir)
pofile = os.path.join(basedir, '%s.po' % str(self.domain))
if os.path.exists(pofile):
args = ['msgmerge'] + self.msgmerge_options + [pofile, potfile]
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgmerge\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
else:
with io.open(potfile, 'r', encoding='utf-8') as fp:
msgs = fp.read()
if not self.invoked_for_django:
msgs = self.copy_plural_forms(msgs, locale)
msgs = msgs.replace(
"#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % self.domain, "")
with io.open(pofile, 'w', encoding='utf-8') as fp:
fp.write(msgs)
if self.no_obsolete:
args = ['msgattrib'] + self.msgattrib_options + ['-o', pofile, pofile]
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgattrib\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
def copy_plural_forms(self, msgs, locale):
"""
        Copies the Plural-Forms header from Django's own catalog for the given
        locale into the msgs string, inserting it at the right place. msgs
        should be the
contents of a newly created .po file.
"""
django_dir = os.path.normpath(os.path.join(os.path.dirname(upath(django.__file__))))
if self.domain == 'djangojs':
domains = ('djangojs', 'django')
else:
domains = ('django',)
for domain in domains:
django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
if os.path.exists(django_po):
with io.open(django_po, 'r', encoding='utf-8') as fp:
m = plural_forms_re.search(fp.read())
if m:
plural_form_line = force_str(m.group('value'))
if self.verbosity > 1:
self.stdout.write("copying plural forms: %s\n" % plural_form_line)
lines = []
found = False
for line in msgs.split('\n'):
if not found and (not line or plural_forms_re.search(line)):
line = '%s\n' % plural_form_line
found = True
lines.append(line)
msgs = '\n'.join(lines)
break
return msgs
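    # Added sketch (assumed catalog content): a freshly created .po usually
    # carries the stub line
    #     "Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\n"
    # which plural_forms_re matches, so the loop above swaps it for the line
    # copied from Django's catalog; failing that, the copied line takes the
    # place of the first blank line after the header.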
| |
"""
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <matthieu.perrot@gmail.com>
#
# License: BSD Style.
import warnings
import numpy as np
import scipy.ndimage as ndimage
from .base import BaseEstimator, ClassifierMixin
# FIXME :
# - in fit(X, y) method, many checks are common with other models
# (in particular LDA model) and should be factorized:
# maybe in BaseEstimator ?
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
Parameters
----------
X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector relative to X
priors : array, optional, shape = [n_classes]
Priors on classes
Attributes
----------
`means_` : array-like, shape = [n_classes, n_features]
Class means
`priors_` : array-like, shape = [n_classes]
Class priors (sum to 1)
`covariances_` : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None)
    >>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LDA
"""
def __init__(self, priors=None):
self.priors = np.asarray(priors) if priors is not None else None
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
self.covariances_ attribute.
"""
X = np.asarray(X)
y = np.asarray(y)
if X.ndim != 2:
raise ValueError('X must be a 2D array')
if X.shape[0] != y.shape[0]:
raise ValueError(
'Incompatible shapes: X has %s samples, while y '
'has %s' % (X.shape[0], y.shape[0]))
if y.dtype.char.lower() not in ('b', 'h', 'i'):
# We need integer values to be able to use
# ndimage.measurements and np.bincount on numpy >= 2.0.
# We currently support (u)int8, (u)int16 and (u)int32.
# Note that versions of scipy >= 0.8 can also accept
# (u)int64. We however don't support it for backwards
# compatibility.
y = y.astype(np.int32)
n_samples, n_features = X.shape
classes = np.unique(y)
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
classes_indices = [(y == c).ravel() for c in classes]
if self.priors is None:
counts = np.array(ndimage.measurements.sum(
np.ones(n_samples, dtype=y.dtype), y, index=classes))
self.priors_ = counts / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for group_indices in classes_indices:
Xg = X[group_indices, :]
meang = Xg.mean(0)
means.append(meang)
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings = np.asarray(scalings)
self.rotations = rotations
self.classes = classes
return self
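    # Added note (not in the original source): after fit(), each class
    # covariance can be rebuilt from the stored SVD pieces as
    #     rotations[k].dot(np.diag(scalings[k])).dot(rotations[k].T)
    # which is exactly what is appended to self.covariances_ when
    # store_covariances=True.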
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes]
Decision function values related to each class, per sample.
"""
X = np.asarray(X)
norm2 = []
for i in range(len(self.classes)):
R = self.rotations[i]
S = self.scalings[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
return (-0.5 * (norm2 + np.sum(np.log(self.scalings), 1))
+ np.log(self.priors_))
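    # Added note (not from the original source): up to an additive constant,
    # the value returned above for class k is the log-posterior
    #     -0.5 * ((x - mu_k)^T Sigma_k^{-1} (x - mu_k) + log|Sigma_k|) + log(prior_k)
    # with Sigma_k reconstructed from the per-class SVD computed in fit().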
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self.decision_function(X)
y_pred = self.classes[d.argmax(1)]
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self.decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
        # Subtract the per-sample maximum (standard softmax trick) so that
        # np.exp cannot overflow; the normalisation below is unaffected.
        likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| |
"""Support for LaCrosse sensor components."""
from datetime import timedelta
import logging
import pylacrosse
from serial import SerialException
import voluptuous as vol
from homeassistant.components.sensor import ENTITY_ID_FORMAT, PLATFORM_SCHEMA
from homeassistant.const import (
CONF_DEVICE,
CONF_ID,
CONF_NAME,
CONF_SENSORS,
CONF_TYPE,
EVENT_HOMEASSISTANT_STOP,
TEMP_CELSIUS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_BAUD = "baud"
CONF_DATARATE = "datarate"
CONF_EXPIRE_AFTER = "expire_after"
CONF_FREQUENCY = "frequency"
CONF_JEELINK_LED = "led"
CONF_TOGGLE_INTERVAL = "toggle_interval"
CONF_TOGGLE_MASK = "toggle_mask"
DEFAULT_DEVICE = "/dev/ttyUSB0"
DEFAULT_BAUD = "57600"
DEFAULT_EXPIRE_AFTER = 300
TYPES = ["battery", "humidity", "temperature"]
SENSOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_ID): cv.positive_int,
vol.Required(CONF_TYPE): vol.In(TYPES),
vol.Optional(CONF_EXPIRE_AFTER): cv.positive_int,
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(SENSOR_SCHEMA),
vol.Optional(CONF_BAUD, default=DEFAULT_BAUD): cv.string,
vol.Optional(CONF_DATARATE): cv.positive_int,
vol.Optional(CONF_DEVICE, default=DEFAULT_DEVICE): cv.string,
vol.Optional(CONF_FREQUENCY): cv.positive_int,
vol.Optional(CONF_JEELINK_LED): cv.boolean,
vol.Optional(CONF_TOGGLE_INTERVAL): cv.positive_int,
vol.Optional(CONF_TOGGLE_MASK): cv.positive_int,
}
)
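# A minimal configuration sketch (hypothetical values; the platform name
# "lacrosse" is assumed, it is not stated in this module) that the schema above
# is meant to accept in configuration.yaml:
#
#     sensor:
#       - platform: lacrosse
#         device: /dev/ttyUSB0
#         baud: 57600
#         sensors:
#           living_room_temperature:
#             id: 42
#             type: temperature
#             expire_after: 300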
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the LaCrosse sensors."""
usb_device = config.get(CONF_DEVICE)
baud = int(config.get(CONF_BAUD))
expire_after = config.get(CONF_EXPIRE_AFTER)
_LOGGER.debug("%s %s", usb_device, baud)
try:
lacrosse = pylacrosse.LaCrosse(usb_device, baud)
lacrosse.open()
except SerialException as exc:
_LOGGER.warning("Unable to open serial port: %s", exc)
return False
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, lacrosse.close)
if CONF_JEELINK_LED in config:
lacrosse.led_mode_state(config.get(CONF_JEELINK_LED))
if CONF_FREQUENCY in config:
lacrosse.set_frequency(config.get(CONF_FREQUENCY))
if CONF_DATARATE in config:
lacrosse.set_datarate(config.get(CONF_DATARATE))
if CONF_TOGGLE_INTERVAL in config:
lacrosse.set_toggle_interval(config.get(CONF_TOGGLE_INTERVAL))
if CONF_TOGGLE_MASK in config:
lacrosse.set_toggle_mask(config.get(CONF_TOGGLE_MASK))
lacrosse.start_scan()
sensors = []
for device, device_config in config[CONF_SENSORS].items():
_LOGGER.debug("%s %s", device, device_config)
typ = device_config.get(CONF_TYPE)
sensor_class = TYPE_CLASSES[typ]
name = device_config.get(CONF_NAME, device)
sensors.append(
sensor_class(hass, lacrosse, device, name, expire_after, device_config)
)
add_entities(sensors)
class LaCrosseSensor(Entity):
"""Implementation of a Lacrosse sensor."""
_temperature = None
_humidity = None
_low_battery = None
_new_battery = None
def __init__(self, hass, lacrosse, device_id, name, expire_after, config):
"""Initialize the sensor."""
self.hass = hass
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass
)
self._config = config
self._name = name
self._value = None
self._expire_after = expire_after
self._expiration_trigger = None
lacrosse.register_callback(
int(self._config["id"]), self._callback_lacrosse, None
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
attributes = {
"low_battery": self._low_battery,
"new_battery": self._new_battery,
}
return attributes
def _callback_lacrosse(self, lacrosse_sensor, user_data):
"""Handle a function that is called from pylacrosse with new values."""
if self._expire_after is not None and self._expire_after > 0:
# Reset old trigger
if self._expiration_trigger:
self._expiration_trigger()
self._expiration_trigger = None
# Set new trigger
expiration_at = dt_util.utcnow() + timedelta(seconds=self._expire_after)
self._expiration_trigger = async_track_point_in_utc_time(
self.hass, self.value_is_expired, expiration_at
)
self._temperature = lacrosse_sensor.temperature
self._humidity = lacrosse_sensor.humidity
self._low_battery = lacrosse_sensor.low_battery
self._new_battery = lacrosse_sensor.new_battery
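    # Added note (assumed timing, not from the original source): when an
    # expire_after of e.g. 300 seconds is in effect, a reading received at
    # 12:00:00 schedules value_is_expired() for 12:05:00; any newer reading
    # first cancels the pending trigger before re-arming it.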
@callback
def value_is_expired(self, *_):
"""Triggered when value is expired."""
self._expiration_trigger = None
self._value = None
self.async_schedule_update_ha_state()
class LaCrosseTemperature(LaCrosseSensor):
"""Implementation of a Lacrosse temperature sensor."""
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def state(self):
"""Return the state of the sensor."""
return self._temperature
class LaCrosseHumidity(LaCrosseSensor):
"""Implementation of a Lacrosse humidity sensor."""
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return "%"
@property
def state(self):
"""Return the state of the sensor."""
return self._humidity
@property
def icon(self):
"""Icon to use in the frontend."""
return "mdi:water-percent"
class LaCrosseBattery(LaCrosseSensor):
"""Implementation of a Lacrosse battery sensor."""
@property
def state(self):
"""Return the state of the sensor."""
if self._low_battery is None:
state = None
elif self._low_battery is True:
state = "low"
else:
state = "ok"
return state
@property
def icon(self):
"""Icon to use in the frontend."""
if self._low_battery is None:
icon = "mdi:battery-unknown"
elif self._low_battery is True:
icon = "mdi:battery-alert"
else:
icon = "mdi:battery"
return icon
TYPE_CLASSES = {
"temperature": LaCrosseTemperature,
"humidity": LaCrosseHumidity,
"battery": LaCrosseBattery,
}
|