#!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
"""Implements VFSHandlers for files on the client."""
import logging
import os
import platform
import re
import sys
import threading
from grr.client import client_utils
from grr.client import vfs
from grr.lib import utils
from grr.lib.rdfvalues import client
from grr.lib.rdfvalues import paths
# File handles are cached here. They expire after a couple minutes so
# we don't keep files locked on the client.
FILE_HANDLE_CACHE = utils.TimeBasedCache(max_age=300)
class LockedFileHandle(object):
"""An object which encapsulates access to a file."""
def __init__(self, filename):
self.lock = threading.RLock()
self.fd = open(filename, "rb")
self.filename = filename
def Seek(self, offset, whence=0):
self.fd.seek(offset, whence)
def Read(self, length):
return self.fd.read(length)
def Tell(self):
return self.fd.tell()
def Close(self):
with self.lock:
self.fd.close()
class FileHandleManager(object):
"""An exclusive accesssor for a filehandle."""
def __init__(self, filename):
self.filename = filename
def __enter__(self):
try:
self.fd = FILE_HANDLE_CACHE.Get(self.filename)
except KeyError:
self.fd = LockedFileHandle(self.filename)
FILE_HANDLE_CACHE.Put(self.filename, self.fd)
# Wait for exclusive access to this file handle.
self.fd.lock.acquire()
return self.fd
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
self.fd.lock.release()
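# Illustrative usage sketch (added commentary, not part of the original
# module): FileHandleManager is meant to be used as a context manager so the
# cached LockedFileHandle is locked for the duration of the block, e.g.
#
#   with FileHandleManager("/etc/hosts") as fd:
#     fd.Seek(0)
#     data = fd.Read(4096)
#
# The handle remains in FILE_HANDLE_CACHE and expires after max_age seconds.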
def MakeStatResponse(st, pathspec):
"""Creates a StatEntry."""
response = client.StatEntry(pathspec=pathspec)
if st is None:
# Special case empty stat if we don't have a real value, e.g. we get Access
# denied when stating a file. We still want to give back a value so we let
# the defaults from the proto pass through.
pass
else:
# Now fill in the stat value
for attr in ["st_mode",
"st_ino",
"st_dev",
"st_nlink",
"st_uid",
"st_gid",
"st_size",
"st_atime",
"st_mtime",
"st_ctime",
"st_blocks",
"st_blksize",
"st_rdev"]:
try:
value = long(getattr(st, attr))
if value < 0: value &= 0xFFFFFFFF
setattr(response, attr, value)
except AttributeError:
pass
return response
class File(vfs.VFSHandler):
"""Read a regular file."""
supported_pathtype = paths.PathSpec.PathType.OS
auto_register = True
# The file descriptor of the OS file.
fd = None
files = None
# Directories do not have a size.
size = None
  # On Windows, reads from devices must be aligned.
alignment = 1
file_offset = 0
def __init__(self, base_fd, pathspec=None, progress_callback=None):
super(File, self).__init__(base_fd, pathspec=pathspec,
progress_callback=progress_callback)
if base_fd is None:
self.pathspec.Append(pathspec)
# We can stack on another directory, which means we concatenate their
# directory with ours.
elif base_fd.IsDirectory():
self.pathspec.last.path = utils.JoinPath(self.pathspec.last.path,
pathspec.path)
else:
raise IOError("File handler can not be stacked on another handler.")
self.path = self.pathspec.last.path
# We can optionally apply a global offset to the file.
if self.pathspec[0].HasField("offset"):
self.file_offset = self.pathspec[0].offset
self.pathspec.last.path_options = paths.PathSpec.Options.CASE_LITERAL
self.WindowsHacks()
self.filename = client_utils.CanonicalPathToLocalPath(self.path)
error = None
# Pythonic way - duck typing. Is the handle a directory?
try:
if not self.files:
# Note that the encoding of local path is system specific
local_path = client_utils.CanonicalPathToLocalPath(self.path + "/")
self.files = [utils.SmartUnicode(entry) for entry in
os.listdir(local_path)]
# Some filesystems do not support unicode properly
except UnicodeEncodeError as e:
raise IOError(str(e))
except (IOError, OSError) as e:
self.files = []
error = e
# Ok, it's not. Is it a file then?
try:
with FileHandleManager(self.filename) as fd:
# Work out how large the file is
if self.size is None:
fd.Seek(0, 2)
self.size = fd.Tell() - self.file_offset
error = None
# Some filesystems do not support unicode properly
except UnicodeEncodeError as e:
raise IOError(str(e))
except IOError as e:
if error:
error = e
if error is not None:
raise error # pylint: disable=raising-bad-type
def WindowsHacks(self):
"""Windows specific hacks to make the filesystem look normal."""
if sys.platform == "win32":
import win32api # pylint: disable=g-import-not-at-top
      # Make the drive letters appear as the topmost level of the filesystem.
if self.path == "/":
self.files = win32api.GetLogicalDriveStrings().split("\x00")
# Remove empty strings and strip trailing backslashes.
self.files = [drive.rstrip("\\") for drive in self.files if drive]
      # This regex will match the various Windows devices. Raw hard disk devices
      # must be considered files; however, on Windows, listing them as
      # directories also works. Since the code above distinguishes between files
      # and directories using the file listing property, we must force raw
      # devices to be treated as files.
elif re.match(r"/*\\\\.\\[^\\]+\\?$", self.path) is not None:
        # Special case: Windows devices can't seek to the end, so just lie
        # about the size.
self.size = 0x7fffffffffffffff
        # Windows raw devices can be opened in two incompatible modes. With a
        # trailing \ they look like a directory, but without it they are the raw
        # device. In GRR we only support opening devices in raw mode, so ensure
        # that we never append a \ to a raw device name.
self.path = self.path.rstrip("\\")
# In windows raw devices must be accessed using sector alignment.
self.alignment = 512
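  # Added commentary (not original code): the device regex above matches raw
  # device paths such as "\\.\C:" or "\\.\PhysicalDrive0" (optionally preceded
  # by "/" and/or followed by a trailing "\"), while ordinary paths such as
  # "/C:/Windows" fall through to normal file handling. With alignment = 512,
  # Read() rounds the requested offset down to a sector boundary and discards
  # the extra pre-padding bytes it read.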
def ListNames(self):
return self.files or []
def Read(self, length):
"""Read from the file."""
if self.progress_callback:
self.progress_callback()
with FileHandleManager(self.filename) as fd:
offset = self.file_offset + self.offset
pre_padding = offset % self.alignment
# Due to alignment we read some more data than we need to.
aligned_offset = offset - pre_padding
fd.Seek(aligned_offset)
data = fd.Read(length + pre_padding)
self.offset += len(data) - pre_padding
return data[pre_padding:]
def Stat(self, path=None):
"""Returns stat information of a specific path.
Args:
path: a Unicode string containing the path or None.
If path is None the value in self.path is used.
Returns:
a StatResponse proto
Raises:
IOError when call to os.stat() fails
"""
# Note that the encoding of local path is system specific
local_path = client_utils.CanonicalPathToLocalPath(
path or self.path)
try:
st = os.stat(local_path)
except IOError as e:
logging.info("Failed to Stat %s. Err: %s", path or self.path, e)
st = None
result = MakeStatResponse(st, self.pathspec)
# Is this a symlink? If so we need to note the real location of the file.
try:
result.symlink = utils.SmartUnicode(os.readlink(local_path))
except (OSError, AttributeError):
pass
return result
def ListFiles(self):
"""List all files in the dir."""
if not self.IsDirectory():
raise IOError("%s is not a directory." % self.path)
else:
for path in self.files:
try:
response = self.Stat(utils.JoinPath(self.path, path))
pathspec = self.pathspec.Copy()
pathspec.last.path = utils.JoinPath(pathspec.last.path, path)
response.pathspec = pathspec
yield response
except OSError:
pass
def IsDirectory(self):
return self.size is None
def StatFS(self, path=None):
"""Call os.statvfs for a given list of paths. OS X and Linux only.
Note that a statvfs call for a network filesystem (e.g. NFS) that is
unavailable, e.g. due to no network, will result in the call blocking.
Args:
path: a Unicode string containing the path or None.
If path is None the value in self.path is used.
Returns:
posix.statvfs_result object
Raises:
RuntimeError: if called on windows
"""
if platform.system() == "Windows":
raise RuntimeError("os.statvfs not available on Windows")
local_path = client_utils.CanonicalPathToLocalPath(
path or self.path)
return os.statvfs(local_path)
def GetMountPoint(self, path=None):
"""Walk back from the path to find the mount point.
Args:
path: a Unicode string containing the path or None.
If path is None the value in self.path is used.
Returns:
path string of the mount point
"""
path = os.path.abspath(client_utils.CanonicalPathToLocalPath(
path or self.path))
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
from oslo_versionedobjects import exception as ovo_exc
import six
from nova.network import model as network_model
from nova.objects import fields
from nova import signature_utils
from nova import test
from nova import utils
class FakeFieldType(fields.FieldType):
def coerce(self, obj, attr, value):
return '*%s*' % value
def to_primitive(self, obj, attr, value):
return '!%s!' % value
def from_primitive(self, obj, attr, value):
return value[1:-1]
class FakeEnum(fields.Enum):
FROG = "frog"
PLATYPUS = "platypus"
ALLIGATOR = "alligator"
ALL = (FROG, PLATYPUS, ALLIGATOR)
def __init__(self, **kwargs):
super(FakeEnum, self).__init__(valid_values=FakeEnum.ALL,
**kwargs)
class FakeEnumAlt(fields.Enum):
FROG = "frog"
PLATYPUS = "platypus"
AARDVARK = "aardvark"
ALL = (FROG, PLATYPUS, AARDVARK)
def __init__(self, **kwargs):
super(FakeEnumAlt, self).__init__(valid_values=FakeEnumAlt.ALL,
**kwargs)
class FakeEnumField(fields.BaseEnumField):
AUTO_TYPE = FakeEnum()
class FakeEnumAltField(fields.BaseEnumField):
AUTO_TYPE = FakeEnumAlt()
class TestField(test.NoDBTestCase):
def setUp(self):
super(TestField, self).setUp()
self.field = fields.Field(FakeFieldType())
self.coerce_good_values = [('foo', '*foo*')]
self.coerce_bad_values = []
self.to_primitive_values = [('foo', '!foo!')]
self.from_primitive_values = [('!foo!', 'foo')]
def test_coerce_good_values(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual(out_val, self.field.coerce('obj', 'attr', in_val))
def test_coerce_bad_values(self):
for in_val in self.coerce_bad_values:
self.assertRaises((TypeError, ValueError),
self.field.coerce, 'obj', 'attr', in_val)
def test_to_primitive(self):
for in_val, prim_val in self.to_primitive_values:
self.assertEqual(prim_val, self.field.to_primitive('obj', 'attr',
in_val))
def test_from_primitive(self):
class ObjectLikeThing(object):
_context = 'context'
for prim_val, out_val in self.from_primitive_values:
self.assertEqual(out_val, self.field.from_primitive(
ObjectLikeThing, 'attr', prim_val))
def test_stringify(self):
self.assertEqual('123', self.field.stringify(123))
class TestString(TestField):
def setUp(self):
super(TestString, self).setUp()
self.field = fields.StringField()
self.coerce_good_values = [('foo', 'foo'), (1, '1'), (True, 'True')]
if six.PY2:
self.coerce_good_values.append((int(1), '1'))
self.coerce_bad_values = [None]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'123'", self.field.stringify(123))
class TestBaseEnum(TestField):
def setUp(self):
super(TestBaseEnum, self).setUp()
self.field = FakeEnumField()
self.coerce_good_values = [('frog', 'frog'),
('platypus', 'platypus'),
('alligator', 'alligator')]
self.coerce_bad_values = ['aardvark', 'wookie']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'platypus'", self.field.stringify('platypus'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'aardvark')
def test_fingerprint(self):
# Notes(yjiang5): make sure changing valid_value will be detected
# in test_objects.test_versions
field1 = FakeEnumField()
field2 = FakeEnumAltField()
self.assertNotEqual(str(field1), str(field2))
class TestEnum(TestField):
def setUp(self):
super(TestEnum, self).setUp()
self.field = fields.EnumField(
valid_values=['foo', 'bar', 1, 1, True])
self.coerce_good_values = [('foo', 'foo'), (1, '1'), (True, 'True')]
if six.PY2:
self.coerce_good_values.append((int(1), '1'))
self.coerce_bad_values = ['boo', 2, False]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'foo'", self.field.stringify('foo'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, '123')
def test_fingerprint(self):
# Notes(yjiang5): make sure changing valid_value will be detected
# in test_objects.test_versions
field1 = fields.EnumField(valid_values=['foo', 'bar'])
field2 = fields.EnumField(valid_values=['foo', 'bar1'])
self.assertNotEqual(str(field1), str(field2))
def test_without_valid_values(self):
self.assertRaises(ovo_exc.EnumValidValuesInvalidError,
fields.EnumField, 1)
def test_with_empty_values(self):
self.assertRaises(ovo_exc.EnumRequiresValidValuesError,
fields.EnumField, [])
class TestImageSignatureTypes(TestField):
# Ensure that the object definition is updated
# in step with the signature_utils module
def setUp(self):
super(TestImageSignatureTypes, self).setUp()
self.hash_field = fields.ImageSignatureHashType()
self.key_type_field = fields.ImageSignatureKeyType()
def test_hashes(self):
for hash_name in list(signature_utils.HASH_METHODS.keys()):
self.assertIn(hash_name, self.hash_field.ALL)
def test_key_types(self):
key_type_dict = signature_utils.SignatureKeyType._REGISTERED_TYPES
key_types = list(key_type_dict.keys())
for key_type in key_types:
self.assertIn(key_type, self.key_type_field.ALL)
class TestResourceClass(TestString):
def setUp(self):
super(TestResourceClass, self).setUp()
self.field = fields.ResourceClassField()
self.coerce_good_values = [
('VCPU', 'VCPU'),
('MEMORY_MB', 'MEMORY_MB'),
('DISK_GB', 'DISK_GB'),
('PCI_DEVICE', 'PCI_DEVICE'),
('SRIOV_NET_VF', 'SRIOV_NET_VF'),
('NUMA_SOCKET', 'NUMA_SOCKET'),
('NUMA_CORE', 'NUMA_CORE'),
('NUMA_THREAD', 'NUMA_THREAD'),
('NUMA_MEMORY_MB', 'NUMA_MEMORY_MB'),
('IPV4_ADDRESS', 'IPV4_ADDRESS'),
]
self.coerce_bad_values = [object(), dict()]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
class TestInteger(TestField):
def setUp(self):
super(TestInteger, self).setUp()
self.field = fields.IntegerField()
self.coerce_good_values = [(1, 1), ('1', 1)]
self.coerce_bad_values = ['foo', None]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
class TestNonNegativeInteger(TestInteger):
def setUp(self):
super(TestNonNegativeInteger, self).setUp()
self.field = fields.Field(fields.NonNegativeInteger())
self.coerce_bad_values.extend(['-2', '4.2'])
class TestFloat(TestField):
def setUp(self):
super(TestFloat, self).setUp()
self.field = fields.FloatField()
self.coerce_good_values = [(1.1, 1.1), ('1.1', 1.1)]
self.coerce_bad_values = ['foo', None]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
class TestNonNegativeFloat(TestFloat):
def setUp(self):
super(TestNonNegativeFloat, self).setUp()
self.field = fields.Field(fields.NonNegativeFloat())
self.coerce_bad_values.extend(['-4.2'])
class TestBoolean(TestField):
def setUp(self):
super(TestBoolean, self).setUp()
self.field = fields.BooleanField()
self.coerce_good_values = [(True, True), (False, False), (1, True),
('foo', True), (0, False), ('', False)]
self.coerce_bad_values = []
self.to_primitive_values = self.coerce_good_values[0:2]
self.from_primitive_values = self.coerce_good_values[0:2]
class TestDateTime(TestField):
def setUp(self):
super(TestDateTime, self).setUp()
self.dt = datetime.datetime(1955, 11, 5, tzinfo=iso8601.iso8601.Utc())
self.field = fields.DateTimeField()
self.coerce_good_values = [(self.dt, self.dt),
(utils.isotime(self.dt), self.dt)]
self.coerce_bad_values = [1, 'foo']
self.to_primitive_values = [(self.dt, utils.isotime(self.dt))]
self.from_primitive_values = [(utils.isotime(self.dt), self.dt)]
def test_stringify(self):
self.assertEqual(
'1955-11-05T18:00:00Z',
self.field.stringify(
datetime.datetime(1955, 11, 5, 18, 0, 0,
tzinfo=iso8601.iso8601.Utc())))
class TestDict(TestField):
def setUp(self):
super(TestDict, self).setUp()
self.field = fields.Field(fields.Dict(FakeFieldType()))
self.coerce_good_values = [({'foo': 'bar'}, {'foo': '*bar*'}),
({'foo': 1}, {'foo': '*1*'})]
self.coerce_bad_values = [{1: 'bar'}, 'foo']
self.to_primitive_values = [({'foo': 'bar'}, {'foo': '!bar!'})]
self.from_primitive_values = [({'foo': '!bar!'}, {'foo': 'bar'})]
def test_stringify(self):
self.assertEqual("{key=val}", self.field.stringify({'key': 'val'}))
class TestDictOfStrings(TestField):
def setUp(self):
super(TestDictOfStrings, self).setUp()
self.field = fields.DictOfStringsField()
self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}),
({'foo': 1}, {'foo': '1'})]
self.coerce_bad_values = [{1: 'bar'}, {'foo': None}, 'foo']
self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
def test_stringify(self):
self.assertEqual("{key='val'}", self.field.stringify({'key': 'val'}))
class TestDictOfIntegers(TestField):
def setUp(self):
super(TestDictOfIntegers, self).setUp()
self.field = fields.DictOfIntegersField()
self.coerce_good_values = [({'foo': '42'}, {'foo': 42}),
({'foo': 4.2}, {'foo': 4})]
self.coerce_bad_values = [{1: 'bar'}, {'foo': 'boo'},
'foo', {'foo': None}]
self.to_primitive_values = [({'foo': 42}, {'foo': 42})]
self.from_primitive_values = [({'foo': 42}, {'foo': 42})]
def test_stringify(self):
self.assertEqual("{key=42}", self.field.stringify({'key': 42}))
class TestDictOfStringsNone(TestField):
def setUp(self):
super(TestDictOfStringsNone, self).setUp()
self.field = fields.DictOfNullableStringsField()
self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}),
({'foo': 1}, {'foo': '1'}),
({'foo': None}, {'foo': None})]
self.coerce_bad_values = [{1: 'bar'}, 'foo']
self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
def test_stringify(self):
self.assertEqual("{k2=None,key='val'}",
self.field.stringify({'k2': None,
'key': 'val'}))
class TestListOfDictOfNullableStringsField(TestField):
def setUp(self):
super(TestListOfDictOfNullableStringsField, self).setUp()
self.field = fields.ListOfDictOfNullableStringsField()
self.coerce_good_values = [([{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}],
[{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}]),
([{'f': 1}, {'f1': 'b1'}],
[{'f': '1'}, {'f1': 'b1'}]),
([{'foo': None}], [{'foo': None}])]
self.coerce_bad_values = [[{1: 'a'}], ['ham', 1], ['eggs']]
self.to_primitive_values = [([{'f': 'b'}, {'f1': 'b1'}, {'f2': None}],
[{'f': 'b'}, {'f1': 'b1'}, {'f2': None}])]
self.from_primitive_values = [([{'f': 'b'}, {'f1': 'b1'},
{'f2': None}],
[{'f': 'b'}, {'f1': 'b1'},
{'f2': None}])]
def test_stringify(self):
self.assertEqual("[{f=None,f1='b1'},{f2='b2'}]",
self.field.stringify(
[{'f': None, 'f1': 'b1'}, {'f2': 'b2'}]))
class TestList(TestField):
def setUp(self):
super(TestList, self).setUp()
self.field = fields.Field(fields.List(FakeFieldType()))
self.coerce_good_values = [(['foo', 'bar'], ['*foo*', '*bar*'])]
self.coerce_bad_values = ['foo']
self.to_primitive_values = [(['foo'], ['!foo!'])]
self.from_primitive_values = [(['!foo!'], ['foo'])]
def test_stringify(self):
self.assertEqual('[123]', self.field.stringify([123]))
class TestListOfStrings(TestField):
def setUp(self):
super(TestListOfStrings, self).setUp()
self.field = fields.ListOfStringsField()
self.coerce_good_values = [(['foo', 'bar'], ['foo', 'bar'])]
self.coerce_bad_values = ['foo']
self.to_primitive_values = [(['foo'], ['foo'])]
self.from_primitive_values = [(['foo'], ['foo'])]
def test_stringify(self):
self.assertEqual("['abc']", self.field.stringify(['abc']))
class TestSet(TestField):
def setUp(self):
super(TestSet, self).setUp()
self.field = fields.Field(fields.Set(FakeFieldType()))
self.coerce_good_values = [(set(['foo', 'bar']),
set(['*foo*', '*bar*']))]
self.coerce_bad_values = [['foo'], {'foo': 'bar'}]
self.to_primitive_values = [(set(['foo']), tuple(['!foo!']))]
self.from_primitive_values = [(tuple(['!foo!']), set(['foo']))]
def test_stringify(self):
self.assertEqual('set([123])', self.field.stringify(set([123])))
class TestSetOfIntegers(TestField):
def setUp(self):
super(TestSetOfIntegers, self).setUp()
self.field = fields.SetOfIntegersField()
self.coerce_good_values = [(set(['1', 2]),
set([1, 2]))]
self.coerce_bad_values = [set(['foo'])]
self.to_primitive_values = [(set([1]), tuple([1]))]
self.from_primitive_values = [(tuple([1]), set([1]))]
def test_stringify(self):
self.assertEqual('set([1,2])', self.field.stringify(set([1, 2])))
class TestListOfSetsOfIntegers(TestField):
def setUp(self):
super(TestListOfSetsOfIntegers, self).setUp()
self.field = fields.ListOfSetsOfIntegersField()
self.coerce_good_values = [([set(['1', 2]), set([3, '4'])],
[set([1, 2]), set([3, 4])])]
self.coerce_bad_values = [[set(['foo'])]]
self.to_primitive_values = [([set([1])], [tuple([1])])]
self.from_primitive_values = [([tuple([1])], [set([1])])]
def test_stringify(self):
self.assertEqual('[set([1,2])]', self.field.stringify([set([1, 2])]))
class TestNetworkModel(TestField):
def setUp(self):
super(TestNetworkModel, self).setUp()
model = network_model.NetworkInfo()
self.field = fields.Field(fields.NetworkModel())
self.coerce_good_values = [(model, model), (model.json(), model)]
self.coerce_bad_values = [[], 'foo']
self.to_primitive_values = [(model, model.json())]
self.from_primitive_values = [(model.json(), model)]
def test_stringify(self):
networkinfo = network_model.NetworkInfo()
networkinfo.append(network_model.VIF(id=123))
networkinfo.append(network_model.VIF(id=456))
self.assertEqual('NetworkModel(123,456)',
self.field.stringify(networkinfo))
class TestNotificationPriority(TestField):
def setUp(self):
super(TestNotificationPriority, self).setUp()
self.field = fields.NotificationPriorityField()
self.coerce_good_values = [('audit', 'audit'),
('critical', 'critical'),
('debug', 'debug'),
('error', 'error'),
('sample', 'sample'),
('warn', 'warn')]
self.coerce_bad_values = ['warning']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'warn'", self.field.stringify('warn'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'warning')
class TestNotificationPhase(TestField):
def setUp(self):
super(TestNotificationPhase, self).setUp()
self.field = fields.NotificationPhaseField()
self.coerce_good_values = [('start', 'start'),
('end', 'end'),
('error', 'error')]
self.coerce_bad_values = ['begin']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'error'", self.field.stringify('error'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'begin')
class TestNotificationAction(TestField):
def setUp(self):
super(TestNotificationAction, self).setUp()
self.field = fields.NotificationActionField()
self.coerce_good_values = [('update', 'update')]
self.coerce_bad_values = ['magic']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'update'", self.field.stringify('update'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'magic')
class TestPCIAddress(TestField):
def setUp(self):
super(TestPCIAddress, self).setUp()
self.field = fields.Field(fields.PCIAddressField())
self.coerce_good_values = [('0000:00:02.0', '0000:00:02.0')]
self.coerce_bad_values = [
'000:00:02.0',
'0000:0:02.0',
'0000:00:2.0',
'0000:00:02.',
'-000:00:02.0',
'0000:0-:02.0',
'0000:00:-2.0',
'0000:00:02.-',
'000000:02.0',
'0000:0:02.0',
'0000:00:020',
]
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
class TestUSBAddress(TestField):
def setUp(self):
super(TestUSBAddress, self).setUp()
self.field = fields.Field(fields.USBAddressField())
self.coerce_good_values = [('0:0', '0:0')]
self.coerce_bad_values = [
'00',
'0:',
'0.0',
'-.0',
]
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
class TestSCSIAddress(TestField):
def setUp(self):
super(TestSCSIAddress, self).setUp()
self.field = fields.Field(fields.SCSIAddressField())
self.coerce_good_values = [('1:0:2:0', '1:0:2:0')]
self.coerce_bad_values = [
'1:0:2',
'-:0:2:0',
'1:-:2:0',
'1:0:-:0',
'1:0:2:-',
]
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
class TestIDEAddress(TestField):
def setUp(self):
super(TestIDEAddress, self).setUp()
self.field = fields.Field(fields.IDEAddressField())
self.coerce_good_values = [('0:0', '0:0')]
self.coerce_bad_values = [
'0:2',
'00',
'0',
]
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
class TestSecureBoot(TestField):
def setUp(self):
super(TestSecureBoot, self).setUp()
self.field = fields.SecureBoot()
self.coerce_good_values = [('required', 'required'),
('disabled', 'disabled'),
('optional', 'optional')]
self.coerce_bad_values = ['enabled']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'required'", self.field.stringify('required'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'enabled')
# -*- coding: utf-8 -*-
import datetime
import json
import os
import os.path
import requests
from . import sitecfg
esi_proxies = None
def set_esi_proxies(proxies: dict):
global esi_proxies
esi_proxies = proxies
class ESIException(Exception):
def __init__(self, msg: str = ''):
self.msg = msg
def error_string(self) -> str:
return self.msg
def analyze_esi_response_headers(headers: dict) -> None:
"""
Keep track of ESI headers: watch for deprecated endpoints
and error rate limiting
    :param headers: requests response headers dict
:return:
"""
lines_to_log = []
dt_now_str = str(datetime.datetime.utcnow()) + ' UTC: ' # '2018-03-09 11:16:11.178443 UTC: '
if 'warning' in headers:
        lines_to_log.append(dt_now_str + 'warning header: {}\n'.format(headers['warning']))
if 'X-ESI-Error-Limit-Remain' in headers:
errors_remain = int(headers['X-ESI-Error-Limit-Remain'])
        if errors_remain < 10:
            lines_to_log.append(dt_now_str + 'X-ESI-Error-Limit-Remain is critically low: {}\n'.format(errors_remain))
        elif errors_remain < 50:
            lines_to_log.append(dt_now_str + 'X-ESI-Error-Limit-Remain is low: {}\n'.format(errors_remain))
if len(lines_to_log) < 1:
return
try:
# auto-create logs subdir
if not os.path.isdir('logs'):
os.mkdir('logs')
fn = './logs/esi-warnings.log'
with open(fn, mode='at', encoding='utf-8') as f:
f.writelines(lines_to_log)
except IOError:
pass
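# Illustrative example (header values below are hypothetical): a throttled or
# deprecated-endpoint response might carry headers such as
#   warning: 299 - This endpoint is deprecated.
#   X-ESI-Error-Limit-Remain: 7
# in which case analyze_esi_response_headers() appends both observations to
# ./logs/esi-warnings.log (the second one because 7 < 10).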
def universe_names(cfg: sitecfg.SiteConfig, ids_list: list) -> list:
global esi_proxies
ret = []
error_str = ''
if len(ids_list) <= 0:
return ret
try:
# https://esi.evetech.net/ui/?version=latest#/Universe/post_universe_names
url = '{}/universe/names/'.format(cfg.ESI_BASE_URL)
ids_str = '['
for an_id in set(ids_list):
if len(ids_str) > 1:
ids_str += ','
ids_str += str(an_id)
ids_str += ']'
r = requests.post(url,
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': cfg.SSO_USER_AGENT
},
data=ids_str,
proxies=esi_proxies,
timeout=20)
response_text = r.text
if r.status_code == 200:
ret = json.loads(response_text)
analyze_esi_response_headers(r.headers)
else:
obj = json.loads(response_text)
if 'error' in obj:
error_str = 'ESI error: {}'.format(obj['error'])
else:
error_str = 'Error connecting to ESI server: HTTP status {}'.format(r.status_code)
except requests.exceptions.RequestException as e:
        error_str = 'Error connecting to ESI server: {}'.format(str(e))
except json.JSONDecodeError:
error_str = 'Failed to parse response JSON from CCP ESI server!'
if error_str != '':
raise ESIException(error_str)
# ret == [{'category': 'character', 'name': 'Xur Hermit', 'id': 2114246032}]
return ret
def public_data(cfg: sitecfg.SiteConfig, char_id: int) -> dict:
global esi_proxies
ret = {
'error': '',
'char_id': char_id,
'char_name': '',
'corp_id': 0,
'corp_name': '',
'corp_ticker': '',
'corp_member_count': 0,
'ally_id': 0
}
try:
        # We need to send 2 requests: first get corporation_id from the character
        # info, then get the corporation name by corporation_id. Neither of these
        # calls requires authentication in ESI scopes.
# 1. first request for character public details
# https://esi.tech.ccp.is/latest/#!/Character/get_characters_character_id
# This route is cached for up to 3600 seconds
url = '{}/characters/{}/'.format(cfg.ESI_BASE_URL, char_id)
r = requests.get(url,
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': cfg.SSO_USER_AGENT
},
proxies=esi_proxies,
timeout=10)
obj = json.loads(r.text)
if r.status_code == 200:
details = json.loads(r.text)
ret['char_name'] = details['name']
ret['corp_id'] = details['corporation_id']
analyze_esi_response_headers(r.headers)
else:
if 'error' in obj:
ret['error'] = 'ESI error: {}'.format(obj['error'])
else:
ret['error'] = 'Error connecting to ESI server: HTTP status {}'.format(r.status_code)
# 2. second request for corporation public details
# https://esi.tech.ccp.is/latest/#!/Corporation/get_corporations_corporation_id
# This route is cached for up to 3600 seconds
url = '{}/corporations/{}/'.format(cfg.ESI_BASE_URL, ret['corp_id'])
r = requests.get(url,
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': cfg.SSO_USER_AGENT
},
proxies=esi_proxies,
timeout=10)
obj = json.loads(r.text)
if r.status_code == 200:
details = json.loads(r.text)
ret['corp_name'] = str(details['name'])
ret['corp_ticker'] = str(details['ticker'])
ret['corp_member_count'] = str(details['member_count'])
            if 'alliance_id' in details:  # it may not be present
ret['ally_id'] = str(details['alliance_id'])
analyze_esi_response_headers(r.headers)
else:
if 'error' in obj:
ret['error'] = 'ESI error: {}'.format(obj['error'])
else:
ret['error'] = 'Error connecting to ESI server: HTTP status {}'.format(r.status_code)
except requests.exceptions.RequestException as e:
        ret['error'] = 'Error connecting to ESI server: {}'.format(str(e))
except json.JSONDecodeError:
ret['error'] = 'Failed to parse response JSON from CCP ESI server!'
return ret
def do_refresh_token(cfg: sitecfg.SiteConfig, refresh_token: str) -> dict:
res = {
'error': '',
'sso_expire_dt_utc': '',
'del': {
'sso_token': '',
'sso_refresh_token': '',
'sso_expire_dt': '',
'sso_expire_dt_utc': ''
}
}
try:
r = requests.post('https://login.eveonline.com/oauth/token',
auth=(cfg.SSO_CLIENT_ID, cfg.SSO_SECRET_KEY),
headers={
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': cfg.SSO_USER_AGENT
},
data={
'grant_type': 'refresh_token',
'refresh_token': refresh_token
},
timeout=10)
if (r.status_code >= 200) and (r.status_code < 300):
response_text = r.text
details = json.loads(response_text)
# calculate expire datetime
expires_in = int(details['expires_in'])
td = datetime.timedelta(seconds=expires_in)
dt_now = datetime.datetime.now()
dt_utcnow = datetime.datetime.utcnow()
dt_expire = dt_now + td
dt_utcexpire = dt_utcnow + td
# form reply dict
res['sso_expire_dt_utc'] = dt_utcexpire.strftime('%Y-%m-%dT%H:%M:%SZ')
res['del']['sso_token'] = details['access_token']
res['del']['sso_refresh_token'] = details['refresh_token']
res['del']['sso_expire_dt'] = dt_expire
res['del']['sso_expire_dt_utc'] = dt_utcexpire
else:
# some SSO error
            res['error'] = 'Error during communication with login.eveonline.com ' \
                           '(refresh token), HTTP error={}'.format(r.status_code)
except requests.exceptions.RequestException as req_e:
        res['error'] = 'Error during communication with login.eveonline.com ' \
                       '(refresh token): ' + str(req_e)
except json.JSONDecodeError as json_e:
        res['error'] = 'Error decoding server response from ' \
                       'login.eveonline.com (refresh token): ' + str(json_e)
return res
def location_online(cfg: sitecfg.SiteConfig, char_id: int, access_token: str) -> dict:
global esi_proxies
ret = {
'error': '',
'is_online': False
}
try:
# https://esi.tech.ccp.is/latest/#!/Location/get_characters_character_id_online
# This route is cached for up to 60 seconds
url = '{}/characters/{}/online/'.format(cfg.ESI_BASE_URL, char_id)
r = requests.get(url,
headers={
'Authorization': 'Bearer ' + access_token,
'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': cfg.SSO_USER_AGENT
},
proxies=esi_proxies,
timeout=10)
response_text = r.text
# '{"last_login":"2018-05-22T22:52:32Z","last_logout":"2018-05-19T20:43:44Z","logins":505,"online":true}'
if r.status_code == 200:
obj = json.loads(r.text)
ret['is_online'] = obj['online']
ret['online'] = obj['online']
ret['logins'] = obj['logins']
ret['error'] = ''
analyze_esi_response_headers(r.headers)
else:
obj = json.loads(r.text)
if 'error' in obj:
ret['error'] = 'ESI error: {}'.format(obj['error'])
else:
ret['error'] = 'Error connecting to ESI server: HTTP status {}'.format(r.status_code)
except requests.exceptions.RequestException as e:
        ret['error'] = 'Error connecting to ESI server: {}'.format(str(e))
except json.JSONDecodeError:
ret['error'] = 'Failed to parse response JSON from CCP ESI server!'
return ret
def location_ship(cfg: sitecfg.SiteConfig, char_id: int, access_token: str) -> dict:
global esi_proxies
ret = {
'error': '',
'ship_name': '',
'ship_type_id': 0
}
try:
# https://esi.tech.ccp.is/latest/#!/Location/get_characters_character_id_ship
# This route is cached for up to 5 seconds
url = '{}/characters/{}/ship/'.format(cfg.ESI_BASE_URL, char_id)
r = requests.get(url,
headers={
'Authorization': 'Bearer ' + access_token,
'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': cfg.SSO_USER_AGENT
},
proxies=esi_proxies,
timeout=10)
obj = json.loads(r.text)
if r.status_code == 200:
details = json.loads(r.text)
ret['ship_name'] = str(details['ship_name'])
ret['ship_type_id'] = int(details['ship_type_id'])
analyze_esi_response_headers(r.headers)
else:
if 'error' in obj:
ret['error'] = 'ESI error: {}'.format(obj['error'])
else:
ret['error'] = 'Error connecting to ESI server: HTTP status {}'.format(r.status_code)
except requests.exceptions.RequestException as e:
        ret['error'] = 'Error connecting to ESI server: {}'.format(str(e))
except json.JSONDecodeError:
ret['error'] = 'Failed to parse response JSON from CCP ESI server!'
return ret
def location_location(cfg: sitecfg.SiteConfig, char_id: int, access_token: str) -> dict:
global esi_proxies
ret = {
'error': '',
'solarsystem_id': 0,
'solarsystem_name': '',
'is_whsystem': False,
'is_docked': False,
'structure_id': 0,
'station_id': 0
}
try:
# https://esi.tech.ccp.is/latest/#!/Location/get_characters_character_id_location
        # Information about the character's current location. Returns the current
        # solar system id, and also the current station or structure ID if applicable.
# This route is cached for up to 5 seconds
url = '{}/characters/{}/location/'.format(cfg.ESI_BASE_URL, char_id)
r = requests.get(url,
headers={
'Authorization': 'Bearer ' + access_token,
'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': cfg.SSO_USER_AGENT
},
proxies=esi_proxies,
timeout=10)
obj = json.loads(r.text)
if r.status_code == 200:
details = json.loads(r.text)
ret['solarsystem_id'] = int(details['solar_system_id'])
if 'structure_id' in details:
ret['is_docked'] = True
ret['structure_id'] = details['structure_id']
if 'station_id' in details:
ret['is_docked'] = True
ret['station_id'] = details['station_id']
analyze_esi_response_headers(r.headers)
else:
if 'error' in obj:
ret['error'] = 'ESI error: {}'.format(obj['error'])
else:
ret['error'] = 'Error connecting to ESI server: HTTP status {}'.format(r.status_code)
except requests.exceptions.RequestException as e:
        ret['error'] = 'Error connecting to ESI server: {}'.format(str(e))
except json.JSONDecodeError:
ret['error'] = 'Failed to parse response JSON from CCP ESI server!'
return ret
def market_region_orders(cfg: sitecfg.SiteConfig, region_id: int, order_type: str, optional_type_id: int = None) -> list:
global esi_proxies
ret = []
error_str = ''
if region_id < 0:
return ret
if order_type not in ['buy', 'sell', 'all']:
raise ValueError('order_type must be one of: "buy", "sell", "all"')
try:
# https://esi.tech.ccp.is/latest/#!/Market/get_markets_region_id_orders
# This route is cached for up to 300 seconds
# example request URL: https://esi.tech.ccp.is/latest/markets/10000002/orders/?order_type=sell&type_id=30377
url = '{}/markets/{}/orders/'.format(cfg.ESI_BASE_URL, region_id)
get_params = {
'order_type': order_type
}
if optional_type_id is not None:
get_params['type_id'] = optional_type_id
r = requests.get(url,
params=get_params,
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': cfg.SSO_USER_AGENT
},
proxies=esi_proxies,
timeout=20)
response_text = r.text
if r.status_code == 200:
ret = json.loads(response_text)
analyze_esi_response_headers(r.headers)
else:
obj = json.loads(response_text)
if 'error' in obj:
error_str = 'ESI error: {}'.format(obj['error'])
else:
error_str = 'Error connecting to ESI server: HTTP status {}'.format(r.status_code)
except requests.exceptions.RequestException as e:
        error_str = 'Error connecting to ESI server: {}'.format(str(e))
except json.JSONDecodeError:
error_str = 'Failed to parse response JSON from CCP ESI server!'
if error_str != '':
raise ESIException(error_str)
return ret
def ui_open_window_information(cfg: sitecfg.SiteConfig, target_id: int, access_token: str) -> bool:
"""
Open the information window for a character, corporation or alliance inside the client
:param cfg: configuration
:param target_id: can be character_id, corporation_id, alliance_id
:param access_token: SSO access token string
    :return: True if the request was accepted; on error an ESIException is raised
"""
global esi_proxies
ret = False
error_str = ''
if target_id < 0:
return False
try:
# https://esi.tech.ccp.is/latest/#!/User32Interface/post_ui_openwindow_information
url = '{}/ui/openwindow/information/'.format(cfg.ESI_BASE_URL)
r = requests.post(url,
params={'target_id': target_id},
headers={
'Authorization': 'Bearer ' + access_token,
'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': cfg.SSO_USER_AGENT
},
proxies=esi_proxies,
timeout=10)
        # Only check the return code; 204 means "request accepted".
if (r.status_code >= 200) and (r.status_code <= 299):
ret = True
analyze_esi_response_headers(r.headers)
else:
error_str = 'Error connecting to ESI server: HTTP status {}'.format(r.status_code)
except requests.exceptions.RequestException as e:
        error_str = 'Error connecting to ESI server: {}'.format(str(e))
except json.JSONDecodeError:
error_str = 'Failed to parse response JSON from CCP ESI server!'
if error_str != '':
raise ESIException(error_str)
return ret
def get_killmail_by_id_hash(cfg: sitecfg.SiteConfig, kill_id: str, kill_hash: str) -> dict:
"""
Get killmail JSON info
:param cfg: configuration
:param kill_id: id like 72725284
:param kill_hash: long hash like 56a83bf9445ad4ed88426b19e600e801e6ab57f4
:return: returned JSON from API as python dict
"""
global esi_proxies
ret = {}
error_str = ''
try:
# https://esi.evetech.net/ui/#/Killmails/get_killmails_killmail_id_killmail_hash
# GET /killmails/{killmail_id}/{killmail_hash}/
url = '{}/killmails/{}/{}/'.format(cfg.ESI_BASE_URL, kill_id, kill_hash)
r = requests.get(url,
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': cfg.SSO_USER_AGENT
},
proxies=esi_proxies,
timeout=10)
        # Only check the return code; any 2xx response is treated as success.
if (r.status_code >= 200) and (r.status_code <= 299):
ret = json.loads(r.text)
analyze_esi_response_headers(r.headers)
else:
error_str = 'Error connecting to ESI server: HTTP status {}'.format(r.status_code)
except requests.exceptions.RequestException as e:
        error_str = 'Error connecting to ESI server: {}'.format(str(e))
except json.JSONDecodeError:
error_str = 'Failed to parse response JSON from CCP ESI server!'
if error_str != '':
raise ESIException(error_str)
return ret
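# Minimal usage sketch (illustrative only; the configuration object and proxy
# value are assumptions, not defined here):
#
#   cfg = sitecfg.SiteConfig()
#   set_esi_proxies(None)  # or e.g. {'https': 'socks5://localhost:9050'}
#   names = universe_names(cfg, [2114246032])
#   info = public_data(cfg, 2114246032)
#
# Calls in this module either return parsed JSON data or report failures by
# raising ESIException / filling the 'error' key, so callers should handle
# both failure modes.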
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import operator
import uuid
from functools import reduce
from itertools import chain
from django.conf import settings
from django.contrib import messages
from django.contrib.admin.models import ADDITION, CHANGE, LogEntry
from django.contrib.auth.decorators import permission_required
from django.core import signing
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.forms.formsets import formset_factory
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
from django.views.generic import DetailView, FormView, ListView
from django.views.decorators.http import require_POST
from ..attendees.exporters import BadgeExporter
from ..attendees.models import (Purchase, Ticket, TicketType, SIMCardTicket,
SupportTicket, VenueTicket)
from ..attendees.tasks import render_invoice
from ..attendees.utils import generate_invoice_number
from ..conference.models import current_conference
from .exporters import generate_badge
from .forms import (OnDeskPurchaseForm, EditOnDeskTicketForm,
NewOnDeskTicketForm, BaseOnDeskTicketFormSet, SearchForm, get_users,
get_sponsors)
def ctype(obj):
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(obj)
class CheckinViewMixin(object):
@method_decorator(permission_required('accounts.see_checkin_info'))
def dispatch(self, *args, **kwargs):
return super(CheckinViewMixin, self).dispatch(*args, **kwargs)
class SearchFormMixin(object):
def get_context_data(self, **kwargs):
context = super(SearchFormMixin, self).get_context_data(**kwargs)
context['search_form'] = SearchForm(self.request.GET)
return context
class SearchView(CheckinViewMixin, SearchFormMixin, ListView):
template_name = 'checkin/search.html'
model = Ticket
context_object_name = 'results'
search_fields = (
'user__id',
'user__username',
'user__email',
'user__profile__full_name',
'user__profile__display_name',
'id',
'purchase__id',
'purchase__company_name',
'purchase__first_name',
'purchase__last_name',
'purchase__email',
'purchase__invoice_number',
'purchase__user__id',
'purchase__user__username',
'purchase__user__email',
'purchase__user__profile__full_name',
'purchase__user__profile__display_name',
'simcardticket__first_name',
'simcardticket__last_name',
'venueticket__first_name',
'venueticket__last_name',
)
def get_context_data(self, **kwargs):
context = super(SearchView, self).get_context_data(**kwargs)
context['searched'] = 'query' in self.request.GET
return context
def get_queryset(self):
queryset = self.model.objects.select_related(
'user',
'user__profile',
'purchase',
'purchase__user',
'purchase__user__profile',
'simcardticket',
'venueticket',
'ticket_type',
'ticket_type__content_type',
).filter(
models.Q(simcardticket__isnull=False) |
models.Q(venueticket__isnull=False)
)
for term in self.search_terms:
queries = [
models.Q(**{search_field + '__icontains': term})
for search_field in self.search_fields
]
queryset = queryset.filter(reduce(operator.or_, queries))
return queryset
def get(self, *args, **kwargs):
self.object_list = []
self.search_terms = self.request.GET.get('query', '').split()
if self.search_terms:
tickets = self.get_queryset()
for ticket in tickets:
obj = {
'ticket': {
'id': ticket.id,
}
}
if ticket.user is None:
obj['ticket'].update({
'full_name': ticket.real_ticket.first_name + ' ' + ticket.real_ticket.last_name,
'organisation': getattr(ticket.real_ticket, 'organisation', None)
})
else:
obj['ticket'].update({
'user_id': ticket.user_id,
'username': ticket.user.username,
'email': ticket.user.email,
'full_name': ticket.user.profile.full_name,
'display_name': ticket.user.profile.display_name,
'organisation': ticket.user.profile.organisation
})
obj['purchase'] = {
'id': ticket.purchase.id,
'company_name': ticket.purchase.company_name,
'invoice_number': ticket.purchase.invoice_number,
'name': ticket.purchase.first_name + ' ' + ticket.purchase.last_name,
'email': ticket.purchase.email
}
if ticket.purchase.user_id:
obj['buyer'] = {
'user_id': ticket.purchase.user_id,
'username': ticket.purchase.user.username,
'email': ticket.purchase.user.email,
'full_name': ticket.purchase.user.profile.full_name,
'display_name': ticket.purchase.user.profile.display_name,
'organisation': ticket.purchase.user.profile.organisation
}
self.object_list.append(obj)
context = self.get_context_data(
search_terms=self.search_terms,
object_list=self.object_list
)
return self.render_to_response(context)
search_view = SearchView.as_view()
class OnDeskPurchaseView(CheckinViewMixin, SearchFormMixin, FormView):
form_class = OnDeskPurchaseForm
salt = 'pyconde.checkin.purchase'
stage = 'form'
template_name = 'checkin/ondesk_purchase_form.html'
template_name_preview = 'checkin/ondesk_purchase_form_preview.html'
ticket_formset_class = BaseOnDeskTicketFormSet
ticket_form_class = NewOnDeskTicketForm
    timeout = 15 * 60  # seconds after which the preview times out
@method_decorator(permission_required('accounts.perform_purchase'))
def dispatch(self, *args, **kwargs):
return super(OnDeskPurchaseView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
formset_class = formset_factory(self.ticket_form_class,
formset=self.ticket_formset_class,
extra=1)
formset = formset_class()
return self.render_to_response(self.get_context_data(form=form,
formset=formset))
def post(self, request, *args, **kwargs):
if self.request.POST.get('signed_data', None) is not None:
# Verify existing session
if not self.verify_session():
messages.error(request, _('Purchase session timeout or purchase already processed'))
return HttpResponseRedirect(reverse('checkin_purchase'))
# We do the actual submit
return self.form_post()
else:
self.start_session()
# We perform the preview
form_class = self.get_form_class()
form = self.get_form(form_class)
formset_class = formset_factory(self.ticket_form_class,
formset=self.ticket_formset_class,
extra=1)
formset = formset_class(data=self.request.POST)
valid = (form.is_valid(), formset.is_valid(),)
if all(valid):
return self.form_valid(form, formset)
else:
return self.form_invalid(form, formset)
def form_post(self):
        # Do the actual booking process. We already verified the data in
        # the preview step, and use the data from the signed data package.
self.stage = 'post'
signed_data = self.request.POST.get('signed_data')
try:
data = signing.loads(signed_data, salt=self.salt, max_age=self.timeout)
with transaction.commit_manually():
# TODO:
# set form.email to some value
try:
purchase = Purchase(**data['purchase'])
purchase.conference = current_conference()
purchase.state = 'new'
purchase.payment_method = 'invoice'
purchase.save()
for td in data['tickets']:
ticket_type = TicketType.objects.select_related('content_type') \
.get(id=td['ticket_type_id'])
TicketClass = ticket_type.content_type.model_class()
ticket = TicketClass(**td)
ticket.purchase = purchase
ticket.save()
purchase.payment_total = purchase.calculate_payment_total()
purchase.save(update_fields=['payment_total'])
purchase.invoice_number = generate_invoice_number()
purchase.save(update_fields=['invoice_number'])
LogEntry.objects.log_action(
user_id=self.request.user.pk,
content_type_id=ctype(purchase).pk,
object_id=purchase.pk,
object_repr=force_text(purchase),
action_flag=ADDITION,
change_message='Checkin: Purchase created'
)
self.object = purchase
except Exception as e:
print(e)
transaction.rollback()
                    messages.error(self.request, _('An error occurred while processing the purchase'))
return HttpResponseRedirect(reverse('checkin_purchase'))
else:
# Delete the purchase_key first in case a database error occurs
del self.request.session['purchase_key']
transaction.commit()
messages.success(self.request, _('Purchase successful!'))
render_invoice.delay(purchase_id=purchase.id,
send_purchaser=False)
return HttpResponseRedirect(self.get_success_url())
except signing.SignatureExpired:
messages.error(self.request, _('Session timed out. Please restart the purchase process.'))
except signing.BadSignature:
messages.error(self.request, _('Invalid data. Please restart the purchase process.'))
return HttpResponseRedirect(reverse('checkin_purchase'))
def form_valid(self, form, formset):
# We allow users to preview their purchase.
# We serialize all form data into one json object that is then
# signed using django.core.signing
self.stage = 'preview'
serialized_data = self.serialize(form, formset)
signed_data = signing.dumps(serialized_data, salt=self.salt, compress=True)
purchase = form.cleaned_data
tickets = []
payment_total = 0.0
for tform in formset.changed_forms:
t = tform.cleaned_data
# Copy for template access
t['ticket_type'] = t['ticket_type_id']
t['user'] = t['user_id']
t['sponsor'] = t['sponsor_id']
payment_total += t['ticket_type_id'].fee
tickets.append(t)
purchase['payment_total'] = payment_total
ctx = self.get_context_data(signed_data=signed_data,
purchase=purchase,
tickets=tickets)
return self.render_to_response(ctx)
def form_invalid(self, form, formset):
ctx = self.get_context_data(form=form, formset=formset)
return self.render_to_response(ctx)
def get_context_data(self, **kwargs):
ctx = super(OnDeskPurchaseView, self).get_context_data(**kwargs)
ctx['purchase_key'] = self.request.session.get('purchase_key')
ctx['stage'] = self.stage
if self.stage == 'preview':
pass
else:
ctx['empty_form'] = ctx['formset'].empty_form
return ctx
def get_success_url(self):
return reverse('checkin_purchase_detail', kwargs={'pk': self.object.pk})
def get_template_names(self):
if self.stage == 'preview':
return [self.template_name_preview]
return super(OnDeskPurchaseView, self).get_template_names()
def serialize(self, form, formset):
data = {}
data['purchase'] = {bf.name: bf.data for bf in form}
ticket_data = []
for tf in formset.changed_forms:
ticket_data.append({bf.name: bf.data for bf in tf})
data['tickets'] = ticket_data
return data
def start_session(self):
# Start new purchase session
self.request.session['purchase_key'] = force_text(uuid.uuid4())
def verify_session(self):
        # A session is only valid if the key exists both in the POST data and
        # in the session, and the key is not None or ''.
purchase_key_session = self.request.session.get('purchase_key', None)
purchase_key = self.request.POST.get('purchase_key', None)
return purchase_key and purchase_key_session == purchase_key
purchase_view = OnDeskPurchaseView.as_view()
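# Added commentary (not original code): the preview/post round trip in
# OnDeskPurchaseView relies on django.core.signing, roughly:
#
#   token = signing.dumps(data, salt='pyconde.checkin.purchase', compress=True)
#   data = signing.loads(token, salt='pyconde.checkin.purchase', max_age=15 * 60)
#
# Tampered or stale form data therefore raises BadSignature or
# SignatureExpired, which form_post() turns into user-facing error messages.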
class OnDeskPurchaseDetailView(CheckinViewMixin, SearchFormMixin, DetailView):
model = Purchase
template_name = 'checkin/ondesk_purchase_detail.html'
def get_context_data(self, **kwargs):
ctx = super(OnDeskPurchaseDetailView, self).get_context_data(**kwargs)
venues = VenueTicket.objects.filter(purchase_id=self.object.id).all()
sims = SIMCardTicket.objects.filter(purchase_id=self.object.id).all()
sups = SupportTicket.objects.filter(purchase_id=self.object.id).all()
ctx['tickets'] = chain(venues, sims, sups)
return ctx
def get_queryset(self):
qs = super(OnDeskPurchaseDetailView, self).get_queryset()
qs = qs.select_related('ticket_set__ticket_type__content_type')
return qs
purchase_detail_view = OnDeskPurchaseDetailView.as_view()
@permission_required('accounts.see_checkin_info')
def purchase_invoice_view(request, pk):
purchase = get_object_or_404(Purchase, pk=pk)
if purchase.exported:
response = HttpResponse(content_type='application/pdf')
ext = '.json' if settings.PURCHASE_INVOICE_DISABLE_RENDERING else '.pdf'
filename = '%s%s' % (purchase.full_invoice_number, ext)
response['Content-Disposition'] = 'attachment; filename="%s"' % filename
with open(purchase.invoice_filepath, 'rb') as f:
response.write(f.read())
return response
else:
messages.error(request, _('Invoice not yet exported.'))
url = reverse('checkin_purchase_detail', kwargs={'pk': purchase.pk})
return HttpResponseRedirect(url)
@permission_required('accounts.see_checkin_info')
def purchase_badges_view(request, pk):
purchase = get_object_or_404(Purchase, pk=pk)
tickets = VenueTicket.objects.filter(purchase_id=purchase.pk)
return ticket_badge_view(request, tickets)
@require_POST
@permission_required('accounts.see_checkin_info')
@permission_required('accounts.perform_purchase')
def purchase_update_state(request, pk, new_state):
purchase = get_object_or_404(Purchase, pk=pk)
states = {
'paid': 'payment_received',
'unpaid': 'invoice_created',
'cancel': 'canceled',
}
state = states.get(new_state, None)
if state:
old_state = purchase.state
purchase.state = state
purchase.save(update_fields=['state'])
messages.success(request, _('Purchase marked as %(state)s.') % {
'state': new_state})
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=ctype(purchase).pk,
object_id=purchase.pk,
object_repr=force_text(purchase),
action_flag=CHANGE,
change_message='Checkin: state changed from %s to %s' % (old_state, state)
)
else:
messages.warning(request, _('Invalid state.'))
url = reverse('checkin_purchase_detail', kwargs={'pk': purchase.pk})
return HttpResponseRedirect(url)
class OnDeskTicketUpdateView(CheckinViewMixin, SearchFormMixin, FormView):
form_class = EditOnDeskTicketForm
model = VenueTicket
template_name = 'checkin/ondesk_ticket_form.html'
def get(self, request, *args, **kwargs):
self.object = get_object_or_404(self.model, pk=kwargs.get('pk'))
return super(OnDeskTicketUpdateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = get_object_or_404(self.model, pk=kwargs.get('pk'))
return super(OnDeskTicketUpdateView, self).post(request, *args, **kwargs)
def form_valid(self, form):
for k, v in form.cleaned_data.items():
setattr(self.object, k, v)
self.object.save(update_fields=form.cleaned_data.keys())
LogEntry.objects.log_action(
user_id=self.request.user.pk,
content_type_id=ctype(self.object).pk,
object_id=self.object.pk,
object_repr=force_text(self.object),
action_flag=CHANGE,
            change_message='Checkin: %s' % ', '.join(
'%s changed to %s' % (k, form.cleaned_data[k])
for k in form.changed_data
)
)
        messages.success(self.request, _('Ticket successfully updated.'))
return super(OnDeskTicketUpdateView, self).form_valid(form)
def get_form_kwargs(self):
kwargs = super(OnDeskTicketUpdateView, self).get_form_kwargs()
kwargs.update({
'users': get_users(),
'sponsors': get_sponsors()
})
return kwargs
def get_initial(self):
return {
'first_name': self.object.first_name,
'last_name': self.object.last_name,
'organisation': self.object.organisation,
'user_id': self.object.user_id,
'sponsor_id': self.object.sponsor_id,
}
def get_success_url(self):
return reverse('checkin_purchase_detail', kwargs={'pk': self.object.purchase.pk})
ticket_update_view = OnDeskTicketUpdateView.as_view()
@permission_required('accounts.see_checkin_info')
def ticket_badge_view(request, pk):
if isinstance(pk, models.query.QuerySet):
ticket = pk
else:
ticket = VenueTicket.objects.filter(pk=pk).select_related('purchase')
ticket = ticket.filter(canceled=False)
count = ticket.count()
if count == 0:
raise Http404
if ticket[0].purchase.state != 'payment_received':
messages.error(request, _('Invoice not yet paid.'))
url = reverse('checkin_purchase_detail', kwargs={'pk': ticket[0].purchase_id})
return HttpResponseRedirect(url)
be = BadgeExporter(ticket, 'https://ep14.org/u{uid}', indent=False)
data = be.export()
pdf = generate_badge(data)
if pdf is not None:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="badge.pdf"'
response.write(pdf)
return response
else:
msg = ungettext_lazy('Error generating the badge',
'Error generating the badges',
count)
messages.error(request, msg)
url = reverse('checkin_purchase_detail', kwargs={'pk': ticket[0].purchase_id})
return HttpResponseRedirect(url)
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# probfit Basic Tutorial
# <markdowncell>
# [probfit](http://iminuit.github.io/probfit/) is a modeling / fitting package to be used together with [iminuit](http://iminuit.github.com/iminuit/).
#
# This tutorial is a fast-paced introduction to the probfit features:
#
# * built-in common models: polynomial, gaussian, ...
# * built-in common fit statistics: chi^2, binned and unbinned likelihood
# * tools to get your fits to converge and check the results: try_uml, draw, draw_residuals, ...
# * tools to help you implement your own models and fit statistics: Normalize, Extended, integrate_1d, ...
#
# Please start this notebook with the ``ipython --pylab=inline`` option to get inline plots.
# <codecell>
# We assume you have executed this cell in all the following examples
import numpy as np
import matplotlib.pyplot as plt
import iminuit
import probfit
# <markdowncell>
# In your own code you can explicitly import what you need to save
# typing in interactive sessions, e.g.
#
# from iminuit import Minuit, describe
# from probfit import gaussian, BinnedLH
#
# We don't do this here, we only import `iminuit` and `probfit` into our
# namespace so that it is clear to you which functions and classes come
# from which package while reading the code below.
# <markdowncell>
# ## Chi^2 straight line fit
#
# We can't really call this a fitting package without being able to fit a straight line, right?
# <codecell>
# Let's make a straight line with gaussian(mu=0, sigma=1) noise
np.random.seed(0)
x = np.linspace(0, 10, 20)
y = 3 * x + 15 + np.random.randn(len(x))
err = np.ones(len(x))
plt.errorbar(x, y, err, fmt='.');
# <codecell>
# Let's define our line.
# First argument has to be the independent variable,
# arguments after that are shape parameters.
def line(x, m, c): # define it to be parabolic or whatever you like
return m * x + c
# <codecell>
iminuit.describe(line)
# <codecell>
# Define a chi^2 cost function
chi2 = probfit.Chi2Regression(line, x, y, err)
# <codecell>
# Chi2Regression is just a callable object; nothing special about it
iminuit.describe(chi2)
# <codecell>
# minimize it
# yes, it gives you a heads up that you didn't give it initial values
# we can ignore it for now
minuit = iminuit.Minuit(chi2) # see iminuit tutorial on how to give initial value/range/error
minuit.migrad(); # MIGRAD is a very stable robust minimization method
# you can look at your terminal to see what it is doing;
# <codecell>
# The output above is a pretty-printed summary of the fit results from
# minuit.print_fmin()
# which was automatically called by iminuit.Minuit.migrad() after running MIGRAD.
# Let's see our results as Python dictionaries ...
print(minuit.values)
print(minuit.errors)
# <markdowncell>
# #### Parabolic error
# is calculated using the second derivative at the minimum.
# This is good in most cases, i.e. where the uncertainty is symmetric and not much
# correlation exists. MIGRAD usually gets this right, but if you want to be sure
# call `minuit.hesse()` after calling `minuit.migrad()`.
#
# #### Minos Error
# is obtained by scanning the chi^2 or likelihood profile and finding the point
# where chi^2 is increased by `minuit.errordef`. Note that in the Minuit documentation
# and output `errordef` is often called `up` ... it's the same thing.
#
# #### What `errordef` should I use?
#
# As explained in the Minuit documentation you should use:
#
# * `errordef = 1` for chi^2 fits
# * `errordef = 0.5` for likelihood fits
#
# `errordef=1` is the default, so you only have to set it to `errordef=0.5`
# if you are defining a likelihood cost function (if you don't, your HESSE and MINOS errors will be incorrect).
# `probfit` helps you by defining a `default_errordef()` attribute on the
# cost function classes, which is automatically detected by the `Minuit` constructor
# and can be used to set `Minuit.errordef` correctly, so that users can't forget.
# Classes used in this tutorial:
#
# * `probfit.Chi2Regression.get_errordef()` and `probfit.BinnedChi2.get_errordef()` return 1.
# * `probfit.BinnedLH.get_errordef()` and `probfit.UnbinnedLH.get_errordef()` return 0.5.
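# <codecell>
# A quick hands-on check of the two error types described above.
# This is a minimal sketch; it assumes `minuit` is the fitted Minuit object
# from the cell above.
minuit.hesse()           # recompute the parabolic (HESSE) errors
minuit.minos()           # scan the profile for the asymmetric (MINOS) errors
print(minuit.errors)     # parabolic errors
print(minuit.merrors)    # MINOS errors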
# <codecell>
# Let's visualize our line
chi2.draw(minuit)
# looks good;
# <codecell>
# Sometimes we want the error matrix (a.k.a. covariance matrix)
print('error matrix:')
print(minuit.matrix())
# or the correlation matrix
print('correlation matrix:')
print(minuit.matrix(correlation=True))
# or a pretty html representation
# Note that `print_matrix()` shows the correlation matrix, not the error matrix
minuit.print_matrix()
# <markdowncell>
# ## Binned Poisson likelihood fit of a Gaussian distribution
# In high energy physics, we usually want to fit a distribution to a histogram. Let's look at simple Gaussian distribution.
# <codecell>
# First let's make some example data
np.random.seed(0)
data = np.random.randn(10000) * 4 + 1
# sigma = 4 and mean = 1
plt.hist(data, bins=100, histtype='step');
# <codecell>
# Define your PDF / model
def gauss_pdf(x, mu, sigma):
"""Normalized Gaussian"""
return 1 / np.sqrt(2 * np.pi) / sigma * np.exp(-(x - mu) ** 2 / 2. / sigma ** 2)
# <codecell>
# Build your cost function
# Here we use binned likelihood
binned_likelihood = probfit.BinnedLH(gauss_pdf, data)
# <codecell>
# Create the minuit
# and give an initial value for the sigma parameter
minuit = iminuit.Minuit(binned_likelihood, sigma=3)
# Remember: minuit.errordef is automatically set to 0.5
# as required for likelihood fits (this was explained above)
binned_likelihood.draw(minuit);
# <codecell>
minuit.migrad()
# As in every binned fit of a distribution with a long tail, there are bins with zero counts,
# and the fit has to do something about them.
# probfit.BinnedLH handles them gracefully but will give you a warning;
# <codecell>
# Visually check if the fit succeeded by plotting the model over the data
binned_likelihood.draw(minuit) # uncertainty is given by symmetric Poisson;
# <codecell>
# Let's see the result
print('Value: {}'.format(minuit.values))
print('Error: {}'.format(minuit.errors))
# <codecell>
# That printout can get out of hand quickly
minuit.print_fmin()
# Also print the correlation matrix
minuit.print_matrix()
# <codecell>
# Looking at a likelihood profile is a good method
# to check that the reported errors make sense
minuit.draw_mnprofile('mu');
# <codecell>
# Plot a 2d error contour
# You will notice that it takes some time to draw.
# This is because our PDF is defined in Python;
# we will show how to speed this up later.
minuit.draw_mncontour('mu', 'sigma');
# <markdowncell>
# ## Chi^2 fit of a Gaussian distribution
#
# Let's explore another popular cost function: chi^2.
# Chi^2 behaves badly when a bin has zero entries:
# ROOT simply ignores such bins, RooFit handles them differently.
# Either way, it's best to avoid chi^2 when your histogram has bins with zero counts.
# <codecell>
# We will use the same data as in the previous example
np.random.seed(0)
data = np.random.randn(10000) * 4 + 1
# sigma = 4 and mean = 1
plt.hist(data, bins=100, histtype='step');
# <codecell>
# We will use the same PDF as in the previous example
def gauss_pdf(x, mu, sigma):
"""Normalized Gaussian"""
return 1 / np.sqrt(2 * np.pi) / sigma * np.exp(-(x - mu) **2 / 2. / sigma ** 2)
# <codecell>
# Binned chi^2 fit only makes sense (for now) for extended PDFs
# probfit.Extended adds a norm parameter with name 'N'
extended_gauss_pdf = probfit.Extended(gauss_pdf)
# <codecell>
# Describe the function signature
iminuit.describe(extended_gauss_pdf)
# <codecell>
# A chi^2 fit is really bad for a distribution with a long tail:
# when the bin count is 0 the Poisson error is 0, which blows up chi^2,
# so we restrict the fit to a range without empty bins.
chi2 = probfit.BinnedChi2(extended_gauss_pdf, data, bound=(-7,10))
# This time we use the pedantic=False option to tell Minuit
# that we don't want warnings about parameters without initial
# value or step size.
# And print_level=0 means that no output is generated
minuit = iminuit.Minuit(chi2, sigma=1, pedantic=False, print_level=0)
minuit.migrad();
# <codecell>
# Now let's look at the results
minuit.print_fmin()
minuit.print_matrix()
chi2.draw(minuit);
# <markdowncell>
# ## Fast unbinned likelihood fit with Cython
#
# An unbinned likelihood is computationally very expensive if you have a lot of data.
# Now is a good time to talk about how to speed things up with [Cython](http://cython.org).
# <codecell>
# We will use the same data as in the previous example
np.random.seed(0)
data = np.random.randn(10000) * 4 + 1
# sigma = 4 and mean = 1
plt.hist(data, bins=100, histtype='step');
# <codecell>
# We want to speed things up with Cython
%load_ext cythonmagic
# <codecell>
%%cython
# Same gaussian distribution but now written in Cython
# The %%cython IPython magic does the following:
# * Call Cython to generate C code for a Python C extension.
# * Compile it into a Python C extension (a shared library)
# * Load it into the current namespace
# If you don't understand these things, don't worry, it basically means:
# * Get full-metal speed easily
cimport cython
from libc.math cimport exp, M_PI, sqrt
@cython.binding(True) # IMPORTANT: this tells Cython to dump the function signature
def gauss_pdf_cython(double x, double mu, double sigma):
return 1 / sqrt(2 * M_PI) / sigma * exp(-(x - mu) ** 2 / 2. / sigma ** 2)
# <codecell>
# Define the unbinned likelihood cost function
unbinned_likelihood = probfit.UnbinnedLH(gauss_pdf_cython, data)
# <codecell>
minuit = iminuit.Minuit(unbinned_likelihood, sigma=2, pedantic=False, print_level=0)
# Remember: minuit.errordef is automatically set to 0.5
# as required for likelihood fits (this was explained above)
minuit.migrad() # yes: amazingly fast
unbinned_likelihood.show(minuit)
minuit.print_fmin()
minuit.print_matrix()
# <codecell>
# Remember how slow draw_mnprofile() was in the last example?
# Now it's super fast (even though the unbinned
# likelihood computation is more compute-intensive).
minuit.draw_mnprofile('mu');
# <markdowncell>
# But you really don't have to write your own gaussian, there are tons of built-in functions written in Cython for you.
# <codecell>
# Here's how you can list them
import probfit.pdf
print(dir(probfit.pdf))
print(iminuit.describe(probfit.pdf.gaussian))
print(type(probfit.pdf.gaussian))
# But actually they are always all imported into the main probfit
# namespace, so we'll keep using the simpler probfit.gaussian instead of
# probfit.pdf.gaussian here.
# <codecell>
unbinned_likelihood = probfit.UnbinnedLH(probfit.gaussian, data)
minuit = iminuit.Minuit(unbinned_likelihood, sigma=2, pedantic=False)
# Remember: minuit.errordef is automatically set to 0.5
# as required for likelihood fits (this was explained above)
minuit.migrad() # yes: amazingly fast
unbinned_likelihood.draw(minuit, show_errbars='normal') # control how fit is displayed too;
# <codecell>
# Draw the difference between data and PDF
plt.figure(figsize=(13,4))
plt.subplot(121)
unbinned_likelihood.draw_residual(minuit)
plt.subplot(122)
unbinned_likelihood.draw_residual(minuit, show_errbars=True, errbar_algo='sumw2', norm=True)
# <markdowncell>
# ## But... we can't normalize everything analytically, and how do we generate a toy sample from a PDF?
#
# When fitting a distribution to a PDF, one of the common problems we run into is normalization.
# Not every function is analytically integrable on the range of interest.
#
# Let's look at an example: the [Crystal Ball function](http://en.wikipedia.org/wiki/Crystal_Ball_function).
# It's simply a gaussian with a power law tail ... typically found in the energy deposited in crystals ...
# It is impossible to normalize analytically and the normalization depends on the shape parameters.
# <codecell>
np.random.seed(0)
bound = (-1, 2)
data = probfit.gen_toy(probfit.crystalball, 10000, bound=bound, alpha=1., n=2., mean=1., sigma=0.3, quiet=False)
# quiet=False tells gen_toy to plot the original function, the toy histogram
# and the Poisson errors from both the original distribution and the toy
# <codecell>
# To fit this function as a distribution we need to normalize
# so that it becomes a PDF over the range we consider here.
# We do this with the probfit.Normalized functor, which implements
# the trapezoid numerical integration method with a simple cache mechanism
normalized_crystalball = probfit.Normalized(probfit.crystalball, bound)
# this can also be done with a decorator:
# @probfit.normalized(bound)
# def my_function(x, blah):
# return something
pars = 1.0, 1, 2, 1, 0.3
print('function: {}'.format(probfit.crystalball(*pars)))
print(' pdf: {}'.format(normalized_crystalball(*pars)))
# <codecell>
# The normalized version has the same signature as the non-normalized version
print(iminuit.describe(probfit.crystalball))
print(iminuit.describe(normalized_crystalball))
# <codecell>
# We can fit the normalized function in the usual way ...
unbinned_likelihood = probfit.UnbinnedLH(normalized_crystalball, data)
start_pars = dict(alpha=1, n=2.1, mean=1.2, sigma=0.3)
minuit = iminuit.Minuit(unbinned_likelihood, **start_pars)
# Remember: minuit.errordef is automatically set to 0.5
# as required for likelihood fits (this was explained above)
minuit.migrad() # yes: amazingly fast Normalize is written in Cython
unbinned_likelihood.show(minuit)
# The Crystal Ball function is notorious for its sensitivity to the 'n' parameter.
# probfit gives you a heads up when it might run into a float overflow;
# <markdowncell>
# ## But what if I know the analytical integral formula for my distribution?
#
# `probfit` checks for a method called `integrate` with the signature `integrate(bound, nint, *arg)` and
# uses it to compute the definite integral for the given `bound` and `nint` (the number of integration
# pieces, which is normally ignored); the remaining shape parameters are passed as positional arguments.
#
# For some `probfit` built-in distributions analytical formulae have been implemented.
# <codecell>
def line(x, m, c):
return m * x + c
# compute the integral of line over x=(0, 1) using 10 intervals with m=1. and c=2.
# All probfit internals use this function;
# if no integrate method is available, probfit falls back to the Simpson 3/8 rule.
print(probfit.integrate1d(line, (0, 1), 10, (1., 2.)))
# Let us illustrate the point by forcing it to use an integral that's off by a
# factor of two
def wrong_line_integrate(bound, nint, m, c):
a, b = bound
# I know this is wrong:
return 2 * (m * (b ** 2 / 2. - a ** 2 / 2.) + c * (b - a))
line.integrate = wrong_line_integrate
# line.integrate = lambda bound, nint, m, c: blah blah # this works too
print(probfit.integrate1d(line, (0, 1), 10, (1., 2.)))
# <headingcell level=2>
# What if things go wrong?
# <markdowncell>
# In this section we show you what happens when your fit doesn't converge and how you can make it converge.
#
# We again use the Crystal Ball distribution as an example, which is notoriously sensitive to initial parameter values.
# <codecell>
unbinned_likelihood = probfit.UnbinnedLH(normalized_crystalball, data)
# No initial values given -> all parameters have default initial value 0
minuit = iminuit.Minuit(unbinned_likelihood)
# Remember: minuit.errordef is automatically set to 0.5
# as required for likelihood fits (this was explained above)
minuit.migrad() # yes: amazingly fast but tons of output on the console
# Remember there is a heads up;
# <codecell>
# This shows that we failed.
# The parameters are still at the default initial values
unbinned_likelihood.show(minuit);
# <codecell>
# These two status flags tell you if the best-fit parameter values
# and the covariance matrix (the parameter errors) are OK.
print(minuit.migrad_ok())
print(minuit.matrix_accurate())
# <markdowncell>
# To make MIGRAD converge we need start parameter values that are roughly correct. Remember that above the same fit converged when we used ::
#
# start_pars = dict(alpha=1, n=2.1, mean=1.2, sigma=0.3)
# minuit = iminuit.Minuit(unbinned_likelihood, **start_pars)
#
# #### But how can we guess these initial values?
#
# This is a hard question that doesn't have one simple answer. Visualizing your data and model helps.
# <codecell>
# Try one set of parameters
best_try = probfit.try_uml(normalized_crystalball, data, alpha=1., n=2.1, mean=1.2, sigma=0.3)
print(best_try)
# <codecell>
# Or try multiple sets of parameters
# (too many will just confuse you)
best_try = probfit.try_uml(normalized_crystalball, data, alpha=1., n=2.1, mean=[1.2, 1.1], sigma=[0.3, 0.5])
# try_uml computes the unbinned likelihood for each set of parameters and returns the best
# one as a dictionary.
# This is actually a poor-man's optimization algorithm in itself called grid search
# which is a popular way to find good starting values for other, faster optimization methods like MIGRAD.
print(best_try)
# <headingcell level=2>
# Extended fit: two Gaussians with polynomial background
# <markdowncell>
# Here we show how to create and fit a model that is the sum of several other models.
# <codecell>
# Generate some example data
np.random.seed(0)
data_peak1 = np.random.randn(3000) * 0.2 + 2
data_peak2 = np.random.randn(5000) * 0.1 + 4
data_range = (-2, 5)
data_bg = probfit.gen_toy(lambda x : 4 + 4 * x + x ** 2, 20000, data_range)
data_all = np.concatenate([data_peak1, data_peak2, data_bg])
plt.hist((data_peak1, data_peak2, data_bg, data_all),
label=['Signal 1', 'Signal 2', 'Background', 'Total'],
bins=200, histtype='step', range=data_range)
plt.legend(loc='upper left');
# <codecell>
# Using a polynomial to fit a distribution is problematic, because the
# polynomial can assume negative values, which results in NaN (not a number)
# values in the likelihood function.
# To avoid this problem we restrict the fit to the range (0, 5) where
# the polynomial is clearly positive.
fit_range = (0, 5)
normalized_poly = probfit.Normalized(probfit.Polynomial(2), fit_range)
normalized_poly = probfit.Extended(normalized_poly, extname='NBkg')
gauss1 = probfit.Extended(probfit.rename(probfit.gaussian, ['x', 'mu1', 'sigma1']), extname='N1')
gauss2 = probfit.Extended(probfit.rename(probfit.gaussian, ['x', 'mu2', 'sigma2']), extname='N2')
# Define an extended PDF consisting of three components
pdf = probfit.AddPdf(normalized_poly, gauss1, gauss2)
print('normalized_poly: {}'.format(probfit.describe(normalized_poly)))
print('gauss1: {}'.format(probfit.describe(gauss1)))
print('gauss2: {}'.format(probfit.describe(gauss2)))
print('pdf: {}'.format(probfit.describe(pdf)))
# <codecell>
# Define the cost function in the usual way ...
binned_likelihood = probfit.BinnedLH(pdf, data_all, bins=200, extended=True, bound=fit_range)
# This is a quite complex fit (11 free parameters!), so we need good starting values.
# Actually we even need to set an initial parameter error
# for 'mu1' and 'mu2' to make MIGRAD converge.
# The initial parameter error is used as the initial step size in the minimization.
pars = dict(mu1=1.9, error_mu1=0.1, sigma1=0.2, N1=3000,
mu2=4.1, error_mu2=0.1, sigma2=0.1, N2=5000,
c_0=4, c_1=4, c_2=1, NBkg=20000)
minuit = iminuit.Minuit(binned_likelihood, pedantic=False, print_level=0, **pars)
# You can see that the model already roughly matches the data
binned_likelihood.draw(minuit, parts=True);
# <codecell>
# This can take a while ... the likelihood is evaluated a few hundred times
# (and each time the distributions are evaluated, including the
# numerical computation of the normalizing integrals)
minuit.migrad();
# <codecell>
binned_likelihood.show(minuit, parts=True);
minuit.print_fmin()
minuit.print_matrix()
# <markdowncell>
# Note the red upper left corner in the correlation matrix above?
#
# It shows that the three polynomial parameters `c_0`, `c_1` and `c_2` are highly correlated.
# The reason is that we put a constraint on the polynomial to be normalized over the fit range:
#
# fit_range = (0, 5)
# normalized_poly = probfit.Normalized(probfit.Polynomial(2), fit_range)
# normalized_poly = probfit.Extended(normalized_poly, extname='NBkg')
#
# To resolve this problem you could simply use a non-normalized and non-extended polynomial to model the background. We won't do this here, though ...
# <markdowncell>
# ## Custom Drawing
#
# The `draw()` and `show()` methods we provide are intended to just give you a quick look at your fit.
#
# To make a custom drawing you can use the return value of `draw()` and `show()`.
# <codecell>
# You should copy & paste the return tuple from the `draw` docstring ...
((data_edges, datay), (errorp, errorm), (total_pdf_x, total_pdf_y), parts) = binned_likelihood.draw(minuit, parts=True);
# ... now we have everything to make our own plot
# <codecell>
# Now make the plot as pretty as you like, e.g. with matplotlib.
plt.figure(figsize=(8, 5))
plt.errorbar(probfit.mid(data_edges), datay, errorp, fmt='.', capsize=0, color='Gray', label='Data')
plt.plot(total_pdf_x, total_pdf_y, color='blue', lw=2, label='Total Model')
colors = ['orange', 'purple', 'DarkGreen']
labels = ['Background', 'Signal 1', 'Signal 2']
for color, label, part in zip(colors, labels, parts):
x, y = part
plt.plot(x, y, ls='--', color=color, label=label)
plt.grid(True)
plt.legend(loc='upper left');
# <markdowncell>
# ## Simultaneous fit to several data sets
#
# Sometimes what we want to fit is the sum of the likelihoods / chi^2 of two PDFs for two different datasets that share some parameters.
#
# In this example, we will fit two Gaussian distributions where we know that the widths are the same
# but the peaks are at different places.
# <codecell>
# Generate some example data
np.random.seed(0)
data1 = np.random.randn(10000) + 3 # mean = 3, sigma = 1
data2 = np.random.randn(10000) - 2 # mean = -2, sigma = 1
plt.figure(figsize=(12,4))
plt.subplot(121)
plt.hist(data1, bins=100, range=(-7, 7), histtype='step', label='data1')
plt.legend()
plt.subplot(122)
plt.hist(data2, bins=100, range=(-7, 7), histtype='step', label='data2')
plt.legend();
# <codecell>
# There is nothing special about the built-in cost functions
# except for some utility functions like draw and show
likelihood1 = probfit.UnbinnedLH(probfit.rename(probfit.gaussian, ('x', 'mean2', 'sigma')), data1)
likelihood2 = probfit.UnbinnedLH(probfit.gaussian, data2)
simultaneous_likelihood = probfit.SimultaneousFit(likelihood1, likelihood2)
print(probfit.describe(likelihood1))
print(probfit.describe(likelihood2))
# Note that the simultaneous likelihood has only 3 parameters, because the
# 'sigma' parameter is tied (i.e. linked to always be the same).
print(probfit.describe(simultaneous_likelihood))
# <codecell>
# Ah, the beauty of Minuit ... it doesn't care what your cost function is ...
# you can use it to fit (i.e. compute optimal parameters and parameter errors) anything.
minuit = iminuit.Minuit(simultaneous_likelihood, sigma=0.5, pedantic=False, print_level=0)
# Well, there's one thing we have to tell Minuit so that it can compute parameter errors,
# and that is the value of `errordef`, a.k.a. `up` (explained above).
# This is a likelihood fit, so we need `errordef = 0.5` and not the default `errordef = 1`:
minuit.errordef = 0.5
# <codecell>
# Run the fit and print the results
minuit.migrad();
minuit.print_fmin()
minuit.print_matrix()
# <codecell>
simultaneous_likelihood.draw(minuit);
# <markdowncell>
# ## Blinding parameters
#
# Often, an analyst would like to avoid looking at the values of the fitted parameter(s) before he/she has finalized the analysis, in order to avoid being biased by his/her own prejudice. probfit provides a transformation function that hides the true value(s) of the parameter(s). The transformation function requires a string to set the seed of the random number generator, and a scale to smear the parameter(s) using a Gaussian.
# <codecell>
from probfit import UnbinnedLH, BlindFunc, rename, AddPdfNorm
from probfit import gaussian
from iminuit import Minuit, describe
from probfit import gen_toy
# <codecell>
g0 = rename(gaussian, ['x', 'm0', 's0'])
g1 = rename(gaussian, ['x', 'm1', 's1'])
pdf = AddPdfNorm(g0, g1)
describe(pdf)
# <codecell>
np.random.seed(0)
toydata = gen_toy(pdf, 1000,(-10,10), m0=-2, m1=2, s0=1, s1=1, f_0=0.3, quiet=False)
# <codecell>
inipars = dict(m0=0, m1=0, s0=1, s1=1, f_0=0.5, error_m0=0.1, error_m1=0.1, error_s0=0.1, error_s1=0.1, error_f_0=0.1)
# <codecell>
# Normal fit
uh1 = UnbinnedLH(pdf, toydata)
m1 = Minuit(uh1, print_level=1, **inipars)
m1.migrad();
uh1.draw();
print(m1.values)
# <codecell>
# Blind one parameter
uh2 = UnbinnedLH(BlindFunc(pdf, toblind='m1', seedstring='some_random_stuff', width=0.5, signflip=False), toydata)
m2 = Minuit(uh2, print_level=1, **inipars)
m2.migrad();
uh2.draw();
print(m2.values)
# <codecell>
# Blind more than one parameter. They will be shifted by the same amount
uh3 = UnbinnedLH(BlindFunc(pdf, ['m0', 'm1'], seedstring='some_random_stuff', width=0.5, signflip=False), toydata)
m3 = Minuit(uh3, print_level=1, **inipars)
m3.migrad();
uh3.draw();
print(m3.values)
# <codecell>
print(m1.values)
print(m2.values)
print(m3.values)
print('')
print(m1.errors)
print(m2.errors)
print(m3.errors)
# <codecell>
print(m3.values['m0'] - m1.values['m0'])
print(m3.values['m1'] - m1.values['m1'])
# <codecell>
# Now it's your turn ...
# try to apply probfit / iminuit to your own modeling / fitting task!
|
|
import pytest
from eth_utils import (
add_0x_prefix,
decode_hex,
remove_0x_prefix,
to_canonical_address,
to_checksum_address,
to_normalized_address,
)
from eth_accounts.ciphers import ciphers
from eth_accounts.kdfs import kdfs
from eth_accounts.validation import validate_keystore
from eth_accounts import (
InvalidKeystore,
UnsupportedKeystore,
)
from .fixtures import keystore
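# For orientation: the `keystore` fixture (defined in .fixtures, not shown here)
# is expected to return a fresh version-3 keystore dict roughly of this shape
# (an illustrative sketch only; the exact values live in the fixture):
#
#     {
#         'version': 3,
#         'id': '<uuid>',
#         'address': '<40 hex chars>',
#         'crypto': {
#             'cipher': '<a cipher name from eth_accounts.ciphers.ciphers>',
#             'cipherparams': {...},
#             'ciphertext': '<hex>',
#             'kdf': '<a kdf name from eth_accounts.kdfs.kdfs>',
#             'kdfparams': {...},
#             'mac': '<64 hex chars>',
#         },
#     }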
def test_template_valid(keystore):
validate_keystore(keystore)
def test_no_missing_required_fields():
required_fields = ['crypto', 'version']
for field in required_fields:
k = keystore()
k.pop(field)
with pytest.raises(InvalidKeystore):
validate_keystore(k)
def test_optional_fields():
optional_present = ['id', 'address']
optional_missing = [
('name', 'test'),
('meta', 'test')
]
for field in optional_present:
k = keystore()
k.pop(field)
validate_keystore(k)
for field, value in optional_missing:
k = keystore()
k[field] = value
validate_keystore(k)
def test_no_missing_crypto_fields():
required_fields = ['cipher', 'cipherparams', 'ciphertext', 'kdf', 'kdfparams', 'mac']
for field in required_fields:
k = keystore()
k['crypto'].pop(field)
with pytest.raises(InvalidKeystore):
validate_keystore(k)
def test_no_additional_crypto_fields():
additional_fields = [
('test', 'test'),
('CIPHER', {})
]
for field, value in additional_fields:
k = keystore()
k['crypto'][field] = value
with pytest.raises(InvalidKeystore):
validate_keystore(k)
def test_supported_versions(keystore):
valid_versions = [3]
for version in valid_versions:
keystore['version'] = version
validate_keystore(keystore)
def test_invalid_versions(keystore):
invalid_versions = [3.0, '3', '3.0', 'three', -1, None, [], {}, [3], {3: 3}]
for version in invalid_versions:
keystore['version'] = version
with pytest.raises(InvalidKeystore):
validate_keystore(keystore)
def test_unsupported_versions(keystore):
unsupported_versions = [1, 2, 4]
for version in unsupported_versions:
keystore['version'] = version
with pytest.raises(UnsupportedKeystore):
validate_keystore(keystore)
def test_valid_addresses(keystore):
address_template = '0123456789abcdef0123456789abcdef01234567'
valid_addresses = [
to_normalized_address(address_template),
remove_0x_prefix(to_normalized_address(address_template)),
to_checksum_address(address_template),
remove_0x_prefix(to_checksum_address(address_template)),
address_template.upper(),
add_0x_prefix(address_template.upper())
]
for address in valid_addresses:
keystore['address'] = address
validate_keystore(keystore)
def test_invalid_addresses(keystore):
address_template = '0123456789abcdef0123456789abcdef01234567'
    invalid_addresses = [
address_template[:-2],
address_template + '89',
to_canonical_address(address_template),
'gg' * 20,
None,
0,
{},
[],
[address_template]
]
    for address in invalid_addresses:
keystore['address'] = address
with pytest.raises(InvalidKeystore):
validate_keystore(keystore)
def test_valid_meta(keystore):
valid_meta = ['', 'test']
for meta in valid_meta:
keystore['meta'] = meta
validate_keystore(keystore)
def test_invalid_meta(keystore):
invalid_meta = [0, None, {}, [], ['meta'], {'meta': 'meta'}]
for meta in invalid_meta:
keystore['meta'] = meta
with pytest.raises(InvalidKeystore):
validate_keystore(keystore)
def test_valid_name(keystore):
valid_names = ['', 'test']
for name in valid_names:
keystore['name'] = name
validate_keystore(keystore)
def test_invalid_name(keystore):
invalid_names = [0, None, {}, [], ['meta'], {'meta': 'meta'}]
for name in invalid_names:
keystore['name'] = name
with pytest.raises(InvalidKeystore):
validate_keystore(keystore)
def test_valid_macs(keystore):
valid_macs = ['0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef']
for mac in valid_macs:
keystore['crypto']['mac'] = mac
validate_keystore(keystore)
def test_invalid_macs(keystore):
mac_template = '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
invalid_macs = [
'0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdeF',
mac_template.upper(),
mac_template[:-2],
mac_template + '01',
'gg' * 32,
add_0x_prefix(mac_template),
decode_hex(mac_template)
]
for mac in invalid_macs:
keystore['crypto']['mac'] = mac
with pytest.raises(InvalidKeystore):
validate_keystore(keystore)
def test_unsupported_ciphers(keystore):
unsupported_ciphers = ['', 'test']
for cipher in unsupported_ciphers:
assert cipher not in ciphers
keystore['crypto']['cipher'] = cipher
with pytest.raises(UnsupportedKeystore):
validate_keystore(keystore)
def test_invalid_ciphers(keystore):
invalid_ciphers = [5, None, [], {}]
for cipher in invalid_ciphers:
keystore['crypto']['cipher'] = cipher
with pytest.raises(InvalidKeystore):
validate_keystore(keystore)
def test_invalid_cipher_params(keystore):
invalid_params = [5, None, [], 'params']
for params in invalid_params:
keystore['crypto']['cipherparams'] = params
with pytest.raises(InvalidKeystore):
validate_keystore(keystore)
def test_unsupported_kdfs(keystore):
unsupported_kdfs = ['', 'test']
for kdf in unsupported_kdfs:
assert kdf not in kdfs
keystore['crypto']['kdf'] = kdf
with pytest.raises(UnsupportedKeystore):
validate_keystore(keystore)
def test_invalid_kdfs(keystore):
invalid_kdfs = [5, None, [], {}]
for kdf in invalid_kdfs:
keystore['crypto']['kdf'] = kdf
with pytest.raises(InvalidKeystore):
validate_keystore(keystore)
def test_invalid_kdf_params(keystore):
invalid_params = [5, None, [], 'params']
for params in invalid_params:
keystore['crypto']['kdfparams'] = params
with pytest.raises(InvalidKeystore):
validate_keystore(keystore)
|
|
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import datetime
import errno
import os
import re
import subprocess
import sys
import trollius as asyncio
from shlex import split as cmd_split
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
from catkin_pkg.packages import find_packages
from .terminal_color import ColorMapper
color_mapper = ColorMapper()
clr = color_mapper.clr
try:
string_type = basestring
except NameError:
string_type = str
class FakeLock(asyncio.locks.Lock):
"""Fake lock used to mimic an asyncio.Lock but without causing synchronization"""
def locked(self):
return False
@asyncio.coroutine
def acquire(self):
raise asyncio.Return(True)
def release(self):
pass
def getcwd(symlinks=True):
"""Get the current working directory.
:param symlinks: If True, then get the path considering symlinks. If false,
resolve the path to the actual path.
:type symlinks: bool
:returns: the current working directory
:rtype: str
"""
cwd = ''
# Get the real path
realpath = os.getcwd()
# The `PWD` environment variable should contain the path that we took to
    # get here, including symlinks
if symlinks:
cwd = os.environ.get('PWD', '')
# Fallback on `getcwd` if the `PWD` variable is wrong
if not cwd or not os.path.exists(cwd) or os.path.realpath(cwd) != realpath:
cwd = realpath
return cwd
def format_time_delta(delta):
"""Formats a given time delta, in seconds, into a day-hour-minute-second string
Seconds are limited to one decimal point accuracy. Days, hours, and minutes
are not printed unless required.
Examples:
1.45 => 1.4 seconds
61.45 => 1 minute and 1.4 seconds
121.45 => 2 minutes and 1.4 seconds
3721.45 => 1 hour 2 minutes and 1.4 seconds
7321.45 => 2 hours 2 minutes and 1.4 seconds
        93721.45 => 1 day 2 hours 2 minutes and 1.4 seconds
:param delta: time delta to format, in seconds
:type delta: float
:returns: formatted time string
:rtype: str
"""
days = "0"
date_str = str(datetime.timedelta(seconds=delta))
if ', ' in date_str:
days, date_str = date_str.split(', ')
hours, minutes, seconds = date_str.split(':')
msg = "" if int(days.split(' ')[0]) == 0 else days + " "
msg += "" if int(hours) == 0 else (hours + " hour{0} ".format('' if int(hours) <= 1 else 's'))
msg += "" if int(minutes) == 0 else ("{0} minute{1} and ".format(int(minutes), '' if int(minutes) <= 1 else 's'))
msg += "{0:.1f}".format(float(seconds))
msg += " seconds"
return msg
def format_time_delta_short(delta):
"""Formats a given time delta, in seconds, into a short day-hour-minute-second string
Seconds are limited to one decimal point accuracy. Days, hours, and minutes
are not printed unless required.
Examples:
1.45 => 1.4
61.45 => 1:01.4
121.45 => 2:01.4
3721.45 => 1:02:01.4
7321.45 => 2:02:01.4
        93721.45 => 1 day 2:02:01.4
:param delta: time delta to format, in seconds
:type delta: float
:returns: formatted time string
:rtype: str
"""
days = "0"
date_str = str(datetime.timedelta(seconds=delta))
if ', ' in date_str:
days, date_str = date_str.split(', ')
hours, minutes, seconds = date_str.split(':')
msg = "" if int(days.split(' ')[0]) == 0 else days + " "
msg += "" if int(hours) == 0 else (hours + ":")
msg += "" if int(minutes) == 0 else (minutes + ":")
msg += ("{0:.1f}" if int(minutes) == 0 else "{0:04.1f}").format(float(seconds))
return msg
__recursive_build_depends_cache = {}
def get_cached_recursive_build_depends_in_workspace(package, workspace_packages):
"""Returns cached or calculated recursive build dependes for a given package
If the recursive build depends for this package and this set of workspace
packages has already been calculated, the cached results are returned.
:param package: package for which the recursive depends should be calculated
:type package: :py:class:`catkin_pkg.package.Package`
:param workspace_packages: packages in the workspace, keyed by name, with
value being a tuple of package path and package object
:type workspace_packages: dict(package_name, tuple(package path,
:py:class:`catkin_pkg.package.Package`))
:returns: list of package path, package object tuples which are the
recursive build depends for the given package
:rtype: list(tuple(package path, :py:class:`catkin_pkg.package.Package`))
"""
workspace_key = ','.join([pkg.name for pth, pkg in workspace_packages])
if workspace_key not in __recursive_build_depends_cache:
__recursive_build_depends_cache[workspace_key] = {}
cache = __recursive_build_depends_cache[workspace_key]
if package.name not in cache:
cache[package.name] = get_recursive_build_depends_in_workspace(package, workspace_packages)
__recursive_build_depends_cache[workspace_key] = cache
return __recursive_build_depends_cache[workspace_key][package.name]
def get_recursive_build_depends_in_workspace(package, ordered_packages):
"""Calculates the recursive build dependencies of a package which are also in the ordered_packages
:param package: package for which the recursive depends should be calculated
:type package: :py:class:`catkin_pkg.package.Package`
:param ordered_packages: packages in the workspace, ordered topologically,
stored as a list of tuples of package path and package object
:type ordered_packages: list(tuple(package path,
:py:class:`catkin_pkg.package.Package`))
:returns: list of package path, package object tuples which are the
recursive build depends for the given package
:rtype: list(tuple(package path, :py:class:`catkin_pkg.package.Package`))
"""
workspace_packages_by_name = dict([(pkg.name, (pth, pkg)) for pth, pkg in ordered_packages])
workspace_package_names = [pkg.name for pth, pkg in ordered_packages]
recursive_depends = []
deps = package.build_depends + package.buildtool_depends + package.test_depends
depends = set([dep.name for dep in deps])
checked_depends = set()
while list(depends - checked_depends):
# Get a dep
dep = list(depends - checked_depends).pop()
# Add the dep to the checked list
checked_depends.add(dep)
# If it is not in the workspace, continue
if dep not in workspace_package_names:
continue
# Add the build, buildtool, and run depends of this dep to the list to be checked
dep_pth, dep_pkg = workspace_packages_by_name[dep]
dep_depends = dep_pkg.build_depends + dep_pkg.buildtool_depends + dep_pkg.run_depends
depends.update(set([d.name for d in dep_depends]))
# Add this package to the list of recursive dependencies for this package
recursive_depends.append((dep_pth, dep_pkg))
return recursive_depends
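# Illustrative use of the helper above (hypothetical names): given topologically
# ordered (path, package) tuples from catkin_pkg, e.g.
#
#     ordered_packages = [(pth, pkg), ...]
#     deps = get_recursive_build_depends_in_workspace(some_pkg, ordered_packages)
#
# `deps` then contains only the workspace-internal transitive build dependencies
# of `some_pkg`.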
def get_recursive_run_depends_in_workspace(packages, ordered_packages):
"""Calculates the recursive run depends of a set of packages which are also in the ordered_packages
but excluding packages which are build depended on by another package in the list
:param packages: packages for which the recursive depends should be calculated
:type packages: list of :py:class:`catkin_pkg.package.Package`
:param ordered_packages: packages in the workspace, ordered topologically,
stored as a list of tuples of package path and package object
:type ordered_packages: list(tuple(package path,
:py:class:`catkin_pkg.package.Package`))
:returns: list of package path, package object tuples which are the
recursive run depends for the given package
:rtype: list(tuple(package path, :py:class:`catkin_pkg.package.Package`))
"""
workspace_packages_by_name = dict([(pkg.name, (pth, pkg)) for pth, pkg in ordered_packages])
workspace_package_names = [pkg.name for pth, pkg in ordered_packages]
recursive_depends = []
depends = set([dep.name for package in packages for dep in package.run_depends])
checked_depends = set()
while len(depends - checked_depends) > 0:
# Get a dep
dep = list(depends - checked_depends).pop()
# Add the dep to the checked list
checked_depends.add(dep)
# If it is not in the workspace, continue
if dep not in workspace_package_names:
continue
# Add the run depends of this dep to the list to be checked
dep_pth, dep_pkg = workspace_packages_by_name[dep]
depends.update(set([d.name for d in dep_pkg.run_depends]))
# Also update the checked_depends with its build depends
checked_depends.update(set([d.name for d in (dep_pkg.buildtool_depends + dep_pkg.build_depends)]))
# Add this package to the list of recursive dependencies for this package
recursive_depends.append((dep_pth, dep_pkg))
return recursive_depends
def get_recursive_build_dependants_in_workspace(package_name, ordered_packages):
"""Calculates the recursive build dependants of a package which are also in
the ordered_packages
    :param package_name: name of the package for which the recursive dependants should be calculated
    :type package_name: str
:param ordered_packages: packages in the workspace, ordered topologically,
stored as a list of tuples of package path and package object
:type ordered_packages: list(tuple(package path,
:py:class:`catkin_pkg.package.Package`))
:returns: list of package path, package object tuples which are the
        recursive build dependants for the given package
:rtype: list(tuple(package path, :py:class:`catkin_pkg.package.Package`))
"""
recursive_dependants = list()
for pth, pkg in reversed(ordered_packages):
# Break if this is one to check
if pkg.name == package_name:
break
# Check if this package depends on the target package
deps = get_recursive_build_depends_in_workspace(pkg, ordered_packages)
deps_names = [p.name for _, p in deps]
if package_name in deps_names:
recursive_dependants.insert(0, (pth, pkg))
return recursive_dependants
def is_tty(stream):
"""Returns True if the given stream is a tty, else False"""
return hasattr(stream, 'isatty') and stream.isatty()
unicode_error_printed = False
unicode_sanitizer = re.compile(r'[^\x00-\x7F]+')
def log(*args, **kwargs):
"""Wrapper for print, allowing for special handling where necessary"""
global unicode_error_printed
try:
print(*args, **kwargs)
except UnicodeEncodeError:
# Strip unicode characters from string args
sanitized_args = [unicode_sanitizer.sub('?', a)
if type(a) in [str, unicode]
else a
for a in args]
print(*sanitized_args, **kwargs)
        # Warn the user (once) that some characters could not be encoded and were stripped
if not unicode_error_printed:
print('WARNING: Could not encode unicode characters. Please set the'
' PYTHONIOENCODING environment variable to see complete output.'
' (i.e. PYTHONIOENCODING=UTF-8)',
file=sys.stderr)
unicode_error_printed = True
def terminal_width_windows():
"""Returns the estimated width of the terminal on Windows"""
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
# return default size if actual size can't be determined
if not res:
return 80
import struct
(bufx, bufy, curx, cury, wattr, left, top, right, bottom, maxx, maxy)\
= struct.unpack("hhhhHhhhhhh", csbi.raw)
width = right - left + 1
return width
def terminal_width_linux():
"""Returns the estimated width of the terminal on linux"""
width = subprocess.Popen('tput cols', shell=True, stdout=subprocess.PIPE, close_fds=False).stdout.readline()
return int(width)
def terminal_width():
"""Returns the estimated width of the terminal"""
try:
return terminal_width_windows() if os.name == 'nt' else terminal_width_linux()
except ValueError:
# Failed to get the width, use the default 80
return 80
_ansi_escape = re.compile(r'\x1b[^m]*m')
def remove_ansi_escape(string):
"""Removes any ansi escape sequences from a string and returns it"""
global _ansi_escape
return _ansi_escape.sub('', string)
def slice_to_printed_length(string, length):
"""Truncates a string, which may contain non-printable characters, to a printed length
For example:
msg = '\033[32mfoo\033[31mbar\033[0m'
has a length of 20, but a printed length of 6. If you wanted to truncate the
printed string to 4, then printing ``msg[4]`` would not provide the desired
result. Instead the actual slice index must consider the non-printable
characters.
:param string: string to be truncated
:type string: str
:param length: printed length of the resulting string
:type length: int
:returns: truncated string
:rtype: str
"""
global _ansi_escape
lookup_array = []
current_index = 0
matches = list(_ansi_escape.finditer(string))
for m in matches:
for x in range(m.start() - current_index):
lookup_array.append(current_index)
current_index += 1
current_index += len(m.group())
if not matches:
# If no matches, then set the lookup_array to a plain range
lookup_array = list(range(len(string)))
lookup_array.append(len(string))
if length > len(lookup_array):
return string
return string[:lookup_array[length]] + clr('@|')
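# Continuing the docstring example above: for
#
#     msg = '\033[32mfoo\033[31mbar\033[0m'
#
# slice_to_printed_length(msg, 4) keeps the color codes but only the first four
# printed characters, i.e. it returns '\033[32mfoo\033[31mb' plus a color reset.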
def printed_fill(string, length):
"""Textwrapping for strings with esacpe characters."""
splat = string.replace('\\n', ' \\n ').split()
count = 0
lines = []
cur_line = []
for word in splat:
word_len = len(remove_ansi_escape(word))
found_newline = (word == '\\n')
if found_newline:
lines.append(cur_line)
cur_line = []
count = 0
elif count + word_len < length:
cur_line.append(word)
count += word_len + 1
else:
if len(cur_line) > 0:
lines.append(cur_line)
cur_line = [word]
count = word_len + 1
if len(cur_line) > 0:
lines.append(cur_line)
return ("\n".join([' '.join(line) for line in lines])).replace('\\t', '\t')
def __wide_log(msg, **kwargs):
width = terminal_width()
rhs = ''
if 'rhs' in kwargs:
rhs = ' ' + kwargs['rhs']
del kwargs['rhs']
if rhs:
kwargs['truncate'] = True
rhs_len = len(remove_ansi_escape(rhs))
msg_len = len(remove_ansi_escape(msg))
if 'truncate' in kwargs:
if kwargs['truncate'] and msg_len >= width - 1:
msg = slice_to_printed_length(msg, width - rhs_len - 4) + '...'
msg_len = len(remove_ansi_escape(msg))
del kwargs['truncate']
if (msg_len + rhs_len) < width:
log(msg + (' ' * (width - msg_len - rhs_len - 1)) + rhs, **kwargs)
else:
log(msg, **kwargs)
wide_log_fn = __wide_log
def disable_wide_log():
"""Disables wide logging globally
:see: :py:func:`wide_log`
"""
global wide_log_fn
def disabled_wide_log(msg, **kwargs):
if 'rhs' in kwargs:
del kwargs['rhs']
if 'truncate' in kwargs:
del kwargs['truncate']
log(msg, **kwargs)
wide_log_fn = disabled_wide_log
def wide_log(msg, **kwargs):
"""Prints a message to the screen, filling the remainder of the screen with spaces
This is useful for printing lines which will completely overwrite previous
content printed with a carriage return at the end.
If the message is wider than the terminal, then no filling is done.
The wide logging can be disabled with :py:func:`disable_wide_log`, in order
to prevent queries to the terminal width, which is useful when output is
not to a terminal, like when being used with Continuous Integration.
Truncating and right hand side messages are disabled when wide_log is
disabled as well.
When a right hand side message is given, it implies truncate is True.
:param msg: message to be printed
:type msg: str
:param rhs: message to print at the right hand side of the screen
:type rhs: str
    :param truncate: If True, messages wider than the terminal will be truncated
:type truncate: bool
"""
try:
global wide_log_fn
wide_log_fn(msg, **kwargs)
except IOError:
# This happens when someone ctrl-c's during a log message
pass
def find_enclosing_package(search_start_path=None, ws_path=None, warnings=None, symlinks=True):
"""Get the package containing the current directory."""
search_start_path = search_start_path or getcwd(symlinks=symlinks)
child_path = ''
while True:
pkgs = find_packages(search_start_path, warnings=warnings)
# Check if the previous directory is a catkin package
if child_path in pkgs:
return pkgs[child_path].name
# Update search path or end
(search_start_path, child_path) = os.path.split(search_start_path)
if len(child_path) == 0 or search_start_path == ws_path:
break
return None
def version_tuple(v):
"""Get an integer version tuple from a string."""
return tuple(map(int, (str(v).split("."))))
def mkdir_p(path):
"""Equivalent to UNIX mkdir -p"""
if os.path.exists(path):
return
try:
return os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def format_env_dict(environ):
"""Format an environment dict for printing to console similarly to `typeset` builtin."""
return '\n'.join([
'typeset -x {}={}'.format(k, cmd_quote(v))
for k, v in environ.items()
])
def parse_env_str(environ_str):
"""Parse a quoted environment string generated by format_env_dict, or `typeset` builtin."""
try:
split_envs = [e.split('=', 1) for e in cmd_split(environ_str)]
return {
e[0]: e[1] for e
in split_envs
if len(e) == 2
}
except ValueError:
print('WARNING: Could not parse env string: `{}`'.format(environ_str),
file=sys.stderr)
raise
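# Illustrative round trip for the two helpers above (example values only,
# not executed at import time):
#
#     env = {'CMAKE_PREFIX_PATH': '/opt/ros/indigo', 'EXTRA_FLAGS': '-a -b'}
#     env_str = format_env_dict(env)
#     assert parse_env_str(env_str) == env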
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
from tensorflow.contrib.framework.python.ops import add_arg_scope as contrib_add_arg_scope
from tensorflow.contrib.framework.python.ops import gen_variable_ops
from tensorflow.contrib.util import loader
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
from tensorflow.python.util.deprecation import deprecated
__all__ = ['add_model_variable',
'assert_global_step',
'assert_or_get_global_step',
'assign_from_checkpoint',
'assign_from_checkpoint_fn',
'assign_from_values',
'assign_from_values_fn',
'create_global_step',
'filter_variables',
'get_global_step',
'get_or_create_global_step',
'get_local_variables',
'get_model_variables',
'get_trainable_variables',
'get_unique_variable',
'get_variables_by_name',
'get_variables_by_suffix',
'get_variable_full_name',
'get_variables_to_restore',
'get_variables',
'global_variable',
'local_variable',
'model_variable',
'variable',
'VariableDeviceChooser',
'zero_initializer']
def zero_initializer(ref, use_locking=True, name="zero_initializer"):
"""Initialize 'ref' with all zeros, ref tensor should be uninitialized.
If already initialized, you will get ValueError. This op is intended to
save memory during initialization.
Args:
    ref: ref of the tensor to be zero initialized.
name: optional name for this operation.
Returns:
    ref that is initialized.
Raises:
ValueError: If ref tensor is initialized.
"""
loader.load_op_library(
resource_loader.get_path_to_datafile("_variable_ops.so"))
return gen_variable_ops.zero_initializer(ref, name=name)
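# Usage sketch (names are illustrative; assumes an active session `sess` and an
# otherwise uninitialized variable `var` created elsewhere in the graph):
#
#   sess.run(zero_initializer(var))  # fills `var` with zeros without first
#                                    # materializing an initial-value tensor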
@deprecated(None, "Please switch to tf.train.assert_global_step")
def assert_global_step(global_step_tensor):
training_util.assert_global_step(global_step_tensor)
def assert_or_get_global_step(graph=None, global_step_tensor=None):
"""Verifies that a global step tensor is valid or gets one if None is given.
If `global_step_tensor` is not None, check that it is a valid global step
tensor (using `assert_global_step`). Otherwise find a global step tensor using
`get_global_step` and return it.
Args:
graph: The graph to find the global step tensor for.
global_step_tensor: The tensor to check for suitability as a global step.
If None is given (the default), find a global step tensor.
Returns:
A tensor suitable as a global step, or `None` if none was provided and none
was found.
"""
if global_step_tensor is None:
# Get the global step tensor the same way the supervisor would.
global_step_tensor = get_global_step(graph)
else:
assert_global_step(global_step_tensor)
return global_step_tensor
@deprecated(None, "Please switch to tf.train.get_global_step")
def get_global_step(graph=None):
return training_util.get_global_step(graph)
@deprecated(None, "Please switch to tf.train.create_global_step")
def create_global_step(graph=None):
"""Create global step tensor in graph.
This API is deprecated. Use core framework training version instead.
Args:
graph: The graph in which to create the global step tensor. If missing,
use default graph.
Returns:
Global step tensor.
Raises:
ValueError: if global step tensor is already defined.
"""
return training_util.create_global_step(graph)
@deprecated(None, "Please switch to tf.train.get_or_create_global_step")
def get_or_create_global_step(graph=None):
"""Returns and create (if necessary) the global step tensor.
Args:
graph: The graph in which to create the global step tensor. If missing, use
default graph.
Returns:
The global step tensor.
"""
return training_util.get_or_create_global_step(graph)
def local_variable(initial_value,
validate_shape=True,
name=None,
use_resource=None):
"""Create a variable with a value and add it to `GraphKeys.LOCAL_VARIABLES`.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
New variable.
"""
return variable_scope.variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=validate_shape,
use_resource=use_resource,
name=name)
def global_variable(initial_value,
validate_shape=True,
name=None,
use_resource=None):
"""Create a variable with a value and add it to `GraphKeys.GLOBAL_VARIABLES`.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
New variable.
"""
return variable_scope.variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
validate_shape=validate_shape,
use_resource=use_resource,
name=name)
@contrib_add_arg_scope
def variable(name, shape=None, dtype=None, initializer=None,
regularizer=None, trainable=True, collections=None,
caching_device=None, device=None,
partitioner=None, custom_getter=None, use_resource=None):
"""Gets an existing variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
      If None, it defaults to `tf.GraphKeys.GLOBAL_VARIABLES`.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
    device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
The created or existing variable.
"""
collections = list(collections if collections is not None
else [ops.GraphKeys.GLOBAL_VARIABLES])
# Remove duplicates
collections = list(set(collections))
getter = variable_scope.get_variable
if custom_getter is not None:
getter = functools.partial(custom_getter,
reuse=variable_scope.get_variable_scope().reuse)
with ops.device(device or ''):
return getter(name, shape=shape, dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
use_resource=use_resource)
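# Usage sketch for `variable` (hypothetical names; `tf` is not imported in this
# module, so take this as orientation rather than runnable code):
#
#   weights = variable('weights', shape=[784, 10],
#                      initializer=tf.truncated_normal_initializer(stddev=0.01),
#                      device='/cpu:0')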
@contrib_add_arg_scope
def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
regularizer=None, trainable=True, collections=None,
caching_device=None, device=None, partitioner=None,
custom_getter=None, use_resource=None):
"""Gets an existing model variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the
`GraphKeys.GLOBAL_VARIABLES` and `GraphKeys.MODEL_VARIABLES` collections.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
    device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
collections += [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.MODEL_VARIABLES]
var = variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable, collections=collections,
caching_device=caching_device, device=device,
partitioner=partitioner, custom_getter=custom_getter,
use_resource=use_resource)
return var
def add_model_variable(var):
"""Adds a variable to the `GraphKeys.MODEL_VARIABLES` collection.
Args:
var: a variable.
"""
if var not in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES):
ops.add_to_collection(ops.GraphKeys.MODEL_VARIABLES, var)
def get_variables(scope=None, suffix=None,
collection=ops.GraphKeys.GLOBAL_VARIABLES):
"""Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return. Can be a
variable scope or a string.
suffix: an optional suffix for filtering the variables to return.
collection: the collection to search in. Defaults to
`GraphKeys.GLOBAL_VARIABLES`.
Returns:
a list of variables in collection with scope and suffix.
"""
if isinstance(scope, variable_scope.VariableScope):
scope = scope.name
if suffix is not None:
if ':' not in suffix:
suffix += ':'
scope = (scope or '') + '.*' + suffix
return ops.get_collection(collection, scope)
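# Hedged usage sketch (not in the original module): shows how `get_variables`
# combines `scope` and `suffix` into a single regex filter on the collection.
# The scope name 'my_layer' and suffix 'weights' are made-up examples.
def _example_filter_by_scope_and_suffix():
  """Returns global variables under 'my_layer' whose op names end in 'weights'."""
  return get_variables(scope='my_layer', suffix='weights')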
def get_model_variables(scope=None, suffix=None):
"""Gets the list of model variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.MODEL_VARIABLES)
def get_local_variables(scope=None, suffix=None):
"""Gets the list of local variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.LOCAL_VARIABLES)
def get_trainable_variables(scope=None, suffix=None):
"""Gets the list of trainable variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in the trainable collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.TRAINABLE_VARIABLES)
def get_variables_to_restore(include=None, exclude=None):
"""Gets the list of the variables to restore.
Args:
include: an optional list/tuple of scope strings for filtering which
variables from the VARIABLES collection to include. If None, all
variables are included.
exclude: an optional list/tuple of scope strings for filtering which
variables from the VARIABLES collection to exclude. If None, no
variables are excluded.
Returns:
a list of variables to restore.
Raises:
TypeError: include or exclude is provided but is not a list or a tuple.
"""
if include is None:
# Include all variables.
vars_to_include = get_variables()
else:
if not isinstance(include, (list, tuple)):
raise TypeError('include is provided but is not a list or a tuple.')
vars_to_include = []
for scope in include:
vars_to_include += get_variables(scope)
vars_to_exclude = set()
if exclude is not None:
if not isinstance(exclude, (list, tuple)):
raise TypeError('exclude is provided but is not a list or a tuple.')
for scope in exclude:
vars_to_exclude |= set(get_variables(scope))
# Exclude the variables in vars_to_exclude
return [v for v in vars_to_include if v not in vars_to_exclude]
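# Hedged usage sketch (not in the original module): restore everything except a
# final classification layer, e.g. when fine-tuning on a new label set. The
# scope names 'resnet' and 'resnet/logits' are placeholders.
def _example_variables_to_restore():
  """Returns all 'resnet' variables except those under 'resnet/logits'."""
  return get_variables_to_restore(include=['resnet'],
                                  exclude=['resnet/logits'])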
def get_variables_by_suffix(suffix, scope=None):
"""Gets the list of variables that end with the given suffix.
Args:
suffix: suffix for filtering the variables to return.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and prefix.
"""
return get_variables(scope=scope, suffix=suffix)
def get_variables_by_name(given_name, scope=None):
"""Gets the list of variables that were given that name.
Args:
given_name: name given to the variable without any scope.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and scope.
"""
suffix = '/' + given_name + ':|^' + given_name + ':'
return get_variables(scope=scope, suffix=suffix)
def get_unique_variable(var_op_name):
"""Gets the variable uniquely identified by that var_op_name.
Args:
var_op_name: the full name of the variable op, including the scope.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists.
"""
candidates = get_variables(scope=var_op_name)
if not candidates:
raise ValueError('Couldn\'t find variable %s' % var_op_name)
for candidate in candidates:
if candidate.op.name == var_op_name:
return candidate
raise ValueError('Variable %s does not uniquely identify a variable' %
var_op_name)
def assign_from_values(var_names_to_values):
"""Creates an assignment operation from a given mapping.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
assign_op: An `Operation` that assigns each of the given variables to the
requested values.
feed_dict: The feed dictionary to use when evaluating `assign_op`.
Raises:
ValueError: if any of the given variable names were not found.
"""
feed_dict = {}
assign_ops = []
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, var_name)
if not var:
raise ValueError('Variable %s wasn\'t found' % var_name)
elif len(var) > 1:
# tf.get_collection is just a filter on the prefix: find the exact match:
found = False
for v in var:
if v.op.name == var_name:
var = v
found = True
break
if not found:
raise ValueError('Variable %s doesn\'t uniquely identify a variable' %
var_name)
else:
var = var[0]
# TODO(nsilberman): ensure placeholder and assign are on the same device.
# Assign a placeholder to the value that will be filled later.
placeholder_name = 'placeholder/' + var.op.name
placeholder_value = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name=placeholder_name)
assign_ops.append(var.assign(placeholder_value))
feed_dict[placeholder_value] = var_value.reshape(var.get_shape())
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
def assign_from_values_fn(var_names_to_values):
"""Returns a function that assigns specific variables from the given values.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
A function that takes a single argument, a `tf.Session`, that applies the
assignment operation.
Raises:
ValueError: if any of the given variable names were not found.
"""
assign_op, feed_dict = assign_from_values(var_names_to_values)
def callback(session):
return session.run(assign_op, feed_dict)
return callback
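# Hedged usage sketch (not in the original module): initialize a variable from
# an in-memory numpy array. Assumes an existing `tf.Session` and an array whose
# shape matches the variable; the key must be the variable's op name, and
# 'my_layer/weights' is a made-up example.
def _example_assign_from_values(session, weights_array):
  """Assigns `weights_array` to the variable named 'my_layer/weights'."""
  init_fn = assign_from_values_fn({'my_layer/weights': weights_array})
  init_fn(session)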
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
def get_variable_full_name(var):
"""Returns the full name of a variable.
For normal Variables, this is the same as the var.op.name. For
sliced or PartitionedVariables, this name is the same for all the
slices/partitions. In both cases, this is normally the name used in
a checkpoint file.
Args:
var: A `Variable` object.
Returns:
A string that is the full name.
"""
if var._save_slice_info:
return var._save_slice_info.full_name
else:
return var.op.name
# TODO(nsilberman): add flag to load exponential moving averages instead
#
# TODO(sguada): Update docs in slim/g3doc/index.md to describe
# the new feature where the var_list dictionary can have values that
# are each a list of Variables.
def assign_from_checkpoint(model_path, var_list, ignore_missing_vars=False):
"""Creates an operation to assign specific variables from a checkpoint.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of (possibly partitioned) `Variable` objects
or a dictionary mapping names in the checkpoint to the
corresponding variables or list of variables to initialize
from that checkpoint value. For partitioned Variables, the
name in the checkpoint must be the full variable, not the
name of the partitioned variable, e.g. "my_var" rather than
"my_var/part_4". If empty, returns no_op(), {}.
ignore_missing_vars: Boolean, if True ignore variables missing in the
checkpoint with a warning instead of failing.
Returns:
the restore_op and the feed_dict that need to be run to restore var_list.
Raises:
ValueError: If `ignore_missing_vars` is False and the checkpoint specified
at `model_path` is missing one of the variables in `var_list`.
"""
# Normalize var_list into a dictionary mapping names in the
# checkpoint to the list of variables to initialize from that
# checkpoint variable. Sliced (including partitioned) variables will
# end up under the same key.
grouped_vars = {}
if isinstance(var_list, (tuple, list)):
for var in var_list:
ckpt_name = get_variable_full_name(var)
if ckpt_name not in grouped_vars:
grouped_vars[ckpt_name] = []
grouped_vars[ckpt_name].append(var)
else:
for ckpt_name, value in var_list.items():
if isinstance(value, (tuple, list)):
grouped_vars[ckpt_name] = value
else:
grouped_vars[ckpt_name] = [value]
# Read each checkpoint entry. Create a placeholder variable and
# add the (possibly sliced) data from the checkpoint to the feed_dict.
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
feed_dict = {}
assign_ops = []
for ckpt_name in grouped_vars:
if not reader.has_tensor(ckpt_name):
log_str = 'Checkpoint is missing variable [%s]' % ckpt_name
if ignore_missing_vars:
logging.warning(log_str)
continue
else:
raise ValueError(log_str)
ckpt_value = reader.get_tensor(ckpt_name)
for var in grouped_vars[ckpt_name]:
placeholder_tensor = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name='placeholder/' + var.op.name)
assign_ops.append(var.assign(placeholder_tensor))
if not var._save_slice_info:
if var.get_shape() != ckpt_value.shape:
raise ValueError(
'Total size of new array must be unchanged for %s '
'lh_shape: [%s], rh_shape: [%s]'
% (ckpt_name, str(ckpt_value.shape), str(var.get_shape())))
feed_dict[placeholder_tensor] = ckpt_value.reshape(ckpt_value.shape)
else:
slice_dims = zip(var._save_slice_info.var_offset,
var._save_slice_info.var_shape)
slice_dims = [(start, start + size) for (start, size) in slice_dims]
slice_dims = [slice(*x) for x in slice_dims]
slice_value = ckpt_value[slice_dims]
slice_value = slice_value.reshape(var._save_slice_info.var_shape)
feed_dict[placeholder_tensor] = slice_value
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
# pylint: enable=protected-access
def assign_from_checkpoint_fn(model_path, var_list, ignore_missing_vars=False,
reshape_variables=False):
"""Returns a function that assigns specific variables from a checkpoint.
If ignore_missing_vars is True and no variables are found in the checkpoint
it returns None.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of `Variable` objects or a dictionary mapping names in the
checkpoint to the corresponding variables to initialize. If empty or
`None`, it would return `no_op(), None`.
ignore_missing_vars: Boolean, if True it would ignore variables missing in
the checkpoint with a warning instead of failing.
reshape_variables: Boolean, if True it would automatically reshape variables
which are of different shape than the ones stored in the checkpoint but
which have the same number of elements.
Returns:
A function that takes a single argument, a `tf.Session`, that applies the
assignment operation. If no matching variables were found in the checkpoint
then `None` is returned.
Raises:
ValueError: If var_list is empty.
"""
if not var_list:
raise ValueError('var_list cannot be empty')
if ignore_missing_vars:
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
if isinstance(var_list, dict):
var_dict = var_list
else:
var_dict = {var.op.name: var for var in var_list}
available_vars = {}
for var in var_dict:
if reader.has_tensor(var):
available_vars[var] = var_dict[var]
else:
logging.warning(
'Variable %s missing in checkpoint %s', var, model_path)
var_list = available_vars
if var_list:
saver = tf_saver.Saver(var_list, reshape=reshape_variables)
def callback(session):
saver.restore(session, model_path)
return callback
else:
logging.warning('No Variables to restore')
return None
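# Hedged usage sketch (not in the original module): build and run an init
# function that restores model variables from a checkpoint, tolerating
# variables that are absent from it. `model_path` would typically come from
# `tf.train.latest_checkpoint(checkpoint_dir)`, as noted in the docstring above.
def _example_restore_from_checkpoint(session, model_path):
  """Restores model variables from the checkpoint at `model_path`."""
  init_fn = assign_from_checkpoint_fn(model_path,
                                      get_model_variables(),
                                      ignore_missing_vars=True)
  if init_fn is not None:
    init_fn(session)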
class VariableDeviceChooser(object):
"""Device chooser for variables.
When using a parameter server, it assigns variables to tasks in a round-robin fashion.
When not using a parameter server it allows GPU or CPU placement.
"""
def __init__(self,
num_tasks=0,
job_name='ps',
device_type='CPU',
device_index=0):
"""Initialize VariableDeviceChooser.
Usage:
To use with 2 parameter servers:
VariableDeviceChooser(2)
To use without parameter servers:
VariableDeviceChooser()
VariableDeviceChooser(device_type='GPU') # For GPU placement
Args:
num_tasks: number of tasks.
job_name: String, a name for the parameter server job.
device_type: Optional device type string (e.g. "CPU" or "GPU")
device_index: int. Optional device index. If left
unspecified, device represents 'any' device_index.
"""
self._job_name = job_name
self._device_type = device_type
self._device_index = device_index
self._num_tasks = num_tasks
self._next_task_id = 0
def __call__(self, op):
device_spec = tf_device.DeviceSpec(device_type=self._device_type,
device_index=self._device_index)
if self._num_tasks > 0:
task_id = self._next_task_id
self._next_task_id = (self._next_task_id + 1) % self._num_tasks
device_spec.job = self._job_name
device_spec.task = task_id
return device_spec.to_string()
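# Hedged usage sketch (not in the original module): round-robin variable
# placement across two parameter-server tasks by passing the chooser as the
# `device` argument of `variable` above. Variable names and shapes are
# placeholders.
def _example_round_robin_placement():
  """Creates two variables whose ops are placed across ps tasks in round-robin order."""
  chooser = VariableDeviceChooser(num_tasks=2)
  v0 = variable('v0', shape=[4], device=chooser)
  v1 = variable('v1', shape=[4], device=chooser)
  return v0, v1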
def filter_variables(var_list, include_patterns=None, exclude_patterns=None,
reg_search=True):
"""Filter a list of variables using regular expressions.
First includes variables according to the list of include_patterns.
Afterwards, eliminates variables according to the list of exclude_patterns.
For example, one can obtain a list of variables with the weights of all
convolutional layers (depending on the network definition) by:
```python
variables = tf.contrib.framework.get_model_variables()
conv_weight_variables = tf.contrib.framework.filter_variables(
variables,
include_patterns=['Conv'],
exclude_patterns=['biases', 'Logits'])
```
Args:
var_list: list of variables.
include_patterns: list of regular expressions to include. Defaults to None,
which means every variable passes the include stage.
A variable is included if it matches any of the include_patterns.
exclude_patterns: list of regular expressions to exclude. Defaults to None,
which means no variables are excluded.
A variable is excluded if it matches any of the exclude_patterns.
reg_search: boolean. If True (default), performs re.search to find matches
(i.e. pattern can match any substring of the variable name). If False,
performs re.match (i.e. regexp should match from the beginning of the
variable name).
Returns:
filtered list of variables.
"""
if reg_search:
reg_exp_func = re.search
else:
reg_exp_func = re.match
# First include variables.
if include_patterns is None:
included_variables = list(var_list)
else:
included_variables = []
for var in var_list:
if any(reg_exp_func(ptrn, var.name) for ptrn in include_patterns):
included_variables.append(var)
# Afterwards, exclude variables.
if exclude_patterns is None:
filtered_variables = included_variables
else:
filtered_variables = []
for var in included_variables:
if not any(reg_exp_func(ptrn, var.name) for ptrn in exclude_patterns):
filtered_variables.append(var)
return filtered_variables
|
|
import numpy as np
import pytest
from pandas import Float64Index, Int64Index, Series, UInt64Index
import pandas._testing as tm
@pytest.fixture
def index_large():
# large values used in UInt64Index tests where no compat needed with Int64/Float64
large = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25]
return UInt64Index(large)
class TestGetLoc:
def test_get_loc_float64(self):
idx = Float64Index([0.0, 1.0, 2.0])
for method in [None, "pad", "backfill", "nearest"]:
assert idx.get_loc(1, method) == 1
if method is not None:
assert idx.get_loc(1, method, tolerance=0) == 1
for method, loc in [("pad", 1), ("backfill", 2), ("nearest", 1)]:
assert idx.get_loc(1.1, method) == loc
assert idx.get_loc(1.1, method, tolerance=0.9) == loc
with pytest.raises(KeyError, match="^'foo'$"):
idx.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.5$"):
idx.get_loc(1.5)
with pytest.raises(KeyError, match=r"^1\.5$"):
idx.get_loc(1.5, method="pad", tolerance=0.1)
with pytest.raises(KeyError, match="^True$"):
idx.get_loc(True)
with pytest.raises(KeyError, match="^False$"):
idx.get_loc(False)
with pytest.raises(ValueError, match="must be numeric"):
idx.get_loc(1.4, method="nearest", tolerance="foo")
with pytest.raises(ValueError, match="must contain numeric elements"):
idx.get_loc(1.4, method="nearest", tolerance=np.array(["foo"]))
with pytest.raises(
ValueError, match="tolerance size must match target index size"
):
idx.get_loc(1.4, method="nearest", tolerance=np.array([1, 2]))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
assert idx.get_loc(1) == 1
assert idx.get_loc(np.nan) == 0
idx = Float64Index([np.nan, 1, np.nan])
assert idx.get_loc(1) == 1
# FIXME: don't leave commented-out code
# representable by slice [0:2:2]
# pytest.raises(KeyError, idx.slice_locs, np.nan)
sliced = idx.slice_locs(np.nan)
assert isinstance(sliced, tuple)
assert sliced == (0, 3)
# not representable by slice
idx = Float64Index([np.nan, 1, np.nan, np.nan])
assert idx.get_loc(1) == 1
msg = "'Cannot get left slice bound for non-unique label: nan"
with pytest.raises(KeyError, match=msg):
idx.slice_locs(np.nan)
def test_get_loc_missing_nan(self):
# GH#8569
idx = Float64Index([1, 2])
assert idx.get_loc(1) == 0
with pytest.raises(KeyError, match=r"^3$"):
idx.get_loc(3)
with pytest.raises(KeyError, match="^nan$"):
idx.get_loc(np.nan)
with pytest.raises(TypeError, match=r"'\[nan\]' is an invalid key"):
# listlike/non-hashable raises TypeError
idx.get_loc([np.nan])
class TestGetIndexer:
def test_get_indexer_float64(self):
idx = Float64Index([0.0, 1.0, 2.0])
tm.assert_numpy_array_equal(
idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)
)
target = [-0.1, 0.5, 1.1]
tm.assert_numpy_array_equal(
idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp)
)
def test_get_indexer_nan(self):
# GH#7820
result = Float64Index([1, 2, np.nan]).get_indexer([np.nan])
expected = np.array([2], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_int64(self):
index = Int64Index(range(0, 20, 2))
target = Int64Index(np.arange(10))
indexer = index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = Int64Index(np.arange(10))
indexer = index.get_indexer(target, method="pad")
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = Int64Index(np.arange(10))
indexer = index.get_indexer(target, method="backfill")
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_uint64(self, index_large):
target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2 ** 63)
indexer = index_large.get_indexer(target)
expected = np.array([0, -1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2 ** 63)
indexer = index_large.get_indexer(target, method="pad")
expected = np.array([0, 0, 1, 2, 3, 4, 4, 4, 4, 4], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2 ** 63)
indexer = index_large.get_indexer(target, method="backfill")
expected = np.array([0, 1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
class TestWhere:
@pytest.mark.parametrize(
"index",
[
Float64Index(np.arange(5, dtype="float64")),
Int64Index(range(0, 20, 2)),
UInt64Index(np.arange(5, dtype="uint64")),
],
)
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass, index):
cond = [True] * len(index)
expected = index
result = index.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(index) - 1)
expected = Float64Index([index._na_value] + index[1:].tolist())
result = index.where(klass(cond))
tm.assert_index_equal(result, expected)
class TestTake:
@pytest.mark.parametrize("klass", [Float64Index, Int64Index, UInt64Index])
def test_take_preserve_name(self, klass):
index = klass([1, 2, 3, 4], name="foo")
taken = index.take([3, 0, 1])
assert index.name == taken.name
def test_take_fill_value_float64(self):
# GH 12631
idx = Float64Index([1.0, 2.0, 3.0], name="xxx")
result = idx.take(np.array([1, 0, -1]))
expected = Float64Index([2.0, 1.0, 3.0], name="xxx")
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = Float64Index([2.0, 1.0, np.nan], name="xxx")
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = Float64Index([2.0, 1.0, 3.0], name="xxx")
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for (axis 0 with )?size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
@pytest.mark.parametrize("klass", [Int64Index, UInt64Index])
def test_take_fill_value_ints(self, klass):
# see gh-12631
idx = klass([1, 2, 3], name="xxx")
result = idx.take(np.array([1, 0, -1]))
expected = klass([2, 1, 3], name="xxx")
tm.assert_index_equal(result, expected)
name = klass.__name__
msg = f"Unable to fill values because {name} cannot contain NA"
# fill_value=True
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -1]), fill_value=True)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = klass([2, 1, 3], name="xxx")
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for (axis 0 with )?size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestContains:
def test_contains_float64_nans(self):
index = Float64Index([1.0, 2.0, np.nan])
assert np.nan in index
def test_contains_float64_not_nans(self):
index = Float64Index([1.0, 2.0, np.nan])
assert 1.0 in index
|
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Step classes for embedding tables."""
from lingvo import compat as tf
from lingvo.core import layers
from lingvo.core import py_utils
from lingvo.core import recurrent
from lingvo.core import step
class EmbeddingStep(step.Step):
"""A simple wrapper around EmbeddingLayer and its subclasses.
This class can be used to insert an embedding lookup at the input side
of a GraphStep or StackStep.
"""
@classmethod
def Params(cls):
p = super().Params()
p.name = 'emb_step'
p.Define('emb',
layers.EmbeddingLayer.Params().Set(max_num_shards=1),
'Embedding layer params.')
return p
def __init__(self, params):
super().__init__(params)
p = params
self.CreateChild('emb', p.emb)
def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):
"""Looks up a list of embeddings from an EmbeddingLayer.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
prepared_inputs: unused.
step_inputs: A NestedMap containing a list called inputs. This list should
contain a single integer tensor of shape [batch], where each integer
represents an index into the embedding table. (By convention, all Steps
that can be used with StackStep must store inputs in
step_inputs.inputs[], but in this step it does not make sense for that
list to have more than one tensor in it).
padding: unused.
state0: unused.
Returns:
A NestedMap with `output`, a params.dtype tensor of shape
[batch, embedding_dim], and an empty NestedMap for the recurrent state.
"""
del prepared_inputs
del state0
assert len(step_inputs.inputs) == 1
output = self.emb.EmbLookup(theta.emb, step_inputs.inputs[0])
return py_utils.NestedMap(output=output), py_utils.NestedMap()
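# Hedged configuration sketch (not part of the original module): how an
# EmbeddingStep might be parameterized before being composed into a StackStep
# or GraphStep. The vocabulary size and embedding width are arbitrary, and the
# EmbeddingLayer param names (`vocab_size`, `embedding_dim`) are assumed from
# `layers.EmbeddingLayer`.
def _ExampleEmbeddingStepParams():
  """Returns EmbeddingStep params for a small toy embedding table."""
  p = EmbeddingStep.Params()
  p.emb.vocab_size = 128
  p.emb.embedding_dim = 16
  return p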
class StatefulEmbeddingStep(EmbeddingStep):
"""Simple wrapper for keeping a state of the tokens previously emitted."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('target_sos_id', 1, 'ID of the start of sentence token.')
p.Define('num_prev_tokens', 0,
'The number of previous tokens to keep in state.')
p.Define('include_current_token', False,
'Include current token in embedding lookup')
p.Define('embedding_dim', None, 'Do not use. Define it in child emb.')
p.Define('vocab_size', None, 'Do not use. Define it in child emb.')
p.name = 'stateful_emb_step'
return p
def __init__(self, params):
super().__init__(params)
p = params
p.vocab_size = p.emb.vocab_size
# Output dimensionality must match the child embedding layer.
emb_dim = (p.emb.output_dim if hasattr(p.emb, 'output_dim')
           else p.emb.embedding_dim)
assert p.embedding_dim == emb_dim, 'Inconsistent embedding_dim!'
# Total number of tokens
self.num_tokens = p.num_prev_tokens + int(p.include_current_token)
assert self.num_tokens > 0, 'Number of tokens is zero!'
# If embedding supports multiple tokens, then it must have a num_tokens
# param, otherwise the num_tokens must be 1
if hasattr(p.emb, 'num_tokens'):
assert p.emb.num_tokens == self.num_tokens, 'Inconsistent num_tokens!'
else:
assert self.num_tokens == 1, ('Since p.emb does not have the num_tokens '
'param, p.num_prev_tokens and '
'p.include_current_token must sum to 1')
def ZeroState(self, theta, prepared_inputs, batch_size):
p = self.params
state0 = super().ZeroState(theta, prepared_inputs, batch_size)
state0.prev_ids = tf.ones([batch_size, p.num_prev_tokens],
dtype=tf.float32) * p.target_sos_id
state0.embedding = tf.zeros([batch_size, p.embedding_dim], dtype=tf.float32)
return state0
def zero_state(self, theta, batch_size):
return self.ZeroState(theta, None, batch_size)
def EmbLookup(self, theta, ids, state0):
"""Use this layer like a regular EmbeddingLayer (ids -> embeddings).
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
ids: A tensor of token ids with dimensions [t, b].
state0: A NestedMap containing the state of previous tokens.
- prev_ids: A Tensor containing the n previous token ids. [batch,
num_prev_tokens]. Each row is the token ids at t-1, ..., t-n.
- embedding: The output embedding of the previous step.
Returns:
Embeddings time major [t, b, d] and next state
"""
def _FPropWrapper(theta, state0, inputs):
embedding, state1 = self.FProp(
theta,
None,
inputs,
None,
state0,
)
state1.embedding = embedding.output
return state1, py_utils.NestedMap()
steps_input = py_utils.NestedMap(inputs=[tf.cast(ids, tf.float32)])
state1, _ = recurrent.Recurrent(
theta=theta,
state0=state0,
inputs=steps_input,
cell_fn=_FPropWrapper,
)
# for training, we want the output across all timesteps
sequence_of_embeddings = state1.embedding
# for inference, we want just the last timestep's state, not all states
state1.embedding = state1.embedding[-1]
state1.prev_ids = state1.prev_ids[-1]
return sequence_of_embeddings, state1
def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):
"""Calls an embedding lookup and updates the state of token history.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
prepared_inputs: unused.
step_inputs: A NestedMap containing a list called inputs. This list should
contain a single float32 (will be converted to int32 later) tensor of
shape [batch], where each value represents an index into the embedding
table. (By convention, all Steps that can be used with StackStep must
store inputs in step_inputs.inputs[], but in this step it does not make
sense for that list to have more than one tensor in it).
padding: unused.
state0: A NestedMap containing the state of previous tokens.
- prev_ids: A Tensor containing the n previous token ids. [batch,
num_prev_tokens]. Each row is the token ids at t-1, ..., t-n.
Returns:
Embedding vectors [batch, p.embedding_dim] and new state
"""
p = self.params
# prepare token ids
if p.include_current_token:
ids = tf.concat([
tf.cast(step_inputs.inputs[0][:, None], tf.float32),
tf.cast(state0.prev_ids, tf.float32)
],
axis=-1)
else:
ids = state0.prev_ids
# lookup embedding. ids.shape is [batch, num_tokens]
ids = tf.cast(ids, tf.int32)
embedding = self.emb.EmbLookup(theta.emb, ids)
embedding = tf.reshape(embedding, [-1, p.embedding_dim])
# update state
state1 = state0.copy()
if p.num_prev_tokens > 0:
state1.prev_ids = tf.concat([
tf.cast(step_inputs.inputs[0][:, None], tf.float32),
tf.cast(state0.prev_ids[:, :-1], tf.float32)
],
axis=-1)
state1.prev_ids = tf.ensure_shape(
state1.prev_ids, [None, p.num_prev_tokens],
name='prev_ids_shape_validation')
state1.embedding = embedding
return py_utils.NestedMap(output=embedding), state1
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reading metadata from MLMD instances in TFX-OSS pipelines."""
import enum
import os
from typing import Any, Dict, Iterable, List, Optional, Sequence, Union
from absl import logging
import attr
from model_card_toolkit import model_card as model_card_module
import tensorflow as tf
import tensorflow_model_analysis as tfma
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
# A list of artifact type names used by TFX 0.21 and later versions.
_TFX_DATASET_TYPE = 'Examples'
_TFX_STATS_TYPE = 'ExampleStatistics'
_TFX_MODEL_TYPE = 'Model'
_TFX_METRICS_TYPE = 'ModelEvaluation'
_TFX_TRAINER_TYPE = 'tfx.components.trainer.component.Trainer'
# Map of data types to field names in a TFMA arrayValue
_TYPE_FIELD_MAP = {
'BYTES': 'bytesValues',
'INT32': 'int32Values',
'INT64': 'int64Values',
'FLOAT32': 'float32Values',
'FLOAT64': 'float64Values'
}
@attr.s(auto_attribs=True)
class PipelineTypes(object):
"""A registry of required MLMD types about a TFX pipeline."""
# a list of required artifact types
dataset_type: metadata_store_pb2.ArtifactType
stats_type: metadata_store_pb2.ArtifactType
model_type: metadata_store_pb2.ArtifactType
metrics_type: metadata_store_pb2.ArtifactType
# a list of required execution types
trainer_type: metadata_store_pb2.ExecutionType
def _get_tfx_pipeline_types(store: mlmd.MetadataStore) -> PipelineTypes:
"""Retrieves the registered types in the given `store`.
Args:
store: A ml-metadata MetadataStore to retrieve ArtifactTypes from.
Returns:
An instance of PipelineTypes containing the store's pipeline types.
Raises:
ValueError: If the `store` does not have MCT related types and is not
considered a valid TFX store.
"""
artifact_types = {atype.name: atype for atype in store.get_artifact_types()}
expected_artifact_types = {
_TFX_DATASET_TYPE, _TFX_STATS_TYPE, _TFX_MODEL_TYPE, _TFX_METRICS_TYPE
}
missing_types = expected_artifact_types.difference(artifact_types.keys())
if missing_types:
raise ValueError(
f'Given `store` is invalid: missing ArtifactTypes: {missing_types}.')
execution_types = {etype.name: etype for etype in store.get_execution_types()}
expected_execution_types = {_TFX_TRAINER_TYPE}
missing_types = expected_execution_types.difference(execution_types.keys())
if missing_types:
raise ValueError(
f'Given `store` is invalid: missing ExecutionTypes: {missing_types}.')
return PipelineTypes(
dataset_type=artifact_types[_TFX_DATASET_TYPE],
stats_type=artifact_types[_TFX_STATS_TYPE],
model_type=artifact_types[_TFX_MODEL_TYPE],
metrics_type=artifact_types[_TFX_METRICS_TYPE],
trainer_type=execution_types[_TFX_TRAINER_TYPE])
def _validate_model_id(store: mlmd.MetadataStore,
model_type: metadata_store_pb2.ArtifactType,
model_id: int) -> metadata_store_pb2.Artifact:
"""Validates the given `model_id` against the `store`.
Args:
store: A ml-metadata MetadataStore to be validated.
model_type: The Model ArtifactType in the `store`.
model_id: The id for the model artifact in the `store`.
Returns:
The model artifact with the id.
Raises:
ValueError: If the `model_id` cannot be resolved as a Model artifact in the
given `store`.
"""
model_artifacts = store.get_artifacts_by_id([model_id])
if not model_artifacts:
raise ValueError(f'Input model_id cannot be found: {model_id}.')
model = model_artifacts[0]
if model.type_id != model_type.id:
raise ValueError(
f'Found artifact with `model_id` is not an instance of Model: {model}.')
return model
@enum.unique
class _Direction(enum.Enum):
"""An enum of directions when traversing MLMD lineage."""
ANCESTOR = 1
SUCCESSOR = 2
def _get_one_hop_artifacts(
store: mlmd.MetadataStore,
artifact_ids: Iterable[int],
direction: _Direction,
filter_type: Optional[metadata_store_pb2.ArtifactType] = None
) -> List[metadata_store_pb2.Artifact]:
"""Gets a list of artifacts within 1-hop neighborhood of the `artifact_ids`.
Args:
store: A ml-metadata MetadataStore to look for neighborhood artifacts.
artifact_ids: The artifacts' ids in the `store`.
direction: A direction to specify whether returning ancestors or successors.
filter_type: An optional type filter of the returned artifacts; if given,
only artifacts of that type are returned.
Returns:
A list of qualified artifacts within 1-hop neighborhood in the `store`.
"""
traverse_events = {}
if direction == _Direction.ANCESTOR:
traverse_events['execution'] = (metadata_store_pb2.Event.OUTPUT,
metadata_store_pb2.Event.DECLARED_OUTPUT)
traverse_events['artifact'] = (metadata_store_pb2.Event.INPUT,
metadata_store_pb2.Event.DECLARED_INPUT)
elif direction == _Direction.SUCCESSOR:
traverse_events['execution'] = (metadata_store_pb2.Event.INPUT,
metadata_store_pb2.Event.DECLARED_INPUT)
traverse_events['artifact'] = (metadata_store_pb2.Event.OUTPUT,
metadata_store_pb2.Event.DECLARED_OUTPUT)
executions_ids = set(
event.execution_id
for event in store.get_events_by_artifact_ids(artifact_ids)
if event.type in traverse_events['execution'])
artifacts_ids = set(
event.artifact_id
for event in store.get_events_by_execution_ids(executions_ids)
if event.type in traverse_events['artifact'])
return [
artifact for artifact in store.get_artifacts_by_id(artifacts_ids)
if not filter_type or artifact.type_id == filter_type.id
]
def _get_one_hop_executions(
store: mlmd.MetadataStore,
artifact_ids: Iterable[int],
direction: _Direction,
filter_type: Optional[metadata_store_pb2.ExecutionType] = None
) -> List[metadata_store_pb2.Execution]:
"""Gets a list of executions within 1-hop neighborhood of the `artifact_ids`.
Args:
store: A ml-metadata MetadataStore to look for neighborhood executions.
artifact_ids: The artifacts' ids in the `store`.
direction: A direction to specify whether returning ancestors or successors.
filter_type: An optional type filter of the returned executions; if given,
only executions of that type are returned.
Returns:
A list of qualified executions within 1-hop neighborhood in the `store`.
"""
if direction == _Direction.ANCESTOR:
traverse_event = (metadata_store_pb2.Event.OUTPUT,
metadata_store_pb2.Event.DECLARED_OUTPUT)
elif direction == _Direction.SUCCESSOR:
traverse_event = (metadata_store_pb2.Event.INPUT,
metadata_store_pb2.Event.DECLARED_INPUT)
executions_ids = set(
event.execution_id
for event in store.get_events_by_artifact_ids(artifact_ids)
if event.type in traverse_event)
return [
execution for execution in store.get_executions_by_id(executions_ids)
if not filter_type or execution.type_id == filter_type.id
]
def get_metrics_artifacts_for_model(
store: mlmd.MetadataStore,
model_id: int,
pipeline_types: Optional[PipelineTypes] = None
) -> List[metadata_store_pb2.Artifact]:
"""Gets a list of evaluation artifacts from a model artifact.
It looks for the evaluator component runs that take the given model as input.
Then it returns the metrics artifact of that component run.
Args:
store: A ml-metadata MetadataStore to look for evaluation metrics.
model_id: The id for the model artifact in the `store`.
pipeline_types: An optional set of types if the `store` uses custom types.
Returns:
A list of metrics artifacts produced by the Evaluator component runs
which take the given model artifact as the input.
Raises:
ValueError: If the `model_id` cannot be resolved as a model artifact in the
given `store`.
"""
if not pipeline_types:
pipeline_types = _get_tfx_pipeline_types(store)
_validate_model_id(store, pipeline_types.model_type, model_id)
return _get_one_hop_artifacts(store, [model_id], _Direction.SUCCESSOR,
pipeline_types.metrics_type)
def get_stats_artifacts_for_model(
store: mlmd.MetadataStore,
model_id: int,
pipeline_types: Optional[PipelineTypes] = None
) -> List[metadata_store_pb2.Artifact]:
"""Gets a list of statistics artifacts from a model artifact.
It first looks for the input datasets of the trainer that produces the model.
If the model uses transformed examples, it further looks for the original
dataset. Then it returns the statistics artifact of the found dataset(s).
Args:
store: A ml-metadata MetadataStore instance.
model_id: The id for the model artifact in the `store`.
pipeline_types: An optional set of types if the `store` uses custom types.
Returns:
A list of statistics artifacts produced by the StatsGen component runs
for the datasets which are used to train the model.
Raises:
ValueError: If the `model_id` cannot be resolved as a model artifact in the
given `store`.
"""
if not pipeline_types:
pipeline_types = _get_tfx_pipeline_types(store)
_validate_model_id(store, pipeline_types.model_type, model_id)
trainer_examples = _get_one_hop_artifacts(store, [model_id],
_Direction.ANCESTOR,
pipeline_types.dataset_type)
# If trainer takes transformed example, we look for its original dataset.
dataset_ids = set()
transformed_example_ids = set()
for example in trainer_examples:
if example.uri.find('/Transform/') != -1:
transformed_example_ids.add(example.id)
else:
dataset_ids.add(example.id)
dataset_ids.update(
dataset.id for dataset in _get_one_hop_artifacts(
store, transformed_example_ids, _Direction.ANCESTOR,
pipeline_types.dataset_type))
return _get_one_hop_artifacts(store, dataset_ids, _Direction.SUCCESSOR,
pipeline_types.stats_type)
def _property_value(
node: Union[metadata_store_pb2.Artifact, metadata_store_pb2.Execution,
metadata_store_pb2.Context],
name: str,
is_custom_property: bool = False) -> Optional[Union[int, float, str]]:
"""Given a MLMD node and a (custom) property name, returns its value if any.
Args:
node: A node in MLMD lineage graph. It is one of MLMD Artifact, Execution,
or Context.
name: The key of the properties or custom properties.
is_custom_property: Indicates whether the name is a custom property.
Returns:
The value of the property if found in the node; If not, returns None.
"""
properties = node.custom_properties if is_custom_property else node.properties
if name not in properties:
return None
if properties[name].WhichOneof('value') == 'int_value':
return properties[name].int_value
if properties[name].WhichOneof('value') == 'float_value':
return properties[name].double_value
return properties[name].string_value
def generate_model_card_for_model(
store: mlmd.MetadataStore,
model_id: int,
pipeline_types: Optional[PipelineTypes] = None
) -> model_card_module.ModelCard:
"""Populates model card properties for a model artifact.
It traverses the parents and children of the model artifact and maps related
artifact properties and lineage information to model card properties. The
graphics derived from the artifact payload are handled separately.
Args:
store: A ml-metadata MetadataStore instance.
model_id: The id for the model artifact in the `store`.
pipeline_types: An optional set of types if the `store` uses custom types.
Returns:
A ModelCard data object with the properties.
Raises:
ValueError: If the `model_id` cannot be resolved as a model artifact in the
given `store`.
"""
if not pipeline_types:
pipeline_types = _get_tfx_pipeline_types(store)
_validate_model_id(store, pipeline_types.model_type, model_id)
model_card = model_card_module.ModelCard()
model_details = model_card.model_details
trainers = _get_one_hop_executions(store, [model_id], _Direction.ANCESTOR,
pipeline_types.trainer_type)
if trainers:
model_details.name = _property_value(trainers[-1], 'module_file')
model_details.version.name = _property_value(trainers[0], 'checksum_md5')
model_details.references = [
model_card_module.Reference(
reference=_property_value(trainers[0], 'pipeline_name'))
]
return model_card
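# Hedged usage sketch (not part of the original module): connect to a local
# MLMD SQLite database and build a model card skeleton for one model artifact.
# The database path and model id are placeholders supplied by the caller.
def _example_generate_model_card(mlmd_db_path: str,
                                 model_id: int) -> model_card_module.ModelCard:
  """Builds a ModelCard from the MLMD store at `mlmd_db_path`."""
  connection_config = metadata_store_pb2.ConnectionConfig()
  connection_config.sqlite.filename_uri = mlmd_db_path
  store = mlmd.MetadataStore(connection_config)
  return generate_model_card_for_model(store, model_id)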
def read_stats_protos(
stats_artifact_uri: str
) -> List[statistics_pb2.DatasetFeatureStatisticsList]:
"""Reads DatasetFeatureStatisticsList protos from provided uri.
Args:
stats_artifact_uri: the output artifact path of a StatsGen component.
Returns:
A list containing each DatasetFeatureStatisticsList proto found in the
directory.
"""
stats_protos = []
for filename in tf.io.gfile.listdir(stats_artifact_uri):
if tf.io.gfile.isdir(os.path.join(stats_artifact_uri, filename)):
stats_proto = read_stats_proto(stats_artifact_uri, filename)
if stats_proto:
logging.info('Reading stats artifact from %s', filename)
stats_protos.append(stats_proto)
return stats_protos
def read_stats_proto(
stats_artifact_uri: str,
split: str) -> Optional[statistics_pb2.DatasetFeatureStatisticsList]:
"""Reads DatasetFeatureStatisticsList proto from provided stats artifact uri.
Args:
stats_artifact_uri: the output artifact path of a StatsGen component.
split: the data split to fetch stats from.
Returns:
If the artifact uri does not exist, returns None. Otherwise, returns the
eval split stats as DatasetFeatureStatisticsList.
"""
stats = statistics_pb2.DatasetFeatureStatisticsList()
feature_stats_path = os.path.join(stats_artifact_uri, split,
'FeatureStats.pb')
stats_tfrecord_path = os.path.join(stats_artifact_uri, split,
'stats_tfrecord')
if tf.io.gfile.exists(feature_stats_path):
with tf.io.gfile.GFile(feature_stats_path, mode='rb') as f:
stats.ParseFromString(f.read())
return stats
elif tf.io.gfile.exists(stats_tfrecord_path):
serialized_stats = next(
tf.compat.v1.io.tf_record_iterator(stats_tfrecord_path))
stats.ParseFromString(serialized_stats)
return stats
else:
logging.warning('No artifact found at %s or %s', stats_tfrecord_path,
feature_stats_path)
return None
def read_metrics_eval_result(
metrics_artifact_uri: str,
output_file_format: Optional[str] = None) -> Optional[tfma.EvalResult]:
"""Reads TFMA evaluation results from the evaluator output path.
Args:
metrics_artifact_uri: the output artifact path of a TFMA component.
output_file_format: an optional file format of the payload.
Returns:
A TFMA EvalResult named tuple including configs and sliced metrics.
Returns None if no slicing metrics found from `metrics_artifact_uri`.
"""
result = tfma.load_eval_result(
output_path=metrics_artifact_uri, output_file_format=output_file_format)
if not result.slicing_metrics:
logging.warning('Cannot load eval results from: %s', metrics_artifact_uri)
return None
return result
def annotate_eval_result_metrics(model_card: model_card_module.ModelCard,
eval_result: tfma.EvalResult):
"""Annotates model_card's PerformanceMetrics for every metric in eval_result.
Args:
model_card: The model card object.
eval_result: A `tfma.EvalResult`.
Raises:
ValueError: if eval_result is improperly formatted.
"""
def _parse_array_value(array: Dict[str, Any]) -> str:
data_type = array['dataType']
if data_type in _TYPE_FIELD_MAP:
type_field = _TYPE_FIELD_MAP[data_type]
return ', '.join([str(value) for value in array[type_field]])
else:
logging.warning('Received unexpected array %s', str(array))
return ''
for slice_repr, metrics_for_slice in (
eval_result.get_metrics_for_all_slices().items()):
# Parse the slice name
if not isinstance(slice_repr, tuple):
raise ValueError(
f'Expected EvalResult slices to be tuples; found {type(slice_repr)}')
slice_name = '_X_'.join(f'{a}_{b}' for a, b in slice_repr)
for metric_name, metric_value in metrics_for_slice.items():
# Parse the metric value
parsed_value = ''
if 'doubleValue' in metric_value:
parsed_value = metric_value['doubleValue']
elif 'boundedValue' in metric_value:
parsed_value = metric_value['boundedValue']['value']
elif 'arrayValue' in metric_value:
parsed_value = _parse_array_value(metric_value['arrayValue'])
else:
logging.warning(
'Expected doubleValue, boundedValue, or arrayValue; found %s',
metric_value.keys())
if parsed_value:
# Create the PerformanceMetric and append to the ModelCard
metric = model_card_module.PerformanceMetric(
type=metric_name, value=str(parsed_value), slice=slice_name)
model_card.quantitative_analysis.performance_metrics.append(metric)
def filter_metrics(
eval_result: tfma.EvalResult,
metrics_include: Optional[List[str]] = None,
metrics_exclude: Optional[List[str]] = None) -> tfma.EvalResult:
"""Filters metrics in a TFMA EvalResult.
Args:
eval_result: The TFMA EvalResult object.
metrics_include: The names of metrics to keep in the EvalResult. Mutually
exclusive with metrics_exclude.
metrics_exclude: The names of metrics to discard in the EvalResult. Mutually
exclusive with metrics_include.
Returns:
The eval_result with unwanted metrics filtered.
Raises:
ValueError: if both metrics_include and metrics_exclude are provided.
"""
if metrics_include and not metrics_exclude:
include = lambda metric_name: metric_name in metrics_include
elif metrics_exclude and not metrics_include:
include = lambda metric_name: metric_name not in metrics_exclude
else:
raise ValueError('filter_metrics() requires exactly one of metrics_include '
'and metrics_exclude.')
filtered_slicing_metrics = []
for slc, mtrc in eval_result.slicing_metrics:
filtered_mtrc = {}
for output_name in mtrc:
for subkey in mtrc[output_name]:
for mtrc_name in mtrc[output_name][subkey]:
if include(mtrc_name):
filtered_mtrc[output_name] = filtered_mtrc.get(output_name, {})
filtered_mtrc[output_name][subkey] = filtered_mtrc[output_name].get(
subkey, {})
filtered_mtrc[output_name][subkey][mtrc_name] = mtrc[output_name][
subkey][mtrc_name]
filtered_slicing_metrics.append(
tfma.view.SlicedMetrics(slice=slc, metrics=filtered_mtrc))
return tfma.EvalResult(
slicing_metrics=filtered_slicing_metrics,
plots=eval_result.plots,
attributions=eval_result.attributions,
config=eval_result.config,
data_location=eval_result.data_location,
file_format=eval_result.file_format,
model_location=eval_result.model_location)
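# Hedged usage sketch (not part of the original module): load evaluator output,
# keep only a couple of metrics, and attach them to a model card. The metric
# names ('accuracy', 'auc') and the artifact uri are placeholders.
def _example_annotate_filtered_metrics(model_card: model_card_module.ModelCard,
                                       metrics_artifact_uri: str) -> None:
  """Annotates `model_card` with accuracy/AUC metrics from a TFMA output dir."""
  eval_result = read_metrics_eval_result(metrics_artifact_uri)
  if eval_result is None:
    return
  filtered = filter_metrics(eval_result, metrics_include=['accuracy', 'auc'])
  annotate_eval_result_metrics(model_card, filtered)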
def filter_features(
dataset_stats: statistics_pb2.DatasetFeatureStatistics,
features_include: Optional[Sequence[str]] = None,
features_exclude: Optional[Sequence[str]] = None
) -> statistics_pb2.DatasetFeatureStatistics:
"""Filters features in a TFDV DatasetFeatureStatistics.
Args:
dataset_stats: The TFDV DatasetFeatureStatistics object.
features_include: The names or paths of features to keep. Mutually exclusive
with features_exclude.
features_exclude: The names or paths of features to discard. Mutually
exclusive with features_include.
Returns:
The DatasetFeatureStatistics with unwanted features filtered out.
Raises:
ValueError: if both or neither of features_include and features_exclude are
provided.
"""
# Check that inputs are valid, and create filter function
feature_name = lambda feature: feature.name or feature.path.step[0]
if features_include and not features_exclude:
include = lambda feature: feature_name(feature) in features_include
elif features_exclude and not features_include:
include = lambda feature: feature_name(feature) not in features_exclude
else:
raise ValueError('filter_features() requires exactly one of '
'features_include and features_exclude.')
# Create new DatasetFeatureStatistics
filtered_data_stats = statistics_pb2.DatasetFeatureStatistics()
filtered_data_stats.CopyFrom(dataset_stats)
# Filter out features, and write to DatasetFeatureStatistics
filtered_features = [
feature for feature in dataset_stats.features if include(feature)
]
del filtered_data_stats.features[:]
filtered_data_stats.features.extend(filtered_features)
# Return filtered DatasetFeatureStatistics
return filtered_data_stats
def read_stats_protos_and_filter_features(
stats_artifact_uri: str,
features_include: Optional[Sequence[str]] = None,
features_exclude: Optional[List[str]] = None
) -> List[statistics_pb2.DatasetFeatureStatisticsList]:
"""Reads DatasetFeatureStatisticsList protos and filters features.
Args:
stats_artifact_uri: the output artifact path of a StatsGen component.
features_include: The names or paths of features to keep. Mutually exclusive
with features_exclude.
features_exclude: The names or paths of features to discard. Mutually
exclusive with features_include.
Returns:
A list of DatasetFeatureStatisticsList from the provided path, with unwanted
features filtered.
Raises:
ValueError: if both or neither of features_include and features_exclude are
provided.
"""
data_stats = read_stats_protos(stats_artifact_uri)
for dsfl in data_stats:
filtered_datasets = [
filter_features(dataset, features_include, features_exclude)
for dataset in dsfl.datasets
]
del dsfl.datasets[:]
dsfl.datasets.extend(filtered_datasets)
return data_stats
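# Hedged usage sketch (not part of the original module): read statistics from a
# StatsGen output directory while dropping a sensitive feature. The uri and the
# feature name 'user_id' are placeholders.
def _example_read_filtered_stats(
    stats_uri: str) -> List[statistics_pb2.DatasetFeatureStatisticsList]:
  """Returns stats protos from `stats_uri` with the 'user_id' feature removed."""
  return read_stats_protos_and_filter_features(
      stats_uri, features_exclude=['user_id'])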
|
|
# Copyright 2015 Myriam Johnson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''A program for playing with texture expansion.
Methods:
compare -- find (square of) colour-space distance between two pixels
compareRegion -- find weighted sum of colour-space distances between all
pixels in two texture regions
expand -- expand one texture into another
Author: mym
'''
from __future__ import print_function
#from math import sqrt
import texture
#import random
def compare(pix1, pix2):
'''Compare two pixels, returning the square of the colour-space distance between them.
Arguments:
pix1 -- tuple containing the channels of the first pixel
pix2 -- tuple containing the channels of the second pixel
Return: square of colour space distance
Preconditions: both pixels have the same number of channels
'''
assert len(pix1) == len(pix2)
collect = 0
for pair in zip(pix1, pix2):
collect += (pair[0] - pair[1])**2
return collect
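# Worked example (illustrative, not part of the original module): for two RGB
# pixels that differ only in the red channel by 255, the squared colour-space
# distance is 255**2 == 65025.
def _compare_example():
    '''Returns the squared distance between pure red and black: 65025.'''
    return compare((255, 0, 0), (0, 0, 0))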
def compareRegion(tex1, tex2, cen1, cen2, region):
'''Compare regions of two Textures.
Returns the weighted sum of colour-space distances between corresponding
pixels, or Infinity if no pixels can be compared.
Arguments:
tex1, tex2 -- Textures to compare
cen1, cen2 -- 2-tuple centres of comparison regions
region -- list of 2-tuple shifts defining points for comparison
Returns: floating-point weighted sum of distances
Preconditions: region is valid about cen in both textures (untested)
'''
# abort if nothing to compare (avoid divide-by-zero)
if (len(region) == 0): return float('inf')
# loop over shifts
total = 0
for shift in region:
p1 = tex1.getPixel(cen1, shift)
p2 = tex2.getPixel(cen2, shift)
total += compare(p1, p2)
# weight by number of points compared
return float(total)/len(region)
def expand(source, target, near):
'''Expands the source texture into larger output
Arguments:
source -- Source Texture to be expanded
target -- Target Texture to guide expansion
near -- Shape used for comparisons
Return: an Image containing the expanded texture
'''
# make sure the target has the same mode as the source
if (target.pic.mode != source.pic.mode):
target.pic = target.pic.convert(source.pic.mode)
# lists of all pixels in source, target for flatter iteration
slist = [(x,y)
for y in range(source.pic.size[1])
for x in range(source.pic.size[0])]
tlist = [(x,y)
for y in range(target.pic.size[1])
for x in range(target.pic.size[0])]
# for each target pixel...
for tloc in tlist:
# trim neighbourhood around this point
nearer = target.goodList(tloc, near.shift, target.valid)
# clear list of choices
choices = []
# loop over all source pixels
for sloc in slist:
# trim above neighbourhood around this point
nearest = source.goodList(sloc, nearer, source.valid)
# weighted texture distance of the remaining region
weight = compareRegion(source, target, sloc, tloc, nearest)
# add tuple of weight and source pixel to choices
choices.append((weight, source.getPixel(sloc)))
# sort list, pick first
# TODO this gives lexical sort; want stable sort on only first element
# actually stable gives preference to input order,
# lexical gives preference to colour in RGB order
# what order is actually desired? (probably random)
# TODO weighted random choice
# sorting actually unnecessary, even for randomness
choices.sort()
newval = choices[0][1]
# crude randomness - pick randomly among the first ten
#newval = choices[random.randrange(10)][1]
# set the pixel!
target.setPixel(newval, tloc)
target.setValid(tloc)
# progress?
if (tloc[0] == 0): print("\nrow ", tloc[1], end = "")
print(".", end = "")
# convert to an Image and return
return target.toImage()
if __name__ == '__main__':
# additional imports
import argparse
from PIL import Image
# use the first line of the docstring as the program description
parser = argparse.ArgumentParser(description = __doc__.splitlines()[0])
# Minimum arguments - input (source) and output (expanded) filenames
parser.add_argument("input_file", help="the source texture file")
parser.add_argument("output_file", help="the destination file")
# TODO gonna need to read some options
# gaussian weighting (default flat)
# randomisation method (default none)
# targeted synthesis
parser.add_argument("-target", dest="target_file",
help="image for target of synthesis")
# untargeted synthesis scale
parser.add_argument("-scale", default = 2, type = int,
help="Scale factor for generated texture (ignored if targeted)")
# neighbourhood size
parser.add_argument("-nsize", default = 2, type = int,
help = "Size of neighbourhood used in comparisons")
# activate profiler
parser.add_argument("-prof", metavar = "filename",
help = "run profiler and save results")
args = parser.parse_args()
# Read the source image
try:
source_image = Image.open(args.input_file)
source = texture.Texture(source_image)
except IOError:
print("Could not open input image file", args.input_file)
exit(1)
# Create target image and neighbourhood
# SquareShape for targeted (looks ahead),
# EllShape for untargeted (only looks at initialised)
    if (args.target_file is not None):
# read from file if one is specified
try:
target_image = Image.open(args.target_file)
target = texture.Texture(target_image)
shape = texture.SquareShape(args.nsize)
except IOError:
print("Could not open target image file", args.target_file)
exit(1)
else:
# no target specified, create a blank one
tsize = (args.scale * source_image.size[0],
args.scale * source_image.size[1])
target = texture.EmptyTexture(tsize, source_image.mode)
shape = texture.EllShape(args.nsize)
# Perform the expansion
    if (args.prof is None):
expansion = expand(source, target, shape)
else:
import cProfile
cProfile.run("expansion = expand(source, target, shape)", args.prof)
# Write the final image
try:
expansion.save(args.output_file)
except IOError:
print("Could not write output image file", args.output_file)
exit(1)
exit(0)
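# Example invocations (script name hypothetical, flags as defined above):
#   python expand_texture.py grass.png grass_big.png -scale 2 -nsize 3
#   python expand_texture.py grass.png styled.png -target guide.png -prof stats.prof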
|
|
#!python
import argparse
import os
import cherrypy
import platform
import signal
import sys
import types
import ws4py.server
import tangelo
import tangelo.server
import tangelo.util
import tangelo.websocket
tangelo_version = "0.8.1-dev"
def tangelo_pkgdata():
print get_pkgdata_dir()
return 0
def tangelo_passwd():
import argparse
import getpass
import md5
import sys
# Parse arguments.
p = argparse.ArgumentParser(description="Edit .htaccess files for Tangelo")
p.add_argument("-c", "--create", action="store_true", help="Create new password file")
p.add_argument("passwordfile", metavar="passwordfile", type=str, nargs=1, help="Password file")
p.add_argument("realm", metavar="realm", type=str, nargs=1, help="Authentication realm")
p.add_argument("user", metavar="user", type=str, nargs=1, help="Username")
args = p.parse_args()
# Capture argument values.
create = args.create
passwordfile = args.passwordfile[0]
realm = args.realm[0]
user = args.user[0]
# Open the password file and read in the contents.
try:
with open(passwordfile) as f:
pws = map(lambda x: x.strip().split(":"), f.readlines())
except IOError:
create = True
pws = []
# Find the record matching the user.
userrec = filter(lambda x: x[1][0] == user and x[1][1] == realm, enumerate(pws))
n = len(userrec)
if n > 1:
print >>sys.stderr, "warning: user '%s' for realm '%s' occurs %d times... using only first occurrence"
# Get the first element of userrec, if there is one.
if userrec == []:
# If there was no matching record, make up a dummy one.
userrec = [None, [user, realm, None]]
else:
userrec = list(userrec[0])
# Get a password and confirmation from the user.
password = getpass.getpass("Enter password for %s@%s: " % (user, realm))
confirm = getpass.getpass("Re-enter password: ")
if password != confirm:
print >>sys.stderr, "Passwords do not match, aborting."
return 1
# Install the md5 hash in the "password" slot of the updating record.
userrec[1][2] = md5.md5("%s:%s:%s" % (user, realm, password)).hexdigest()
# If requested to "create" a new password file, delete the pws array, and
# arrange for the userrec to be appended to the pws array, rather than updating
# some indexed entry of it (with the signal index of -1).
if create:
pws = [userrec[1]]
else:
if userrec[0] is None:
pws.append(userrec[1])
else:
pws[userrec[0]] = userrec[1]
try:
with open(passwordfile, "w") as f:
f.writelines(map(lambda x: ":".join(x) + "\n", pws))
except IOError:
print >>sys.stderr, "error: could not open file '%s' for writing!" % (passwordfile)
return 1
return 0
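# For reference, each record written above follows the htdigest-style layout
#   user:realm:md5(user:realm:password)
# so a (hypothetical) entry in the password file looks like
#   alice:tangelo:<32-character md5 hex digest>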
class Config(object):
options = {"access_auth": [bool],
"drop_privileges": [bool],
"sessions": [bool],
"list_dir": [bool],
"show_py": [bool],
"hostname": types.StringTypes,
"port": [int],
"user": types.StringTypes,
"group": types.StringTypes,
"key": types.StringTypes,
"cert": types.StringTypes,
"root": types.StringTypes,
"plugins": [list]}
def __init__(self, filename):
for option in Config.options:
self.__dict__[option] = None
self.errors = []
if filename is not None:
self.load(filename)
def load(self, filename):
try:
d = tangelo.util.yaml_safe_load(filename, dict)
except TypeError:
self.errors.append("config file does not contain associative array at top level")
return
for option, setting in d.iteritems():
uscore = option.replace("-", "_")
if uscore not in Config.options:
self.errors.append("unknown option %s" % (option))
else:
self.__dict__[uscore] = setting
def type_check_value(self, option, valid_types):
value = self.__dict__.get(option)
if value is not None and not any(isinstance(value, t) for t in valid_types):
self.errors.append("option %s must be of type %s" % (option, " or ".join([t.__name__ for t in valid_types])))
def type_check(self):
for option, valid_types in Config.options.iteritems():
self.type_check_value(option, valid_types)
return len(self.errors) == 0
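# A minimal configuration file accepted by Config might look like the
# following YAML (hypothetical values; hyphenated keys are mapped to
# underscores by load() above):
#
#   hostname: localhost
#   port: 8080
#   drop-privileges: true
#   root: /srv/tangelo/web
#   plugins:
#     - name: ui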
def polite(signum, frame):
tangelo.log_warning("TANGELO", "Already shutting down. To force shutdown immediately, send SIGQUIT (Ctrl-\\).")
def die(signum, frame):
tangelo.log_error("TANGELO", "Received quit signal. Exiting immediately.")
os.kill(os.getpid(), signal.SIGKILL)
def shutdown(signum, frame):
tangelo.log_info("TANGELO", "Received interrupt signal, performing graceful shutdown")
    # Disable the shutdown handler (i.e., for repeated Ctrl-C etc.) for the
# "polite" shutdown signals.
for sig in [signal.SIGINT, signal.SIGTERM]:
signal.signal(sig, polite)
# Perform plugin shutdown operations.
tangelo.log_info("TANGELO", "Shutting down plugins...")
plugins = cherrypy.config.get("plugins")
if plugins:
plugins.unload_all()
# Perform CherryPy shutdown and exit.
tangelo.log_info("TANGELO", "Stopping web server")
cherrypy.engine.stop()
cherrypy.engine.exit()
tangelo.log_success("TANGELO", "Be seeing you.")
def get_pkgdata_dir():
return os.path.dirname(__file__)
def get_web_directory():
return os.path.join(get_pkgdata_dir(), "pkgdata/web")
def get_bundled_plugin_directory():
return os.path.join(get_pkgdata_dir(), "pkgdata/plugin")
def get_tangelo_ico():
return os.path.join(get_pkgdata_dir(), "pkgdata/tangelo.ico")
def main():
p = argparse.ArgumentParser(description="Start a Tangelo server.")
p.add_argument("-c", "--config", type=str, default=None, metavar="FILE", help="specifies configuration file to use")
p.add_argument("-a", "--access-auth", action="store_const", const=True, default=None, help="enable HTTP authentication (i.e. processing of .htaccess files) (default)")
p.add_argument("-na", "--no-access-auth", action="store_const", const=True, default=None, help="disable HTTP authentication (i.e. processing of .htaccess files)")
p.add_argument("-p", "--drop-privileges", action="store_const", const=True, default=None, help="enable privilege drop when started as superuser (default)")
p.add_argument("-np", "--no-drop-privileges", action="store_const", const=True, default=None, help="disable privilege drop when started as superuser")
p.add_argument("-s", "--sessions", action="store_const", const=True, default=None, help="enable session tracking (default)")
p.add_argument("-ns", "--no-sessions", action="store_const", const=True, default=None, help="disable session tracking")
p.add_argument("--list-dir", action="store_true", default=None, help="enable directory content serving")
p.add_argument("--no-list-dir", action="store_true", default=None, help="disable directory content serving (default)")
p.add_argument("--show-py", action="store_true", default=None, help="enable Python service source code serving")
p.add_argument("--no-show-py", action="store_true", default=None, help="disable Python service source code serving (default)")
p.add_argument("--hostname", type=str, default=None, metavar="HOSTNAME", help="overrides configured hostname on which to run Tangelo")
p.add_argument("--port", type=int, default=None, metavar="PORT", help="overrides configured port number on which to run Tangelo")
p.add_argument("-u", "--user", type=str, default=None, metavar="USERNAME", help="specifies the user to run as when root privileges are dropped")
p.add_argument("-g", "--group", type=str, default=None, metavar="GROUPNAME", help="specifies the group to run as when root privileges are dropped")
p.add_argument("-r", "--root", type=str, default=None, metavar="DIR", help="the directory from which Tangelo will serve content")
p.add_argument("--verbose", "-v", action="store_true", help="display extra information as Tangelo starts up")
p.add_argument("--version", action="store_true", help="display Tangelo version number")
p.add_argument("--key", type=str, default=None, metavar="FILE", help="the path to the SSL key. You must also specify --cert to serve content over https.")
p.add_argument("--cert", type=str, default=None, metavar="FILE", help="the path to the SSL certificate. You must also specify --key to serve content over https.")
p.add_argument("--examples", action="store_true", default=None, help="Serve the Tangelo example applications")
args = p.parse_args()
# If version flag is present, print the version number and exit.
if args.version:
print tangelo_version
return 0
# Make sure user didn't specify conflicting flags.
if args.access_auth and args.no_access_auth:
tangelo.log_error("ERROR", "can't specify both --access-auth (-a) and --no-access-auth (-na) together")
return 1
if args.drop_privileges and args.no_drop_privileges:
tangelo.log_error("ERROR", "can't specify both --drop-privileges (-p) and --no-drop-privileges (-np) together")
return 1
if args.no_sessions and args.sessions:
tangelo.log_error("ERROR", "can't specify both --sessions (-s) and --no-sessions (-ns) together")
return 1
if args.examples and args.root:
tangelo.log_error("ERROR", "can't specify both --examples and --root (-r) together")
return 1
if args.examples and args.config:
tangelo.log_error("ERROR", "can't specify both --examples and --config (-c) together")
return 1
if args.no_list_dir and args.list_dir:
tangelo.log_error("ERROR", "can't specify both --list-dir and --no-list-dir together")
sys.exit(1)
if args.no_show_py and args.show_py:
tangelo.log_error("ERROR", "can't specify both --show-py and --no-show-py together")
sys.exit(1)
# Decide if we have a configuration file or not.
cfg_file = args.config
if cfg_file is None:
tangelo.log("TANGELO", "No configuration file specified - using command line args and defaults")
else:
cfg_file = tangelo.util.expandpath(cfg_file)
tangelo.log("TANGELO", "Using configuration file %s" % (cfg_file))
# Parse the config file; report errors if any.
try:
config = Config(cfg_file)
except (IOError, ValueError) as e:
tangelo.log_error("ERROR", e)
return 1
# Type check the config entries.
if not config.type_check():
for message in config.errors:
tangelo.log_error("TANGELO", message)
return 1
# Determine whether to use access auth.
access_auth = True
if args.access_auth is None and args.no_access_auth is None:
if config.access_auth is not None:
access_auth = config.access_auth
else:
access_auth = (args.access_auth is not None) or (not args.no_access_auth)
tangelo.log("TANGELO", "Access authentication %s" % ("enabled" if access_auth else "disabled"))
# Determine whether to perform privilege drop.
drop_privileges = True
if args.drop_privileges is None and args.no_drop_privileges is None:
if config.drop_privileges is not None:
drop_privileges = config.drop_privileges
else:
drop_privileges = (args.drop_privileges is not None) or (not args.no_drop_privileges)
# Determine whether to enable sessions.
sessions = True
if args.sessions is None and args.no_sessions is None:
if config.sessions is not None:
sessions = config.sessions
else:
sessions = (args.sessions is not None) or (not args.no_sessions)
tangelo.log("TANGELO", "Sessions %s" % ("enabled" if sessions else "disabled"))
# Determine whether to serve directory listings by default.
listdir = False
if args.list_dir is None and args.no_list_dir is None:
if config.list_dir is not None:
listdir = config.list_dir
else:
listdir = (args.list_dir is not None) or (not args.no_list_dir)
cherrypy.config["listdir"] = listdir
tangelo.log("TANGELO", "Directory content serving %s" % ("enabled" if listdir else "disabled"))
# Determine whether to serve web service Python source code by default.
showpy = False
if args.show_py is None and args.no_show_py is None:
if config.show_py is not None:
showpy = config.show_py
else:
showpy = (args.show_py is not None) or (not args.no_show_py)
cherrypy.config["showpy"] = showpy
tangelo.log("TANGELO", "Web service source code serving %s" % ("enabled" if showpy else "disabled"))
# Extract the rest of the arguments, giving priority first to command line
# arguments, then to the configuration file (if any), and finally to a
# hard-coded default value.
hostname = args.hostname or config.hostname or "localhost"
port = args.port or config.port or 8080
user = args.user or config.user or "nobody"
group = args.group or config.group or "nobody"
tangelo.log("TANGELO", "Hostname: %s" % (hostname))
tangelo.log("TANGELO", "Port: %d" % (port))
tangelo.log("TANGELO", "Privilege drop %s" % ("enabled (if necessary)" if drop_privileges else "disabled"))
if drop_privileges:
tangelo.log("TANGELO", "\tUser: %s" % (user))
tangelo.log("TANGELO", "\tGroup: %s" % (group))
# HTTPS support
#
# Grab the ssl key file.
ssl_key = args.key or config.key
if ssl_key is not None:
ssl_key = tangelo.util.expandpath(ssl_key)
# Grab the cert file.
ssl_cert = args.cert or config.cert
if ssl_cert is not None:
ssl_cert = tangelo.util.expandpath(ssl_cert)
# In order to enable HTTPS, *both* the key and cert must be specified. If
# only one or the other is specified, this is considered an error, because
    # we don't want to silently fall back to serving content the user
    # considers sensitive over plain HTTP.
if ssl_key is not None and ssl_cert is not None:
cherrypy.config.update({"server.ssl_module": "pyopenssl",
"server.ssl_certificate": ssl_cert,
"server.ssl_private_key": ssl_key})
tangelo.log("TANGELO", "HTTPS enabled")
tangelo.log("TANGELO", "\tSSL Cert file: %s" % (ssl_cert))
tangelo.log("TANGELO", "\tSSL Key file: %s" % (ssl_key))
elif not (ssl_key is None and ssl_cert is None):
tangelo.log_error("TANGELO", "error: SSL key or SSL cert missing")
return 1
else:
tangelo.log("TANGELO", "HTTPS disabled")
# We need a web root - use the installed example web directory as a
# fallback. This might be found in a few different places, so try them one
# by one until we find one that exists.
root = args.root or config.root
if root:
root = tangelo.util.expandpath(root)
elif args.examples:
# Set the examples web root.
root = get_web_directory()
tangelo.log_info("TANGELO", "Looking for example web content path in %s" % (root))
if not os.path.exists(root):
tangelo.log_error("ERROR", "could not find examples package")
return 1
# Set the examples plugins.
config.plugins = [{"name": "config"},
{"name": "data"},
{"name": "docs"},
{"name": "mapping"},
{"name": "mongo"},
{"name": "stream"},
{"name": "tangelo"},
{"name": "ui"},
{"name": "vis"}]
else:
root = tangelo.util.expandpath(".")
tangelo.log("TANGELO", "Serving content from %s" % (root))
# Set the web root directory.
cherrypy.config.update({"webroot": root})
# Place an empty dict to hold per-module configuration into the global
# configuration object, and one for persistent per-module storage (the
# latter can be manipulated by the service).
cherrypy.config.update({"module-config": {}})
cherrypy.config.update({"module-store": {}})
# Analogs of the module storage dicts, but for plugins.
cherrypy.config.update({"plugin-config": {}})
cherrypy.config.update({"plugin-store": {}})
# Create a plugin manager.
plugins = tangelo.server.Plugins("tangelo.plugin", config=config.plugins, plugin_dir=get_bundled_plugin_directory())
# Check for any errors - if there are, report them and exit.
if not plugins.good():
for message in plugins.errors:
tangelo.log_error("PLUGIN", message)
return 1
# Save the plugin manager for use later (when unloading plugins during
# shutdown).
cherrypy.config.update({"plugins": plugins})
# Create an instance of the main handler object.
module_cache = tangelo.util.ModuleCache()
tangelo_server = tangelo.server.Tangelo(module_cache=module_cache, plugins=plugins)
rootapp = cherrypy.Application(tangelo_server, "/")
# Place an AuthUpdate handler in the Tangelo object if access authorization
# is on.
tangelo_server.auth_update = tangelo.server.AuthUpdate(app=rootapp)
# Mount the root application object.
cherrypy.tree.mount(rootapp, config={"/": {"tools.sessions.on": sessions},
"/favicon.ico": {"tools.staticfile.on": True,
"tools.staticfile.filename": get_tangelo_ico()}})
# Set up the global configuration.
cherrypy.config.update({"environment": "production",
"log.screen": True,
"server.socket_host": hostname,
"server.socket_port": port})
# Try to drop privileges if requested, since we've bound to whatever port
# superuser privileges were needed for already.
if drop_privileges:
# If we're on windows, don't supply any username/groupname, and just
        # assume we should drop privileges.
if platform.system() == "Windows":
tangelo.log("TANGELO", "Performing privilege drop")
cherrypy.process.plugins.DropPrivileges(cherrypy.engine).subscribe()
elif os.getuid() == 0:
tangelo.log("TANGELO", "Performing privilege drop")
# Reaching here means we're on unix, and we are the root user, so go
# ahead and drop privileges to the requested user/group.
import grp
import pwd
# On some systems, negative uids and gids are allowed. These can
# render in Python (in particular, on OS X) as very large unsigned
# values. This function first checks to see if the input value is
# already negative; if so, there's no issue and we return it
# unchanged. Otherwise, we treat the argument as a bit
# representation of a *signed* value, check the sign bit to see if
# it *should* be a negative number, and then perform the proper
# arithmetic to turn it into a signed one.
def to_signed(val):
# If we already see a negative number, just return it.
if val < 0:
return val
# Check sign bit, and subtract the unsigned range from the value
# if it is set.
return val - 0x100000000 if val & 0x80000000 else val
# Find the UID and GID for the requested user and group.
try:
mode = "user"
value = user
uid = to_signed(pwd.getpwnam(user).pw_uid)
mode = "group"
value = group
gid = to_signed(grp.getgrnam(group).gr_gid)
except KeyError:
tangelo.log_error("TANGELO", "no such %s '%s' to drop privileges to" % (mode, value))
return 1
# Set the process home directory to be the dropped-down user's.
os.environ["HOME"] = os.path.expanduser("~%s" % (user))
# Perform the actual UID/GID change.
cherrypy.process.plugins.DropPrivileges(cherrypy.engine, uid=uid, gid=gid).subscribe()
else:
tangelo.log("TANGELO", "Not performing privilege drop (because not running as superuser)")
# Set up websocket handling. Use the pass-through subclassed version of the
# plugin so we can set a priority on it that doesn't conflict with privilege
# drop.
tangelo.websocket.WebSocketLowPriorityPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = ws4py.server.cherrypyserver.WebSocketTool()
# Replace the stock auth_digest and auth_basic tools with ones that have
# slightly lower priority (so the AuthUpdate tool can run before them).
cherrypy.tools.auth_basic = cherrypy.Tool("before_handler", cherrypy.lib.auth_basic.basic_auth, priority=2)
cherrypy.tools.auth_digest = cherrypy.Tool("before_handler", cherrypy.lib.auth_digest.digest_auth, priority=2)
# Install signal handlers to allow for proper cleanup/shutdown.
for sig in [signal.SIGINT, signal.SIGTERM]:
signal.signal(sig, shutdown)
# Send SIGQUIT to an immediate, ungraceful shutdown instead.
if platform.system() != "Windows":
signal.signal(signal.SIGQUIT, die)
# Start the CherryPy engine.
cherrypy.engine.start()
tangelo.log_success("TANGELO", "Server is running")
cherrypy.engine.block()
if __name__ == "__main__":
sys.exit(main())
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
class DeploymentCache:
"""Cached CLI state for a QIIME deployment.
In this context, a QIIME deployment is the set of installed Python
packages, including their exact versions, that register one or more QIIME 2
plugins. The exact version of q2cli is also included in the deployment.
The deployment cache stores the current deployment's package names and
versions in a requirements.txt file under the cache directory. This file is
used to determine if the cache is outdated. If the cache is determined to
be outdated, it will be refreshed based on the current deployment state.
Thus, adding, removing, upgrading, or downgrading a plugin package or q2cli
itself will trigger a cache refresh.
Two mechanisms are provided to force a cache refresh. Setting the
environment variable Q2CLIDEV to any value will cause the cache to be
refreshed upon instantiation. Calling `.refresh()` will also refresh the
cache. Forced refreshing of the cache is useful for plugin and/or q2cli
developers who want their changes to take effect in the CLI without
changing their package versions.
Cached CLI state is stored in a state.json file under the cache directory.
It is not a public file format and it is not versioned. q2cli is included
as part of the QIIME deployment so that the cached state can always be read
(or recreated as necessary) by the currently installed version of q2cli.
This class is intended to be a singleton because it is responsible for
managing the on-disk cache. Having more than one instance managing the
cache has the possibility of two instances clobbering the cache (e.g. in a
multithreaded/multiprocessing situation). Also, having a single instance
improves performance by only reading and/or refreshing the cache a
single time during its lifetime. Having two instances could, for example,
trigger two cache refreshes if Q2CLIDEV is set. To support these use-cases,
a module-level `CACHE` variable stores a single instance of this class.
"""
# Public API
def __init__(self):
import os
# Indicates if the cache has been refreshed. For performance purposes,
# the cache is only refreshed a single time (at maximum) during the
# object's lifetime. Thus, "hot reloading" isn't supported, but this
# shouldn't be necessary for the CLI.
self._refreshed = False
self._cache_dir = self._get_cache_dir()
refresh = 'Q2CLIDEV' in os.environ
self._state = self._get_cached_state(refresh=refresh)
@property
def plugins(self):
"""Decoded JSON object representing CLI state on a per-plugin basis."""
return self._state['plugins']
def refresh(self):
"""Trigger a forced refresh of the cache.
If the cache has already been refreshed (either by this method or at
some point during instantiation), this method is a no-op.
"""
if not self._refreshed:
self._state = self._get_cached_state(refresh=True)
# Private API
def _get_cache_dir(self):
import os
import q2cli.util
cache_dir = q2cli.util.get_cache_dir()
os.makedirs(cache_dir, exist_ok=True)
return cache_dir
def _get_cached_state(self, refresh):
import json
import os.path
import q2cli.util
current_requirements = self._get_current_requirements()
state_path = os.path.join(self._cache_dir, 'state.json')
# See note on `get_completion_path` for why knowledge of this path
# exists in `q2cli.util` and not in this class.
completion_path = q2cli.util.get_completion_path()
# The cache must be refreshed in the following cases:
# 1) We have been explicitly told to refresh.
if refresh:
self._cache_current_state(current_requirements)
# 2) The current deployment requirements are different than the cached
# requirements.
elif current_requirements != self._get_cached_requirements():
self._cache_current_state(current_requirements)
# 3) The cached state file does not exist.
elif not os.path.exists(state_path):
self._cache_current_state(current_requirements)
# 4) The cached bash completion script does not exist.
elif not os.path.exists(completion_path):
self._cache_current_state(current_requirements)
# Now that the cache is up-to-date, read it.
try:
with open(state_path, 'r') as fh:
return json.load(fh)
except json.JSONDecodeError:
# 5) The cached state file can't be read as JSON.
self._cache_current_state(current_requirements)
with open(state_path, 'r') as fh:
return json.load(fh)
# NOTE: The private methods below are all used internally within
# `_get_cached_state`.
def _get_current_requirements(self):
"""Includes installed versions of q2cli and QIIME 2 plugins."""
import os
import pkg_resources
import q2cli
reqs = {
pkg_resources.Requirement.parse('q2cli == %s' % q2cli.__version__)
}
# A distribution (i.e. Python package) can have multiple plugins, where
# each plugin is its own entry point. A distribution's `Requirement` is
# hashable, and the `set` is used to exclude duplicates. Thus, we only
# gather the set of requirements for all installed Python packages
# containing one or more plugins. It is not necessary to track
# individual plugin names and versions in order to determine if the
# cache is outdated.
#
# TODO: this code is (more or less) copied from
# `qiime2.sdk.PluginManager.iter_entry_points`. Importing QIIME is
# currently slow, and it adds ~600-700ms to any CLI command. This makes
# the CLI pretty unresponsive, especially when running help/informative
# commands. Replace with the following lines when
# https://github.com/qiime2/qiime2/issues/151 is fixed:
#
# for ep in qiime2.sdk.PluginManager.iter_entry_points():
# reqs.add(ep.dist.as_requirement())
#
for entry_point in pkg_resources.iter_entry_points(
group='qiime2.plugins'):
if 'QIIMETEST' in os.environ:
if entry_point.name == 'dummy-plugin':
reqs.add(entry_point.dist.as_requirement())
else:
if entry_point.name != 'dummy-plugin':
reqs.add(entry_point.dist.as_requirement())
return reqs
def _get_cached_requirements(self):
import os.path
import pkg_resources
path = os.path.join(self._cache_dir, 'requirements.txt')
if not os.path.exists(path):
# No cached requirements. The empty set will always trigger a cache
# refresh because the current requirements will, at minimum,
# contain q2cli.
return set()
else:
with open(path, 'r') as fh:
contents = fh.read()
try:
return set(pkg_resources.parse_requirements(contents))
except pkg_resources.RequirementParseError:
# Unreadable cached requirements, trigger a cache refresh.
return set()
def _cache_current_state(self, requirements):
import json
import os.path
import click
import q2cli.completion
import q2cli.util
click.secho(
"QIIME is caching your current deployment for improved "
"performance. This may take a few moments and should only happen "
"once per deployment.", fg='yellow', err=True)
cache_dir = self._cache_dir
state = self._get_current_state()
path = os.path.join(cache_dir, 'state.json')
with open(path, 'w') as fh:
json.dump(state, fh)
q2cli.completion.write_bash_completion_script(
state['plugins'], q2cli.util.get_completion_path())
# Write requirements file last because the above steps may raise errors
# (e.g. a plugin can't be loaded in `_get_current_state`). If any part
# of the cache writing fails, it needs to be refreshed the next time
# the cache is accessed. The absence of a requirements file will
# trigger this cache refresh, avoiding this bug:
# https://github.com/qiime2/q2cli/issues/88
path = os.path.join(cache_dir, 'requirements.txt')
with open(path, 'w') as fh:
for req in requirements:
# `str(Requirement)` is the recommended way to format a
# `Requirement` that can be read with `Requirement.parse`.
fh.write(str(req))
fh.write('\n')
self._refreshed = True
def _get_current_state(self):
"""Get current CLI state as an object that is serializable as JSON.
WARNING: This method is very slow and should only be called when the
cache needs to be refreshed.
"""
import qiime2.sdk
state = {
'plugins': {}
}
plugin_manager = qiime2.sdk.PluginManager()
for name, plugin in plugin_manager.plugins.items():
state['plugins'][name] = self._get_plugin_state(plugin)
return state
def _get_plugin_state(self, plugin):
state = {
# TODO this conversion also happens in the framework
# (qiime2/plugins.py) to generate an importable module name from a
# plugin's `.name` attribute. Centralize this knowledge in the
# framework, ideally as a machine-friendly plugin ID (similar to
# `Action.id`).
'id': plugin.name.replace('-', '_'),
'name': plugin.name,
'version': plugin.version,
'website': plugin.website,
'citation_text': plugin.citation_text,
'user_support_text': plugin.user_support_text,
'description': plugin.description,
'short_description': plugin.short_description,
'actions': {}
}
for id, action in plugin.actions.items():
state['actions'][id] = self._get_action_state(action)
return state
def _get_action_state(self, action):
state = {
'id': action.id,
'name': action.name,
'description': action.description,
'signature': {
# This preserves order of inputs, parameters, and outputs,
# which will be necessary when `Action.signature` retains API
# order: https://github.com/qiime2/qiime2/issues/70
'inputs': [],
'parameters': [],
'outputs': [],
'defaults': action.signature.defaults
}
}
# Inputs and outputs are handled the same. Parameters must be handled a
# little differently because they require an AST representation.
for group in 'inputs', 'outputs':
for name, spec in getattr(action.signature, group).items():
data = {'name': name, 'repr': repr(spec.qiime_type)}
data['description'] = spec.description if \
spec.has_description() else None
state['signature'][group].append(data)
for name, spec in action.signature.parameters.items():
data = {'name': name, 'repr': repr(spec.qiime_type),
'ast': spec.qiime_type.to_ast()}
data['description'] = spec.description if \
spec.has_description() else None
state['signature']['parameters'].append(data)
return state
# Singleton. Import and use this instance as necessary.
CACHE = DeploymentCache()
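# Usage sketch (assuming this module is importable as q2cli.cache, which is
# not stated explicitly above):
#   from q2cli.cache import CACHE
#   for name, plugin_state in CACHE.plugins.items():
#       print(name, plugin_state['version'])
#   CACHE.refresh()  # force a rebuild, e.g. while developing a plugin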
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from quantum.agent import securitygroups_rpc as sg_rpc
from quantum.common import constants as q_const
from quantum.common import rpc as q_rpc
from quantum.common import topics
from quantum.db import agents_db
from quantum.db import api as db_api
from quantum.db import dhcp_rpc_base
from quantum.db import l3_rpc_base
from quantum.db import securitygroups_rpc_base as sg_db_rpc
from quantum.openstack.common import log
from quantum.openstack.common.rpc import proxy
from quantum.plugins.ml2 import db
from quantum.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)
TAP_DEVICE_PREFIX = 'tap'
TAP_DEVICE_PREFIX_LENGTH = 3
class RpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
l3_rpc_base.L3RpcCallbackMixin,
sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
RPC_API_VERSION = '1.1'
# history
# 1.0 Initial version (from openvswitch/linuxbridge)
# 1.1 Support Security Group RPC
def __init__(self, notifier):
self.notifier = notifier
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return q_rpc.PluginRpcDispatcher([self,
agents_db.AgentExtRpcCallback()])
@classmethod
def _device_to_port_id(cls, device):
# REVISIT(rkukura): Consider calling into MechanismDrivers to
# process device names, or having MechanismDrivers supply list
# of device prefixes to strip.
if device.startswith(TAP_DEVICE_PREFIX):
return device[TAP_DEVICE_PREFIX_LENGTH:]
else:
return device
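    # For example, a (hypothetical) device reported by the agent as
    # "tapdeadbeef-12" has the leading "tap" stripped and becomes
    # "deadbeef-12", while a name without the prefix is returned unchanged.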
@classmethod
def get_port_from_device(cls, device):
port_id = cls._device_to_port_id(device)
port = db.get_port_and_sgs(port_id)
if port:
port['device'] = device
return port
def get_device_details(self, rpc_context, **kwargs):
"""Agent requests device details."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s details requested by agent "
"%(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port_id = self._device_to_port_id(device)
session = db_api.get_session()
with session.begin(subtransactions=True):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_("Device %(device)s requested by agent "
"%(agent_id)s not found in database"),
{'device': device, 'agent_id': agent_id})
return {'device': device}
segments = db.get_network_segments(session, port.network_id)
if not segments:
LOG.warning(_("Device %(device)s requested by agent "
"%(agent_id)s has network %(network_id) with "
"no segments"),
{'device': device,
'agent_id': agent_id,
'network_id': port.network_id})
return {'device': device}
#TODO(rkukura): Use/create port binding
segment = segments[0]
new_status = (q_const.PORT_STATUS_ACTIVE if port.admin_state_up
else q_const.PORT_STATUS_DOWN)
if port.status != new_status:
port.status = new_status
entry = {'device': device,
'network_id': port.network_id,
'port_id': port.id,
'admin_state_up': port.admin_state_up,
'network_type': segment[api.NETWORK_TYPE],
'segmentation_id': segment[api.SEGMENTATION_ID],
'physical_network': segment[api.PHYSICAL_NETWORK]}
LOG.debug(_("Returning: %s"), entry)
return entry
def update_device_down(self, rpc_context, **kwargs):
"""Device no longer exists on agent."""
# TODO(garyk) - live migration and port status
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s no longer exists at agent "
"%(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port_id = self._device_to_port_id(device)
session = db_api.get_session()
with session.begin(subtransactions=True):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_("Device %(device)s updated down by agent "
"%(agent_id)s not found in database"),
{'device': device, 'agent_id': agent_id})
return {'device': device,
'exists': False}
if port.status != q_const.PORT_STATUS_DOWN:
port.status = q_const.PORT_STATUS_DOWN
return {'device': device,
'exists': True}
def update_device_up(self, rpc_context, **kwargs):
"""Device is up on agent."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s up at agent %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port_id = self._device_to_port_id(device)
session = db_api.get_session()
with session.begin(subtransactions=True):
port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Device %(device)s updated up by agent "
                              "%(agent_id)s not found in database"),
                            {'device': device, 'agent_id': agent_id})
                return
if port.status != q_const.PORT_STATUS_ACTIVE:
port.status = q_const.PORT_STATUS_ACTIVE
# TODO(rkukura) Add tunnel_sync() here if not implemented via a
# driver.
class AgentNotifierApi(proxy.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
"""Agent side of the openvswitch rpc API.
API version history:
1.0 - Initial version.
"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_network_delete = topics.get_topic_name(topic,
topics.NETWORK,
topics.DELETE)
self.topic_port_update = topics.get_topic_name(topic,
topics.PORT,
topics.UPDATE)
# TODO(rkukura): Add topic_tunnel_update here if not
# implemented via a driver.
def network_delete(self, context, network_id):
self.fanout_cast(context,
self.make_msg('network_delete',
network_id=network_id),
topic=self.topic_network_delete)
def port_update(self, context, port, network_type, segmentation_id,
physical_network):
self.fanout_cast(context,
self.make_msg('port_update',
port=port,
network_type=network_type,
segmentation_id=segmentation_id,
physical_network=physical_network),
topic=self.topic_port_update)
# TODO(rkukura): Add tunnel_update() here if not
# implemented via a driver.
|
|
# Copyright 2008-2011 WebDriver committers
# Copyright 2008-2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import base64
import copy
import os
import re
import shutil
import tempfile
import zipfile
from cStringIO import StringIO
from xml.dom import minidom
from distutils import dir_util
from selenium.webdriver.common.proxy import ProxyType
from selenium.common.exceptions import WebDriverException
WEBDRIVER_EXT = "webdriver.xpi"
EXTENSION_NAME = "fxdriver@googlecode.com"
class FirefoxProfile(object):
ANONYMOUS_PROFILE_NAME = "WEBDRIVER_ANONYMOUS_PROFILE"
DEFAULT_PREFERENCES = {
"app.update.auto": "false",
"app.update.enabled": "false",
"browser.startup.page" : "0",
"browser.download.manager.showWhenStarting": "false",
"browser.EULA.override": "true",
"browser.EULA.3.accepted": "true",
"browser.link.open_external": "2",
"browser.link.open_newwindow": "2",
"browser.offline": "false",
"browser.safebrowsing.enabled": "false",
"browser.search.update": "false",
"browser.sessionstore.resume_from_crash": "false",
"browser.shell.checkDefaultBrowser": "false",
"browser.tabs.warnOnClose": "false",
"browser.tabs.warnOnOpen": "false",
"browser.startup.page": "0",
"browser.safebrowsing.malware.enabled": "false",
"startup.homepage_welcome_url": "\"about:blank\"",
"devtools.errorconsole.enabled": "true",
"dom.disable_open_during_load": "false",
"extensions.autoDisableScopes" : 10,
"extensions.logging.enabled": "true",
"extensions.update.enabled": "false",
"extensions.update.notifyUser": "false",
"network.manage-offline-status": "false",
"network.http.max-connections-per-server": "10",
"network.http.phishy-userpass-length": "255",
"offline-apps.allow_by_default": "true",
"prompts.tab_modal.enabled": "false",
"security.fileuri.origin_policy": "3",
"security.fileuri.strict_origin_policy": "false",
"security.warn_entering_secure": "false",
"security.warn_submit_insecure": "false",
"security.warn_entering_secure.show_once": "false",
"security.warn_entering_weak": "false",
"security.warn_entering_weak.show_once": "false",
"security.warn_leaving_secure": "false",
"security.warn_leaving_secure.show_once": "false",
"security.warn_submit_insecure": "false",
"security.warn_viewing_mixed": "false",
"security.warn_viewing_mixed.show_once": "false",
"signon.rememberSignons": "false",
"toolkit.networkmanager.disable": "true",
"toolkit.telemetry.enabled": "false",
"toolkit.telemetry.prompted": "2",
"toolkit.telemetry.rejected": "true",
"javascript.options.showInConsole": "true",
"browser.dom.window.dump.enabled": "true",
"webdriver_accept_untrusted_certs": "true",
"webdriver_enable_native_events": "true",
"webdriver_assume_untrusted_issuer": "true",
"dom.max_script_run_time": "30",
}
    def __init__(self, profile_directory=None):
"""
Initialises a new instance of a Firefox Profile
:args:
- profile_directory: Directory of profile that you want to use.
This defaults to None and will create a new
directory when object is created.
"""
self.default_preferences = copy.deepcopy(
FirefoxProfile.DEFAULT_PREFERENCES)
self.profile_dir = profile_directory
if self.profile_dir is None:
self.profile_dir = self._create_tempfolder()
else:
newprof = os.path.join(tempfile.mkdtemp(),
"webdriver-py-profilecopy")
shutil.copytree(self.profile_dir, newprof,
ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock"))
self.profile_dir = newprof
self._read_existing_userjs()
self.extensionsDir = os.path.join(self.profile_dir, "extensions")
self.userPrefs = os.path.join(self.profile_dir, "user.js")
#Public Methods
def set_preference(self, key, value):
"""
sets the preference that we want in the profile.
"""
clean_value = ''
if value is True:
clean_value = 'true'
elif value is False:
clean_value = 'false'
elif isinstance(value, str):
clean_value = '"%s"' % value
elif isinstance(value, unicode):
clean_value = '"%s"' % value
else:
clean_value = str(int(value))
self.default_preferences[key] = clean_value
def add_extension(self, extension=WEBDRIVER_EXT):
self._install_extension(extension)
def update_preferences(self):
self._write_user_prefs(self.default_preferences)
#Properties
@property
def path(self):
"""
Gets the profile directory that is currently being used
"""
return self.profile_dir
@property
def port(self):
"""
Gets the port that WebDriver is working on
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port that WebDriver will be running on
"""
if not isinstance(port, int):
raise WebDriverException("Port needs to be an integer")
self._port = port
self.set_preference("webdriver_firefox_port", self._port)
@property
def accept_untrusted_certs(self):
return self._santise_pref(
self.default_preferences["webdriver_accept_untrusted_certs"])
@accept_untrusted_certs.setter
def accept_untrusted_certs(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_accept_untrusted_certs", value)
@property
def assume_untrusted_cert_issuer(self):
return self._santise_pref(self.default_preferences["webdriver_assume_untrusted_issuer"])
@assume_untrusted_cert_issuer.setter
def assume_untrusted_cert_issuer(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_assume_untrusted_issuer", value)
@property
def native_events_enabled(self):
return self._santise_pref(self.default_preferences['webdriver_enable_native_events'])
@native_events_enabled.setter
def native_events_enabled(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_enable_native_events", value)
@property
def encoded(self):
"""
A zipped, base64 encoded string of profile directory
for use with remote WebDriver JSON wire protocol
"""
fp = StringIO()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
        path_root = len(self.path) + 1  # account for the path separator
for base, dirs, files in os.walk(self.path):
for fyle in files:
filename = os.path.join(base, fyle)
zipped.write(filename, filename[path_root:])
zipped.close()
return base64.encodestring(fp.getvalue())
def set_proxy(self, proxy):
if proxy is None:
raise ValueError("proxy can not be None")
if proxy.proxy_type is ProxyType.UNSPECIFIED:
return
self.set_preference("network.proxy.type", proxy.proxy_type['ff_value'])
if proxy.proxy_type is ProxyType.MANUAL:
self.set_preference("network.proxy.no_proxies_on", proxy.no_proxy)
self._set_manual_proxy_preference("ftp", proxy.ftp_proxy)
self._set_manual_proxy_preference("http", proxy.http_proxy)
self._set_manual_proxy_preference("ssl", proxy.ssl_proxy)
elif proxy.proxy_type is ProxyType.AUTODETECT:
self.set_preference("network.proxy.autoconfig_url", proxy.proxy_autoconfig_url)
#Private Methods
def _santise_pref(self, item):
if item == 'true':
return True
elif item == 'false':
return False
else:
return item
def _set_manual_proxy_preference(self, key, setting):
        if setting is None or setting == '':
return
host_details = setting.split(":")
self.set_preference("network.proxy.%s" % key, host_details[1][2:])
if len(host_details) > 1:
self.set_preference("network.proxy.%s_port" % key, int(host_details[2]))
def _create_tempfolder(self):
"""
Creates a temp folder to store User.js and the extension
"""
return tempfile.mkdtemp()
def _write_user_prefs(self, user_prefs):
"""
writes the current user prefs dictionary to disk
"""
with open(self.userPrefs, "w") as f:
for key, value in user_prefs.items():
f.write('user_pref("%s", %s);\n' % (key, value))
def _read_existing_userjs(self):
userjs_path = os.path.join(self.profile_dir, 'user.js')
PREF_RE = re.compile(r'user_pref\("(.*)",\s(.*)\)')
try:
with open(userjs_path) as f:
for usr in f:
                    matches = re.search(PREF_RE, usr)
                    if matches:
                        self.default_preferences[matches.group(1)] = matches.group(2)
        except IOError:
            # The profile given hasn't had any changes made, i.e. no user.js
            pass
def _install_extension(self, addon, unpack=True):
"""
Installs addon from a filepath, url
or directory of addons in the profile.
- path: url, path to .xpi, or directory of addons
- unpack: whether to unpack unless specified otherwise in the install.rdf
"""
if addon == WEBDRIVER_EXT:
addon = os.path.join(os.path.dirname(__file__), WEBDRIVER_EXT)
tmpdir = None
xpifile = None
if addon.endswith('.xpi'):
tmpdir = tempfile.mkdtemp(suffix = '.' + os.path.split(addon)[-1])
compressed_file = zipfile.ZipFile(addon, 'r')
for name in compressed_file.namelist():
if name.endswith('/'):
os.makedirs(os.path.join(tmpdir, name))
else:
if not os.path.isdir(os.path.dirname(os.path.join(tmpdir, name))):
os.makedirs(os.path.dirname(os.path.join(tmpdir, name)))
data = compressed_file.read(name)
with open(os.path.join(tmpdir, name), 'wb') as f:
f.write(data)
xpifile = addon
addon = tmpdir
# determine the addon id
addon_details = self._addon_details(addon)
addon_id = addon_details.get('id')
assert addon_id, 'The addon id could not be found: %s' % addon
# copy the addon to the profile
extensions_path = os.path.join(self.profile_dir, 'extensions')
addon_path = os.path.join(extensions_path, addon_id)
if not unpack and not addon_details['unpack'] and xpifile:
if not os.path.exists(extensions_path):
os.makedirs(extensions_path)
shutil.copy(xpifile, addon_path + '.xpi')
else:
dir_util.copy_tree(addon, addon_path, preserve_symlinks=1)
# remove the temporary directory, if any
if tmpdir:
dir_util.remove_tree(tmpdir)
def _addon_details(self, addon_path):
"""
returns a dictionary of details about the addon
- addon_path : path to the addon directory
Returns:
{'id': u'rainbow@colors.org', # id of the addon
'version': u'1.4', # version of the addon
'name': u'Rainbow', # name of the addon
'unpack': False } # whether to unpack the addon
"""
# TODO: We don't use the unpack variable yet, but we should: bug 662683
details = {
'id': None,
'name': None,
'unpack': True,
'version': None
}
def get_namespace_id(doc, url):
attributes = doc.documentElement.attributes
namespace = ""
for i in range(attributes.length):
if attributes.item(i).value == url:
if ":" in attributes.item(i).name:
                        # If the namespace is not the default one, remove the 'xmlns:' prefix
namespace = attributes.item(i).name.split(':')[1] + ":"
break
return namespace
def get_text(element):
"""Retrieve the text value of a given node"""
rc = []
for node in element.childNodes:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc).strip()
doc = minidom.parse(os.path.join(addon_path, 'install.rdf'))
# Get the namespaces abbreviations
em = get_namespace_id(doc, "http://www.mozilla.org/2004/em-rdf#")
rdf = get_namespace_id(doc, "http://www.w3.org/1999/02/22-rdf-syntax-ns#")
description = doc.getElementsByTagName(rdf + "Description").item(0)
for node in description.childNodes:
# Remove the namespace prefix from the tag for comparison
entry = node.nodeName.replace(em, "")
if entry in details.keys():
details.update({ entry: get_text(node) })
return details
|
|
# Copyright 2020. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License"];
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import struct
import unittest
from os import path
from random import choice, randint, uniform
from string import ascii_lowercase
from time import sleep
from unittest.mock import Mock
import thingsboard_gateway
from can import Notifier, BufferedReader, Bus, Message
from simplejson import load
from thingsboard_gateway.connectors.can.can_connector import CanConnector
logging.basicConfig(level=logging.ERROR,
format='%(asctime)s - %(levelname)s - %(module)s - %(lineno)d - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
class CanConnectorTestsBase(unittest.TestCase):
CONFIG_PATH = path.join(path.dirname(path.dirname(path.abspath(__file__))),
"data" + path.sep + "can" + path.sep)
def setUp(self):
self.bus = self._create_bus()
self.gateway = Mock(spec=thingsboard_gateway.TBGatewayService)
self.connector = None
self.config = None
def tearDown(self):
self.connector.close()
self.bus.shutdown()
def _create_bus(self):
return Bus(
channel="virtual_channel",
bustype="virtual",
receive_own_messages=False
)
def _create_connector(self, config_file_name):
with open(self.CONFIG_PATH + config_file_name, 'r', encoding="UTF-8") as file:
self.config = load(file)
self.connector = CanConnector(self.gateway, self.config, "can")
self.connector.open()
sleep(1) # some time to init
class CanConnectorPollingTests(CanConnectorTestsBase):
def test_polling_once(self):
self._create_connector("polling_once.json")
config = self.config["devices"][0]["attributes"][0]
message = self.bus.recv(self.connector.DEFAULT_POLL_PERIOD)
self.assertEqual(message.arbitration_id, config["nodeId"])
self.assertEqual(message.is_extended_id, config["isExtendedId"])
self.assertEqual(message.is_fd, self.connector.DEFAULT_FD_FLAG)
self.assertEqual(message.bitrate_switch, self.connector.DEFAULT_BITRATE_SWITCH_FLAG)
self.assertEqual(message.data, bytearray.fromhex(config["polling"]["dataInHex"]))
# Some buses may receive their own messages. Remove it from the queue
self.bus.recv(0)
# Check if no new polling messages
sleep(self.connector.DEFAULT_POLL_PERIOD)
message = self.bus.recv(self.connector.DEFAULT_POLL_PERIOD)
self.assertIsNone(message)
def test_polling_always(self):
self._create_connector("polling_always.json")
config = self.config["devices"][0]["attributes"][0]
for _ in range(1, 5):
            # The receive timeout should be greater than the polling period so
            # that recv() does not time out before the next polled message arrives.
message = self.bus.recv(config["polling"]["period"] + 0.2)
self.assertIsNotNone(message)
# Some buses may receive their own messages. Remove it from the queue
self.bus.recv(0)
self.assertEqual(message.arbitration_id, config["nodeId"])
self.assertEqual(message.is_extended_id, self.connector.DEFAULT_EXTENDED_ID_FLAG)
self.assertEqual(message.is_fd, self.connector.DEFAULT_FD_FLAG)
self.assertEqual(message.bitrate_switch, self.connector.DEFAULT_BITRATE_SWITCH_FLAG)
self.assertEqual(message.data, bytearray.fromhex(config["polling"]["dataInHex"]))
def test_multiple_polling(self):
reader = BufferedReader()
bus_notifier = Notifier(self.bus, [reader])
self._create_connector("multiple_polling.json")
config1 = self.config["devices"][0]["timeseries"][0]
config2 = self.config["devices"][0]["timeseries"][1]
config3 = self.config["devices"][0]["timeseries"][2]
time_to_wait = config2["polling"]["period"] * 4
message_count = int(time_to_wait / config2["polling"]["period"]) + 1 + \
int(time_to_wait / config3["polling"]["period"]) + 1 + \
1 # one time polling task
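        # Worked example (hypothetical config values): with polling periods of
        # 1s for config2 and 2s for config3, time_to_wait is 4s and the count
        # is (4 + 1) + (2 + 1) + 1 = 9 messages.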
sleep(time_to_wait)
self.connector.close()
bus_notifier.stop()
messages = []
while True:
msg = reader.get_message(time_to_wait)
if msg is None:
break
messages.append(msg)
self.assertEqual(len(messages), message_count)
expected_message_ids = [config1["nodeId"], config2["nodeId"], config3["nodeId"],
config2["nodeId"], config3["nodeId"], config2["nodeId"],
config3["nodeId"], config2["nodeId"], config3["nodeId"],
config2["nodeId"]]
for i in range(0, message_count):
self.assertEqual(messages[i].arbitration_id, expected_message_ids[i])
class CanConnectorTsAndAttrTests(CanConnectorTestsBase):
def _create_bus(self):
return Bus(
channel="virtual_channel",
bustype="virtual",
receive_own_messages=False,
is_fd=True
)
def test_string_attribute_and_custom_device_type(self):
self._create_connector("ts_and_attr.json")
device_name = self.config["devices"][0]["name"]
config = self.config["devices"][0]["attributes"][0]
value_matches = re.search(self.connector.VALUE_REGEX, config["value"])
string_value = ''.join(choice(ascii_lowercase) for _ in range(int(value_matches.group(2))))
can_data = list(config["command"]["value"].to_bytes(config["command"]["length"],
config["command"]["byteorder"]))
can_data.extend(string_value.encode(value_matches.group(5)))
message_count = 5
for _ in range(message_count):
self.bus.send(Message(arbitration_id=config["nodeId"],
is_fd=config["isFd"],
data=can_data))
        sleep(1)  # wait while the connector processes the CAN messages
self.assertEqual(self.gateway.send_to_storage.call_count, message_count)
self.gateway.send_to_storage.assert_called_with(self.connector.get_name(),
{"deviceName": device_name,
"deviceType": self.config["devices"][0]["type"],
"attributes": [{"serialNumber": string_value}],
"telemetry": []})
def test_send_only_on_change_and_default_device_type(self):
self._create_connector("ts_and_attr.json")
config = self.config["devices"][1]["timeseries"][0]
value_matches = re.search(self.connector.VALUE_REGEX, config["value"])
value = randint(0, pow(2, int(value_matches.group(2))))
can_data = list(bytearray.fromhex("0" * 2 * int(value_matches.group(1))))
can_data.extend(value.to_bytes(int(value_matches.group(2)),
value_matches.group(3) if value_matches.group(
3) else self.connector.DEFAULT_BYTEORDER))
for _ in range(5):
self.bus.send(Message(arbitration_id=config["nodeId"],
data=can_data))
sleep(1)
self.gateway.send_to_storage.assert_called_once_with(self.connector.get_name(),
{"deviceName": self.config["devices"][1]["name"],
"deviceType": self.connector._CanConnector__connector_type,
"attributes": [],
"telemetry": [{config["key"]: value}]})
class CanConnectorAttributeUpdatesTests(CanConnectorTestsBase):
def test_update(self):
reader = BufferedReader()
bus_notifier = Notifier(self.bus, [reader])
self._create_connector("attribute_updates.json")
configs = self.config["devices"][0]["attributeUpdates"]
updates = {"device": self.config["devices"][0]["name"],
"data": {
"boolAttr": True,
"intAttr": randint(-int(pow(2, configs[1]["dataLength"]) / 2),
pow(2, configs[1]["dataLength"] - 1)),
"floatAttr": uniform(-3.1415926535, 3.1415926535),
"stringAttr": ''.join(choice(ascii_lowercase) for _ in range(8)),
"wrongConfigAttr": True
}}
data_list = [[int(updates["data"]["boolAttr"])],
updates["data"]["intAttr"].to_bytes(configs[1]["dataLength"],
configs[1]["dataByteorder"],
signed=(updates["data"]["intAttr"] < 0)),
list(struct.pack(">f", updates["data"]["floatAttr"])),
list(str("Test" + updates["data"]["stringAttr"]).encode(self.connector.DEFAULT_ENCODING))
]
self.connector.on_attributes_update(updates)
sleep(1)
self.connector.close()
bus_notifier.stop()
messages = []
while True:
msg = reader.get_message(1)
if msg is None:
break
messages.append(msg)
self.assertEqual(len(messages), len(data_list))
messages = sorted(messages, key=lambda message: message.arbitration_id)
for i in range(len(messages)):
self.assertTrue(messages[i].equals(Message(arbitration_id=configs[i]["nodeId"],
is_extended_id=configs[i].get("isExtendedId",
self.connector.DEFAULT_EXTENDED_ID_FLAG),
is_fd=configs[i].get("isFd", self.connector.DEFAULT_FD_FLAG),
bitrate_switch=configs[i].get("bitrateSwitch",
self.connector.DEFAULT_BITRATE_SWITCH_FLAG),
data=data_list[i],
timestamp=messages[i].timestamp,
channel=messages[i].channel)))
class CanConnectorRpcTests(CanConnectorTestsBase):
def _create_bus(self):
return Bus(
channel="virtual_channel",
bustype="virtual",
receive_own_messages=False,
is_fd=True
)
def test_rpc_with_hex_data_in_config(self):
self._create_connector("rpc.json")
config = self.config["devices"][0]["serverSideRpc"][0]
self.connector.server_side_rpc_handler({"device": self.config["devices"][0]["name"],
"data": {
"id": 1,
"method": config["method"]
}})
actual_message = self.bus.recv(1)
self.assertTrue(actual_message.equals(Message(arbitration_id=config["nodeId"],
is_fd=config["isFd"],
bitrate_switch=config["bitrateSwitch"],
data=bytearray.fromhex(config["dataInHex"]),
timestamp=actual_message.timestamp,
channel=actual_message.channel)))
def test_rpc_with_hex_data_in_params(self):
self._create_connector("rpc.json")
config = self.config["devices"][1]["serverSideRpc"][0]
hex_data = "1234 abcd"
self.assertNotEqual(hex_data, config["dataInHex"])
self.connector.server_side_rpc_handler({"device": self.config["devices"][1]["name"],
"data": {
"id": 1,
"method": config["method"],
"params": {
"dataInHex": hex_data
}
}})
actual_message = self.bus.recv(1)
self.assertTrue(actual_message.equals(Message(arbitration_id=config["nodeId"],
is_fd=config["isFd"],
bitrate_switch=config["bitrateSwitch"],
data=bytearray.fromhex(hex_data),
timestamp=actual_message.timestamp,
channel=actual_message.channel)))
def test_rpc_expression_in_config(self):
self._create_connector("rpc.json")
config = self.config["devices"][0]["serverSideRpc"][1]
max_allowed_speed = randint(100, 200)
user_speed = randint(150, 250)
self.connector.server_side_rpc_handler({"device": self.config["devices"][0]["name"],
"data": {
"id": 1,
"method": config["method"],
"params": {
"userSpeed": user_speed,
"maxAllowedSpeed": max_allowed_speed
}
}})
can_data = int(user_speed if max_allowed_speed > user_speed else max_allowed_speed)\
.to_bytes(config["dataLength"], "little")
actual_message = self.bus.recv(1)
self.assertTrue(actual_message.equals(Message(arbitration_id=config["nodeId"],
is_extended_id=config.get("isExtendedId",
self.connector.DEFAULT_EXTENDED_ID_FLAG),
data=can_data,
timestamp=actual_message.timestamp,
channel=actual_message.channel)))
def test_deny_unknown_rpc(self):
self._create_connector("rpc.json")
self.connector.server_side_rpc_handler({"device": self.config["devices"][0]["name"],
"data": {
"id": 1,
"method": ''.join(choice(ascii_lowercase) for _ in range(8))
}})
self.assertIsNone(self.bus.recv(5))
def test_enable_unknown_rpc(self):
self._create_connector("rpc.json")
max_not_extended_node_id = 0x800
node_id = randint(0, 0x20000000)
data_before = "aa bb"
data_after = "cc dd ee ff"
data_length = 4
integer_value = randint(-int(pow(2, 8 * data_length) / 2), pow(2, 8 * data_length) - 1)
can_data = list(bytearray.fromhex(data_before))
can_data.extend(integer_value.to_bytes(data_length, "big", signed=(integer_value < 0)))
can_data.extend(bytearray.fromhex(data_after))
self.connector.server_side_rpc_handler({"device": self.config["devices"][2]["name"],
"data": {
"id": 1,
"method": ''.join(choice(ascii_lowercase) for _ in range(8)),
"params": {
"value": integer_value,
"nodeId": node_id,
"isExtendedId": (node_id > max_not_extended_node_id),
"isFd": (len(can_data) > 8),
"dataLength": data_length,
# The value may be either signed or unsigned;
# the connector should handle both cases correctly
"dataSigned": False,
"dataBefore": data_before,
"dataAfter": data_after,
"response": True
}
}})
actual_message = self.bus.recv(1)
self.assertTrue(actual_message.equals(Message(arbitration_id=node_id,
is_extended_id=(node_id > max_not_extended_node_id),
is_fd=(len(can_data) > 8),
data=can_data,
timestamp=actual_message.timestamp,
channel=actual_message.channel)))
self.gateway.send_rpc_reply.assert_called_once_with(self.config["devices"][2]["name"],
1,
{"success": True})
def test_rpc_response_failed(self):
self._create_connector("rpc.json")
config = self.config["devices"][3]["serverSideRpc"][0]
self.connector.server_side_rpc_handler({"device": self.config["devices"][3]["name"],
"data": {
"id": 1,
"method": config["method"]
}})
sleep(1)
self.gateway.send_rpc_reply.assert_called_once_with(self.config["devices"][3]["name"],
1,
{"success": False})
if __name__ == '__main__':
unittest.main()
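# --- Illustrative sketch (not part of the original test module) ---
# The tests above slice a "<start>:<length>:<byteorder>" style value expression
# with the connector's VALUE_REGEX and then assemble the raw CAN payload by
# hand. The helper below is a minimal, standalone sketch of that payload
# layout; the parameter names are assumptions mirroring the tests, not the
# connector's actual API.
def _example_build_can_payload(start_byte, length_bytes, value, byteorder="big"):
    """Return a CAN data list: zero padding up to start_byte, then the encoded value."""
    payload = list(bytearray(start_byte))                     # leading zero bytes
    payload.extend(value.to_bytes(length_bytes, byteorder))   # encoded integer value
    return payload
# For example, _example_build_can_payload(2, 4, 1234) -> [0, 0, 0, 0, 4, 210].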
|
|
import sys
import time
import unittest
from django.conf import settings
from django.db import transaction, connection
from django.db.utils import ConnectionHandler, DEFAULT_DB_ALIAS, DatabaseError
from django.test import (TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature)
from django.utils.functional import wraps
from django.utils import unittest
from models import Person
# Some tests require threading, which might not be available. So create a
# skip-test decorator for those test functions.
try:
import threading
except ImportError:
threading = None
requires_threading = unittest.skipUnless(threading, 'requires threading')
class SelectForUpdateTests(TransactionTestCase):
def setUp(self):
transaction.enter_transaction_management(True)
transaction.managed(True)
self.person = Person.objects.create(name='Reinhardt')
# We have to commit here so that code in run_select_for_update can
# see this data.
transaction.commit()
# We need another database connection to test that one connection
# issuing a SELECT ... FOR UPDATE will block.
new_connections = ConnectionHandler(settings.DATABASES)
self.new_connection = new_connections[DEFAULT_DB_ALIAS]
# We need to set settings.DEBUG to True so we can capture
# the output SQL to examine.
self._old_debug = settings.DEBUG
settings.DEBUG = True
def tearDown(self):
try:
# We don't really care if this fails - some of the tests will set
# this in the course of their run.
transaction.managed(False)
transaction.leave_transaction_management()
except transaction.TransactionManagementError:
pass
self.new_connection.close()
settings.DEBUG = self._old_debug
try:
self.end_blocking_transaction()
except (DatabaseError, AttributeError):
pass
def start_blocking_transaction(self):
# Start a blocking transaction. At some point,
# end_blocking_transaction() should be called.
self.cursor = self.new_connection.cursor()
sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
'db_table': Person._meta.db_table,
'for_update': self.new_connection.ops.for_update_sql(),
}
self.cursor.execute(sql, ())
result = self.cursor.fetchone()
def end_blocking_transaction(self):
# Roll back the blocking transaction.
self.new_connection._rollback()
def has_for_update_sql(self, tested_connection, nowait=False):
# Examine the SQL that was executed to determine whether it
# contains the 'SELECT..FOR UPDATE' stanza.
for_update_sql = tested_connection.ops.for_update_sql(nowait)
sql = tested_connection.queries[-1]['sql']
return bool(sql.find(for_update_sql) > -1)
def check_exc(self, exc):
self.failUnless(isinstance(exc, DatabaseError))
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_sql_generated(self):
"""
Test that the backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
list(Person.objects.all().select_for_update())
self.assertTrue(self.has_for_update_sql(connection))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_for_update_sql_generated_nowait(self):
"""
Test that the backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
list(Person.objects.all().select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(connection, nowait=True))
# In Python 2.6 beta and some final releases, exceptions raised in __len__
# are swallowed (Python issue 1242657), so these cases return an empty
# list, rather than raising an exception. Not a lot we can do about that,
# unfortunately, due to the way Python handles list() calls internally.
# Thus, we skip this test for Python 2.6.
@requires_threading
@skipUnlessDBFeature('has_select_for_update_nowait')
@unittest.skipIf(sys.version_info[:2] == (2, 6), "Python version is 2.6")
def test_nowait_raises_error_on_block(self):
"""
If nowait is specified, we expect an error to be raised rather
than blocking.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'nowait': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.check_exc(status[-1])
@skipIfDBFeature('has_select_for_update_nowait')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_nowait_raises_error(self):
"""
If a SELECT...FOR UPDATE NOWAIT is run on a database backend
that supports FOR UPDATE but not NOWAIT, then we should find
that a DatabaseError is raised.
"""
self.assertRaises(
DatabaseError,
list,
Person.objects.all().select_for_update(nowait=True)
)
def run_select_for_update(self, status, nowait=False):
"""
Utility method that runs a SELECT FOR UPDATE against all
Person instances. After the select_for_update, it attempts
to update the name of the only record, save, and commit.
In general, this will be run in a separate thread.
"""
status.append('started')
try:
# We need to enter transaction management again, as this is done
# on a per-thread basis
transaction.enter_transaction_management(True)
transaction.managed(True)
people = list(
Person.objects.all().select_for_update(nowait=nowait)
)
people[0].name = 'Fred'
people[0].save()
transaction.commit()
except DatabaseError, e:
status.append(e)
except Exception, e:
raise
@requires_threading
@skipUnlessDBFeature('has_select_for_update')
@skipUnlessDBFeature('supports_transactions')
def test_block(self):
"""
Check that a thread running a select_for_update that
accesses rows being touched by a similar operation
on another connection blocks correctly.
"""
# First, let's start the transaction in our thread.
self.start_blocking_transaction()
# Now, try it again using the ORM's select_for_update
# facility. Do this in a separate thread.
status = []
thread = threading.Thread(
target=self.run_select_for_update, args=(status,)
)
# The thread should immediately block, but we'll sleep
# for a bit to make sure.
thread.start()
sanity_count = 0
while len(status) != 1 and sanity_count < 10:
sanity_count += 1
time.sleep(1)
if sanity_count >= 10:
raise ValueError, 'Thread did not run and block'
# Check the person hasn't been updated. Since this isn't
# using FOR UPDATE, it won't block.
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Reinhardt', p.name)
# When we end our blocking transaction, our thread should
# be able to continue.
self.end_blocking_transaction()
thread.join(5.0)
# Check the thread has finished. Assuming it has, we should
# find that it has updated the person's name.
self.failIf(thread.isAlive())
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Fred', p.name)
@requires_threading
@skipUnlessDBFeature('has_select_for_update')
def test_raw_lock_not_available(self):
"""
Check that running a raw query which can't obtain a FOR UPDATE lock
raises the correct exception
"""
self.start_blocking_transaction()
def raw(status):
try:
list(
Person.objects.raw(
'SELECT * FROM %s %s' % (
Person._meta.db_table,
connection.ops.for_update_sql(nowait=True)
)
)
)
except DatabaseError, e:
status.append(e)
status = []
thread = threading.Thread(target=raw, kwargs={'status': status})
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.check_exc(status[-1])
@skipUnlessDBFeature('has_select_for_update')
def test_transaction_dirty_managed(self):
""" Check that a select_for_update sets the transaction to be
dirty when executed under txn management. Setting the txn dirty
means that it will be either committed or rolled back by Django,
which will release any locks held by the SELECT FOR UPDATE.
"""
people = list(Person.objects.select_for_update())
self.assertTrue(transaction.is_dirty())
@skipUnlessDBFeature('has_select_for_update')
def test_transaction_not_dirty_unmanaged(self):
""" If we're not under txn management, the txn will never be
marked as dirty.
"""
transaction.managed(False)
transaction.leave_transaction_management()
people = list(Person.objects.select_for_update())
self.assertFalse(transaction.is_dirty())
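# --- Illustrative sketch (not part of the original Django test module) ---
# The blocking tests above all follow one pattern: a second connection holds a
# row lock (SELECT ... FOR UPDATE), and a worker thread either blocks on the
# same rows or, with NOWAIT, fails immediately with DatabaseError. The stand-in
# below models that pattern with a plain threading.Lock instead of a database,
# so it is only an analogy for the control flow, not real Django/DB behaviour.
def _example_nowait_pattern():
    import threading as _threading  # local import keeps the sketch self-contained
    row_lock = _threading.Lock()
    status = []
    def worker():
        # FOR UPDATE NOWAIT analogue: give up immediately if the lock is held.
        acquired = row_lock.acquire(False)
        status.append('acquired' if acquired else 'error')
        if acquired:
            row_lock.release()
    row_lock.acquire()             # start_blocking_transaction() analogue
    t = _threading.Thread(target=worker)
    t.start()
    t.join()
    row_lock.release()             # end_blocking_transaction() analogue
    return status                  # -> ['error'], mirroring check_exc()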
|
|
##
# Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, wsanchez@apple.com
##
"""
WebDAV file operations
This API is considered private to static.py and is therefore subject to
change.
"""
__all__ = [
"delete",
"copy",
"move",
"put",
"mkcollection",
]
import os
import urllib
from urlparse import urlsplit
from twisted.python import log
from twisted.python.filepath import FilePath
from twisted.python.failure import Failure
from twisted.internet.defer import succeed, deferredGenerator, waitForDeferred
from twisted.web2 import responsecode
from twisted.web2.http import StatusResponse, HTTPError
from twisted.web2.stream import FileStream, readIntoFile
from twisted.web2.dav.http import ResponseQueue, statusForFailure
def delete(uri, filepath, depth="infinity"):
"""
Perform a X{DELETE} operation on the given URI, which is backed by the given
filepath.
@param filepath: the L{FilePath} to delete.
@param depth: the recursion X{Depth} for the X{DELETE} operation, which must
be "infinity".
@raise HTTPError: (containing a response with a status code of
L{responsecode.BAD_REQUEST}) if C{depth} is not "infinity".
@raise HTTPError: (containing an appropriate response) if the
delete operation fails. If C{filepath} is a directory, the response
will be a L{MultiStatusResponse}.
@return: a deferred response with a status code of L{responsecode.NO_CONTENT}
if the X{DELETE} operation succeeds.
"""
#
# Remove the file(s)
#
# FIXME: defer
if filepath.isdir():
#
# RFC 2518, section 8.6 says that we must act as if the Depth header is
# set to infinity, and that the client must omit the Depth header or set
# it to infinity, meaning that for collections, we will delete all
# members.
#
# This seems somewhat at odds with the notion that a bad request should
# be rejected outright; if the client sends a bad depth header, the
# client is broken, and RFC 2518, section 8 suggests that a bad request
# should be rejected...
#
# Let's play it safe for now and ignore broken clients.
#
if depth != "infinity":
msg = ("Client sent illegal depth header value for DELETE: %s" % (depth,))
log.err(msg)
raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))
#
# Recursive delete
#
# RFC 2518, section 8.6 says that if we get an error deleting a resource
# other than the collection in the request-URI, that we must respond
# with a multi-status response containing error statuses for each
# resource that we fail to delete. It also says we should not return
# no-content (success) status, which means that we should continue after
# errors, rather than aborting right away. This is interesting in that
# it's different from how most operating system tools act (e.g. rm) when
# recursive filesystem deletes fail.
#
uri_path = urllib.unquote(urlsplit(uri)[2])
if uri_path[-1] == "/":
uri_path = uri_path[:-1]
log.msg("Deleting directory %s" % (filepath.path,))
# NOTE: len(uri_path) is wrong if os.sep is not one byte long... meh.
request_basename = filepath.path[:-len(uri_path)]
request_basename_len = len(request_basename)
errors = ResponseQueue(request_basename, "DELETE", responsecode.NO_CONTENT)
# FIXME: defer this
for dir, subdirs, files in os.walk(filepath.path, topdown=False):
for filename in files:
path = os.path.join(dir, filename)
try:
os.remove(path)
except:
errors.add(path, Failure())
for subdir in subdirs:
path = os.path.join(dir, subdir)
if os.path.islink(path):
try:
os.remove(path)
except:
errors.add(path, Failure())
else:
try:
os.rmdir(path)
except:
errors.add(path, Failure())
try:
os.rmdir(filepath.path)
except:
raise HTTPError(statusForFailure(
Failure(),
"deleting directory: %s" % (filepath.path,)
))
response = errors.response()
else:
#
# Delete a file; much simpler, eh?
#
log.msg("Deleting file %s" % (filepath.path,))
try:
os.remove(filepath.path)
except:
raise HTTPError(statusForFailure(
Failure(),
"deleting file: %s" % (filepath.path,)
))
response = responsecode.NO_CONTENT
# Restat filepath since we deleted the backing file
filepath.restat(False)
return succeed(response)
def copy(source_filepath, destination_filepath, destination_uri, depth):
"""
Perform a X{COPY} from the given source and destination filepaths.
This will perform a X{DELETE} on the destination if necessary; the caller
should check and handle the X{overwrite} header before calling L{copy} (as
in L{COPYMOVE.prepareForCopy}).
@param source_filepath: a L{FilePath} for the file to copy from.
@param destination_filepath: a L{FilePath} for the file to copy to.
@param destination_uri: the URI of the destination resource.
@param depth: the recursion X{Depth} for the X{COPY} operation, which must
be one of "0", "1", or "infinity".
@raise HTTPError: (containing a response with a status code of
L{responsecode.BAD_REQUEST}) if C{depth} is not "0", "1" or "infinity".
@raise HTTPError: (containing an appropriate response) if the operation
fails. If C{source_filepath} is a directory, the response will be a
L{MultiStatusResponse}.
@return: a deferred response with a status code of L{responsecode.NO_CONTENT}
    if the destination already exists, or L{responsecode.CREATED} if the
    destination was created by the X{COPY} operation.
"""
if source_filepath.isfile():
#
# Copy the file
#
log.msg("Copying file %s to %s" % (source_filepath.path, destination_filepath.path))
try:
source_file = source_filepath.open()
except:
raise HTTPError(statusForFailure(
Failure(),
"opening file for reading: %s" % (source_filepath.path,)
))
source_stream = FileStream(source_file)
response = waitForDeferred(put(source_stream, destination_filepath, destination_uri))
yield response
try:
response = response.getResult()
finally:
source_stream.close()
source_file.close()
checkResponse(response, "put", responsecode.NO_CONTENT, responsecode.CREATED)
yield response
return
elif source_filepath.isdir():
if destination_filepath.exists():
#
# Delete the destination
#
response = waitForDeferred(delete(destination_uri, destination_filepath))
yield response
response = response.getResult()
checkResponse(response, "delete", responsecode.NO_CONTENT)
success_code = responsecode.NO_CONTENT
else:
success_code = responsecode.CREATED
#
# Copy the directory
#
log.msg("Copying directory %s to %s" % (source_filepath.path, destination_filepath.path))
source_basename = source_filepath.path
destination_basename = destination_filepath.path
errors = ResponseQueue(source_basename, "COPY", success_code)
if destination_filepath.parent().isdir():
if os.path.islink(source_basename):
link_destination = os.readlink(source_basename)
if link_destination[0] != os.path.sep:
link_destination = os.path.join(source_basename, link_destination)
try:
os.symlink(destination_basename, link_destination)
except:
errors.add(source_basename, Failure())
else:
try:
os.mkdir(destination_basename)
except:
raise HTTPError(statusForFailure(
Failure(),
"creating directory %s" % (destination_basename,)
))
if depth == "0":
yield success_code
return
else:
raise HTTPError(StatusResponse(
responsecode.CONFLICT,
"Parent collection for destination %s does not exist" % (destination_uri,)
))
#
# Recursive copy
#
# FIXME: When we report errors, do we report them on the source URI
# or on the destination URI? We're using the source URI here.
#
# FIXME: defer the walk?
source_basename_len = len(source_basename)
def paths(basepath, subpath):
source_path = os.path.join(basepath, subpath)
assert source_path.startswith(source_basename)
destination_path = os.path.join(destination_basename, source_path[source_basename_len+1:])
return source_path, destination_path
for dir, subdirs, files in os.walk(source_filepath.path, topdown=True):
for filename in files:
source_path, destination_path = paths(dir, filename)
if not os.path.isdir(os.path.dirname(destination_path)):
errors.add(source_path, responsecode.NOT_FOUND)
else:
response = waitForDeferred(copy(FilePath(source_path), FilePath(destination_path), destination_uri, depth))
yield response
response = response.getResult()
checkResponse(response, "copy", responsecode.NO_CONTENT)
for subdir in subdirs:
source_path, destination_path = paths(dir, subdir)
log.msg("Copying directory %s to %s" % (source_path, destination_path))
if not os.path.isdir(os.path.dirname(destination_path)):
errors.add(source_path, responsecode.CONFLICT)
else:
if os.path.islink(source_path):
link_destination = os.readlink(source_path)
if link_destination[0] != os.path.sep:
link_destination = os.path.join(source_path, link_destination)
try:
os.symlink(destination_path, link_destination)
except:
errors.add(source_path, Failure())
else:
try:
os.mkdir(destination_path)
except:
errors.add(source_path, Failure())
yield errors.response()
return
else:
log.err("Unable to COPY to non-file: %s" % (source_filepath.path,))
raise HTTPError(StatusResponse(
responsecode.FORBIDDEN,
"The requested resource exists but is not backed by a regular file."
))
raise AssertionError("We shouldn't be here.")
copy = deferredGenerator(copy)
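# --- Illustrative sketch (not part of the original module) ---
# The generator functions in this module (copy, move, put) all use the same
# old-style twisted coroutine pattern: wrap a Deferred in waitForDeferred,
# yield it, then call getResult() to obtain the value (or re-raise the
# failure); a plain value yielded at the end becomes the generator's result.
# A minimal self-contained example of that pattern, using an already-fired
# Deferred:
def _example_deferred_generator():
    d = waitForDeferred(succeed("some value"))
    yield d
    result = d.getResult()   # "some value"; would re-raise on failure
    yield result             # the final plain yield is the "return value"
_example_deferred_generator = deferredGenerator(_example_deferred_generator)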
def move(source_filepath, source_uri, destination_filepath, destination_uri, depth):
"""
Perform a X{MOVE} from the given source and destination filepaths.
This will perform a X{DELETE} on the destination if necessary; the caller
should check and handle the X{overwrite} header before calling L{move} (as
in L{COPYMOVE.prepareForCopy}).
Following the X{DELETE}, this will attempt an atomic filesystem move. If
that fails, a X{COPY} operation followed by a X{DELETE} on the source will
be attempted instead.
@param source_filepath: a L{FilePath} for the file to copy from.
@param destination_filepath: a L{FilePath} for the file to copy to.
@param destination_uri: the URI of the destination resource.
@param depth: the recursion X{Depth} for the X{MOVE} operation, which must
be "infinity".
@raise HTTPError: (containing a response with a status code of
L{responsecode.BAD_REQUEST}) if C{depth} is not "infinity".
@raise HTTPError: (containing an appropriate response) if the operation
fails. If C{source_filepath} is a directory, the response will be a
L{MultiStatusResponse}.
@return: a deferred response with a status code of L{responsecode.NO_CONTENT}
    if the destination already exists, or L{responsecode.CREATED} if the
    destination was created by the X{MOVE} operation.
"""
log.msg("Moving %s to %s" % (source_filepath.path, destination_filepath.path))
#
# Choose a success status
#
if destination_filepath.exists():
#
# Delete the destination
#
response = waitForDeferred(delete(destination_uri, destination_filepath))
yield response
response = response.getResult()
checkResponse(response, "delete", responsecode.NO_CONTENT)
success_code = responsecode.NO_CONTENT
else:
success_code = responsecode.CREATED
#
# See if rename (which is atomic, and fast) works
#
try:
os.rename(source_filepath.path, destination_filepath.path)
except OSError:
pass
else:
# Restat source filepath since we moved it
source_filepath.restat(False)
yield success_code
return
#
# Do a copy, then delete the source
#
response = waitForDeferred(copy(source_filepath, destination_filepath, destination_uri, depth))
yield response
response = response.getResult()
checkResponse(response, "copy", responsecode.CREATED, responsecode.NO_CONTENT)
response = waitForDeferred(delete(source_uri, source_filepath))
yield response
response = response.getResult()
checkResponse(response, "delete", responsecode.NO_CONTENT)
yield success_code
move = deferredGenerator(move)
def put(stream, filepath, uri=None):
"""
Perform a PUT of the given data stream into the given filepath.
@param stream: the stream to write to the destination.
@param filepath: the L{FilePath} of the destination file.
@param uri: the URI of the destination resource.
If the destination exists and C{uri} is not C{None}, perform a
X{DELETE} operation on the destination; if C{uri} is C{None},
delete the destination directly.
Note that whether a L{put} deletes the destination directly vs.
performing a X{DELETE} on the destination affects the response returned
in the event of an error during deletion. Specifically, X{DELETE}
on collections must return a L{MultiStatusResponse} under certain
circumstances, whereas X{PUT} isn't required to do so. Therefore,
if the caller expects X{DELETE} semantics, it must provide a valid
C{uri}.
@raise HTTPError: (containing an appropriate response) if the operation
fails.
@return: a deferred response with a status code of L{responsecode.NO_CONTENT}
    if the destination already exists, or L{responsecode.CREATED} if the
    destination was created by the X{PUT} operation.
"""
log.msg("Writing to file %s" % (filepath.path,))
if filepath.exists():
if uri is None:
try:
if filepath.isdir():
rmdir(filepath.path)
else:
os.remove(filepath.path)
except:
raise HTTPError(statusForFailure(
Failure(),
"writing to file: %s" % (filepath.path,)
))
else:
response = waitForDeferred(delete(uri, filepath))
yield response
response = response.getResult()
checkResponse(response, "delete", responsecode.NO_CONTENT)
success_code = responsecode.NO_CONTENT
else:
success_code = responsecode.CREATED
#
# Write the contents of the request stream to resource's file
#
try:
resource_file = filepath.open("w")
except:
raise HTTPError(statusForFailure(
Failure(),
"opening file for writing: %s" % (filepath.path,)
))
try:
x = waitForDeferred(readIntoFile(stream, resource_file))
yield x
x.getResult()
except:
raise HTTPError(statusForFailure(
Failure(),
"writing to file: %s" % (filepath.path,)
))
# Restat filepath since we modified the backing file
filepath.restat(False)
yield success_code
put = deferredGenerator(put)
def mkcollection(filepath):
"""
Perform a X{MKCOL} on the given filepath.
@param filepath: the L{FilePath} of the collection resource to create.
@raise HTTPError: (containing an appropriate response) if the operation
fails.
@return: a deferred response with a status code of L{responsecode.CREATED}
    if the collection was successfully created by the X{MKCOL} operation.
"""
try:
os.mkdir(filepath.path)
# Restat filepath because we modified it
filepath.restat(False)
except:
raise HTTPError(statusForFailure(
Failure(),
"creating directory in MKCOL: %s" % (filepath.path,)
))
return succeed(responsecode.CREATED)
def rmdir(dirname):
"""
Removes the directory with the given name, as well as its contents.
@param dirname: the path to the directory to remove.
"""
for dir, subdirs, files in os.walk(dirname, topdown=False):
for filename in files:
os.remove(os.path.join(dir, filename))
for subdir in subdirs:
path = os.path.join(dir, subdir)
if os.path.islink(path):
os.remove(path)
else:
os.rmdir(path)
os.rmdir(dirname)
def checkResponse(response, method, *codes):
    # Assert on the membership test directly; wrapping it in a tuple together
    # with the message would make the assertion unconditionally true.
    assert response in codes, (
        "%s() should have raised, but returned one of %r instead" % (method, codes)
    )
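# --- Illustrative sketch (not part of the original module) ---
# delete() and rmdir() above both rely on walking the tree bottom-up
# (topdown=False) so files and symlinks are removed before the directories
# that contain them. The standalone snippet below demonstrates that traversal
# order on a throwaway temp tree; the file names are arbitrary.
def _example_bottom_up_delete():
    import os
    import tempfile
    root = tempfile.mkdtemp()
    os.makedirs(os.path.join(root, "a", "b"))
    open(os.path.join(root, "a", "b", "f.txt"), "w").close()
    removed = []
    for dirpath, subdirs, files in os.walk(root, topdown=False):
        for name in files:
            os.remove(os.path.join(dirpath, name))
            removed.append(("file", name))
        for name in subdirs:
            os.rmdir(os.path.join(dirpath, name))  # already empty: children were removed first
            removed.append(("dir", name))
    os.rmdir(root)
    return removed  # [('file', 'f.txt'), ('dir', 'b'), ('dir', 'a')]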
|
|
import django
from django.conf.urls import patterns, include
from django.core.exceptions import ValidationError
from django.contrib import admin
from django.contrib.admin.views.main import ChangeList
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.db import models
from django.test import TestCase, RequestFactory
from .admin import SafeDeleteAdmin, highlight_deleted
from .models import (safedelete_mixin_factory, SoftDeleteMixin,
HARD_DELETE, HARD_DELETE_NOCASCADE, SOFT_DELETE,
NO_DELETE, DELETED_VISIBLE_BY_PK)
# MODELS (FOR TESTING)
class Author(safedelete_mixin_factory(HARD_DELETE_NOCASCADE)):
name = models.CharField(max_length=200)
class Category(safedelete_mixin_factory(SOFT_DELETE, visibility=DELETED_VISIBLE_BY_PK)):
name = models.CharField(max_length=200, unique=True)
class Article(safedelete_mixin_factory(HARD_DELETE)):
name = models.CharField(max_length=200)
author = models.ForeignKey(Author)
category = models.ForeignKey(Category, null=True, default=None)
def __unicode__(self):
return 'Article ({0}): {1}'.format(self.pk, self.name)
class Order(SoftDeleteMixin):
name = models.CharField(max_length=100)
articles = models.ManyToManyField(Article)
class VeryImportant(safedelete_mixin_factory(NO_DELETE)):
name = models.CharField(max_length=200)
# ADMINMODEL (FOR TESTING)
class CategoryAdmin(SafeDeleteAdmin):
list_display = (highlight_deleted,) + SafeDeleteAdmin.list_display
admin.site.register(Category, CategoryAdmin)
# URLS (FOR TESTING)
urlpatterns = patterns(
'',
(r'^admin/', include(admin.site.urls)),
)
# TESTS
class SimpleTest(TestCase):
def setUp(self):
self.authors = (
Author.objects.create(name='author 0'),
Author.objects.create(name='author 1'),
Author.objects.create(name='author 2'),
)
self.categories = (
Category.objects.create(name='category 0'),
Category.objects.create(name='category 1'),
Category.objects.create(name='category 2'),
)
self.articles = (
Article.objects.create(name='article 0', author=self.authors[1]),
Article.objects.create(name='article 1', author=self.authors[1], category=self.categories[1]),
Article.objects.create(name='article 2', author=self.authors[2], category=self.categories[2]),
)
self.order = Order.objects.create(name='order')
self.order.articles.add(self.articles[0], self.articles[1])
def test_softdelete(self):
self.assertEqual(Order.objects.count(), 1)
self.order.delete()
self.assertEqual(Order.objects.count(), 0)
self.assertEqual(Order.objects.all_with_deleted().count(), 1)
self.order.save()
self.assertEqual(Order.objects.count(), 1)
def test_hard_delete(self):
self.assertEqual(Article.objects.count(), 3)
self.articles[0].delete()
self.assertEqual(Article.objects.count(), 2)
self.assertEqual(Article.objects.all_with_deleted().count(), 2)
self.articles[1].delete(force_policy=SOFT_DELETE)
self.assertEqual(Article.objects.count(), 1)
self.assertEqual(Article.objects.all_with_deleted().count(), 2)
self.assertEqual(Article.objects.filter(author=self.authors[2]).count(), 1)
def test_hard_delete_nocascade(self):
self.assertEqual(Author.objects.count(), 3)
self.authors[0].delete()
self.assertEqual(Author.objects.count(), 2)
self.assertEqual(Author.objects.all_with_deleted().count(), 2)
self.authors[1].delete()
self.assertEqual(Author.objects.count(), 1)
self.assertEqual(Author.objects.all_with_deleted().count(), 2)
self.assertEqual(Article.objects.count(), 3)
def test_no_delete(self):
obj = VeryImportant.objects.create(name="I don't wanna die :'(.")
obj.delete()
self.assertEqual(obj.deleted, False)
obj = VeryImportant.objects.get(pk=obj.pk)
self.assertEqual(obj.deleted, False)
def test_no_delete_manager(self):
obj = VeryImportant.objects.create(name="I don't wanna die :'(.")
VeryImportant.objects.all().delete()
obj = VeryImportant.objects.get(pk=obj.pk)
self.assertEqual(obj.deleted, False)
def test_save(self):
"""
Saving a deleted object re-inserts it, i.e. save() effectively
undeletes a soft-deleted object.
"""
self.assertEqual(Order.objects.count(), 1)
self.order.delete()
self.assertEqual(Order.objects.count(), 0)
self.order.save()
self.assertEqual(Order.objects.count(), 1)
def test_undelete(self):
self.assertEqual(Order.objects.count(), 1)
self.order.delete()
self.assertEqual(Order.objects.count(), 0)
self.order.undelete()
self.assertEqual(Order.objects.count(), 1)
def test_access_by_pk(self):
"""
Ensure that a deleted category can still be accessed by pk.
This works because the model sets visibility=DELETED_VISIBLE_BY_PK.
"""
pk = self.categories[1].id
self.categories[1].delete()
self.assertRaises(Category.DoesNotExist, Category.objects.get, name=self.categories[1].name)
self.assertEqual(self.categories[1], Category.objects.get(pk=pk))
cat = Category.objects.filter(pk=pk)
self.assertEqual(len(cat), 1)
self.assertEqual(self.categories[1], cat[0])
def test_no_access_by_pk(self):
"""
Ensure that a deleted object cannot be accessed by pk when the
default visibility is used.
"""
self.order.delete()
self.assertRaises(Order.DoesNotExist, Order.objects.get, pk=self.order.id)
def test_queryset(self):
self.assertEqual(Category.objects.count(), 3)
Category.objects.all().delete()
self.assertEqual(Category.objects.count(), 0)
Category.objects.all().undelete() # Nonsense
self.assertEqual(Category.objects.count(), 0)
Category.objects.deleted_only().undelete()
self.assertEqual(Category.objects.count(), 3)
def test_related_manager(self):
order = Order.objects.create(name='order 2')
Order.objects.create(name='order 3')
order.articles.add(self.articles[0])
self.assertEqual(self.articles[0].order_set.all().count(), 2)
order.delete()
self.assertEqual(self.articles[0].order_set.all().count(), 1)
# Ensure all_with_deleted() filters correctly on the article.
self.assertEqual(
self.articles[0].order_set.all_with_deleted().count(), 2
)
def test_prefetch_related(self):
""" prefetch_related() queryset should not be filtered by core_filter """
authors = Author.objects.all().prefetch_related('article_set')
for author in authors:
self.assertQuerysetEqual(
author.article_set.all().order_by('pk'),
[repr(a) for a in Author.objects.get(pk=author.pk).article_set.all().order_by('pk')]
)
def test_validate_unique(self):
""" Check that uniqueness is also checked against deleted objects """
Category.objects.create(name='test').delete()
with self.assertRaises(ValidationError):
Category(name='test').validate_unique()
class AdminTestCase(TestCase):
urls = 'safedelete.tests'
def setUp(self):
self.author = Author.objects.create(name='author 0')
self.categories = (
Category.objects.create(name='category 0'),
Category.objects.create(name='category 1'),
Category.objects.create(name='category 2'),
)
self.articles = (
Article(name='article 0', author=self.author),
Article(name='article 1', author=self.author, category=self.categories[1]),
Article(name='article 2', author=self.author, category=self.categories[2]),
)
self.categories[1].delete()
self.request_factory = RequestFactory()
self.request = self.request_factory.get('/', {})
self.modeladmin_default = admin.ModelAdmin(Category, AdminSite())
self.modeladmin = CategoryAdmin(Category, AdminSite())
User.objects.create_superuser("super", "", "secret")
self.client.login(username="super", password="secret")
def tearDown(self):
self.client.logout()
def get_changelist(self, request, model, modeladmin):
return ChangeList(
request, model, modeladmin.list_display,
modeladmin.list_display_links, modeladmin.list_filter,
modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page,
modeladmin.list_max_show_all, modeladmin.list_editable,
modeladmin
)
def test_admin_model(self):
changelist_default = self.get_changelist(self.request, Category, self.modeladmin_default)
changelist = self.get_changelist(self.request, Category, self.modeladmin)
if django.VERSION[1] == 4 or django.VERSION[1] == 5:
# Django == 1.4 or 1.5
self.assertEqual(changelist.get_filters(self.request)[0][0].title, "deleted")
self.assertEqual(changelist.get_query_set(self.request).count(), 3)
self.assertEqual(changelist_default.get_query_set(self.request).count(), 2)
else:
# Django >= 1.6
self.assertEqual(changelist.get_filters(self.request)[0][0].title, "deleted")
self.assertEqual(changelist.queryset.count(), 3)
self.assertEqual(changelist_default.queryset.count(), 2)
def test_admin_listing(self):
""" Test deleted objects are in red in admin listing. """
resp = self.client.get('/admin/safedelete/category/')
line = '<span class="deleted">{0}</span>'.format(self.categories[1])
self.assertContains(resp, line)
def test_admin_undelete_action(self):
""" Test objects are undeleted and action is logged. """
resp = self.client.post('/admin/safedelete/category/', data={
'index': 0,
'action': ['undelete_selected'],
'_selected_action': [self.categories[1].pk],
})
self.assertTemplateUsed(resp, 'safedelete/undelete_selected_confirmation.html')
category = Category.objects.get(pk=self.categories[1].pk)
self.assertTrue(self.categories[1].deleted)
resp = self.client.post('/admin/safedelete/category/', data={
'index': 0,
'action': ['undelete_selected'],
'post': True,
'_selected_action': [self.categories[1].pk],
})
category = Category.objects.get(pk=self.categories[1].pk)
self.assertFalse(category.deleted)
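# --- Illustrative sketch (not part of the original test module) ---
# The tests above exercise django-safedelete; the tiny stand-in below shows the
# core soft-delete idea they rely on, free of any Django dependency: delete()
# only flips a flag, the default queryset hides flagged objects, and clearing
# the flag (undelete/save) makes them visible again. This is an analogy, not
# the library's actual implementation.
class _ExampleSoftDeleteStore(object):
    def __init__(self):
        self._objects = []
    def create(self, obj):
        obj.deleted = False
        self._objects.append(obj)
        return obj
    def count(self):
        # Default manager analogue: deleted objects are invisible.
        return len([o for o in self._objects if not o.deleted])
    def count_with_deleted(self):
        # all_with_deleted() analogue: soft-deleted objects are still stored.
        return len(self._objects)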
|
|
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from uuid import uuid1
import numpy as np
from pandas.compat import range
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float, is_string_like
from pandas.core.dtypes.generic import ABCSeries
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
try:
from jinja2 import (
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
raise ImportError("pandas.Styler requires jinja2. "
"Please install with `conda install Jinja2`\n"
"or `pip install Jinja2`")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
if has_mpl:
yield plt, colors
else:
raise ImportError(no_mpl_message.format(func.__name__))
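# --- Illustrative sketch (not part of pandas itself) ---
# Style functions handed to Styler.apply/applymap return CSS "attr: value"
# strings (one per cell) which the Styler collects per (row, col) in self.ctx
# before rendering. The helper below is a minimal example of such a function;
# the DataFrame it is applied to is arbitrary.
def _example_highlight_negative(column):
    """Return one CSS string per value: red text for negatives, empty otherwise."""
    return ['color: red' if v < 0 else '' for v in column]
# Typical use, assuming a numeric DataFrame ``df``:
#     df.style.apply(_example_highlight_negative, axis=0).render()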
class Styler(object):
"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
Parameters
----------
data : Series or DataFrame
precision : int
precision to round floats to, defaults to pd.options.display.precision
table_styles : list-like, default None
list of {selector: (attr, value)} dicts; see Notes
uuid : str, default None
a unique identifier to avoid CSS collisions; generated automatically
caption : str, default None
caption to attach to the table
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in its HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
Attributes
----------
env : Jinja2 Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
DataFrame.style
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
* ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
loader = PackageLoader("pandas", "io/formats/templates")
env = Environment(
loader=loader,
trim_blocks=True,
)
template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
caption=None, table_attributes=None, cell_ids=True):
self.ctx = defaultdict(list)
self._todo = []
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("``data`` must be a Series or DataFrame")
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_unique or not data.columns.is_unique:
raise ValueError("style is not supported for non-unique indices.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = get_option('display.precision')
self.precision = precision
self.table_attributes = table_attributes
self.hidden_index = False
self.hidden_columns = []
self.cell_ids = cell_ids
# display_funcs maps (row, col) -> formatting function
def default_display_func(x):
if is_float(x):
return '{:>.{precision}g}'.format(x, precision=self.precision)
else:
return x
self._display_funcs = defaultdict(lambda: default_display_func)
def _repr_html_(self):
"""
Hooks into Jupyter notebook rich display system.
"""
return self.render()
@Appender(_shared_docs['to_excel'] % dict(
axes='index, columns', klass='Styler',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel='\n .. versionadded:: 0.20'))
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
def _translate(self):
"""
Convert the DataFrame in ``self.data`` and the styles collected in ``self.ctx``
into a dictionary of {head, body, uuid, cellstyle}.
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
hidden_index = self.hidden_index
hidden_columns = self.hidden_columns
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def format_attr(pair):
return "{key}={value}".format(**pair)
# for sparsifying a MultiIndex
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns, hidden_columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
# Blank for Index columns...
row_es = [{"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": not hidden_index,
"class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)
# ... except maybe the last for columns.names
name = self.data.columns.names[r]
cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
"level{lvl}".format(lvl=r)]
name = BLANK_VALUE if name is None else name
row_es.append({"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": not hidden_index})
if clabels:
for c, value in enumerate(clabels[r]):
cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get(
"col_headings", {}).get(r, {}).get(c, []))
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengths),
}
colspan = col_lengths.get((r, c), 0)
if colspan > 1:
es["attributes"] = [
format_attr({"key": "colspan", "value": colspan})
]
row_es.append(es)
head.append(row_es)
if (self.data.index.names and
com._any_not_none(*self.data.index.names) and
not hidden_index):
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS,
"level{lvl}".format(lvl=c)]
name = '' if name is None else name
index_header_row.append({"type": "th", "value": name,
"class": " ".join(cs)})
index_header_row.extend(
[{"type": "th",
"value": BLANK_VALUE,
"class": " ".join([BLANK_CLASS])
}] * (len(clabels[0]) - len(hidden_columns)))
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
row_es = []
for c, value in enumerate(rlabels[r]):
rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
"row{row}".format(row=r)]
es = {
"type": "th",
"is_visible": (_is_visible(r, c, idx_lengths) and
not hidden_index),
"value": value,
"display_value": value,
"id": "_".join(rid[1:]),
"class": " ".join(rid)
}
rowspan = idx_lengths.get((c, r), 0)
if rowspan > 1:
es["attributes"] = [
format_attr({"key": "rowspan", "value": rowspan})
]
row_es.append(es)
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, "row{row}".format(row=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
row_dict = {"type": "td",
"value": value,
"class": " ".join(cs),
"display_value": formatter(value),
"is_visible": (c not in hidden_columns)}
# only add an id if the cell has a style
if (self.cell_ids or
not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
row_dict["id"] = "_".join(cs[1:])
row_es.append(row_dict)
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(x.split(":"))
else:
props.append(['', ''])
cellstyle.append({'props': props,
'selector': "row{row}_col{col}"
.format(row=r, col=c)})
body.append(row_es)
table_attr = self.table_attributes
use_mathjax = get_option("display.html.use_mathjax")
if not use_mathjax:
table_attr = table_attr or ''
if 'class="' in table_attr:
table_attr = table_attr.replace('class="',
'class="tex2jax_ignore ')
else:
table_attr += ' class="tex2jax_ignore"'
return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
precision=precision, table_styles=table_styles,
caption=caption, table_attributes=table_attr)
def format(self, formatter, subset=None):
"""
Format the text display value of cells.
.. versionadded:: 0.18.0
Parameters
----------
formatter : str, callable, or dict
subset : IndexSlice
An argument to ``DataFrame.loc`` that restricts which elements
``formatter`` is applied to.
Returns
-------
self : Styler
Notes
-----
``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
``a`` is one of
- str: this will be wrapped in: ``a.format(x)``
- callable: called with the value of an individual cell
The default display value for numeric values is the "general" (``g``)
format with ``pd.options.display.precision`` precision.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
>>> df.style.format("{:.2%}")
>>> df['c'] = ['a', 'b', 'c', 'd']
>>> df.style.format({'c': str.upper})
"""
if subset is None:
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = _non_reducing_slice(subset)
if len(subset) == 1:
subset = subset, self.data.columns
sub_df = self.data.loc[subset]
row_locs = self.data.index.get_indexer_for(sub_df.index)
col_locs = self.data.columns.get_indexer_for(sub_df.columns)
if is_dict_like(formatter):
for col, col_formatter in formatter.items():
# formatter must be callable, so '{}' are converted to lambdas
col_formatter = _maybe_wrap_formatter(col_formatter)
col_num = self.data.columns.get_indexer_for([col])[0]
for row_num in row_locs:
self._display_funcs[(row_num, col_num)] = col_formatter
else:
# single scalar to format all cells with
locs = product(*(row_locs, col_locs))
for i, j in locs:
formatter = _maybe_wrap_formatter(formatter)
self._display_funcs[(i, j)] = formatter
return self
def render(self, **kwargs):
"""
Render the built up styles to HTML.
Parameters
----------
**kwargs
Any additional keyword arguments are passed
through to ``self.template.render``.
This is useful when you need to provide
additional variables for a custom template.
.. versionadded:: 0.20
Returns
-------
rendered : str
The rendered HTML.
Notes
-----
``Styler`` objects have defined the ``_repr_html_`` method
which automatically calls ``self.render()`` when it's the
last item in a Notebook cell. When calling ``Styler.render()``
directly, wrap the result in ``IPython.display.HTML`` to view
the rendered HTML in the notebook.
Pandas uses the following keys in render. Arguments passed
in ``**kwargs`` take precedence, so think carefully if you want
to override them:
* head
* cellstyle
* body
* uuid
* precision
* table_styles
* caption
* table_attributes
"""
self._compute()
# TODO: namespace all the pandas keys
d = self._translate()
# filter out empty styles, every cell will have a class
# but the list of props may just be [['', '']].
# so we have the nested any() calls below
trimmed = [x for x in d['cellstyle']
if any(any(y) for y in x['props'])]
d['cellstyle'] = trimmed
d.update(kwargs)
return self.template.render(**d)
def _update_ctx(self, attrs):
"""
Update the state of the Styler.
Collects a mapping of {index_label: ['<property>: <value>']}.
attrs : Series or DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Neither whitespace nor a final trailing ';' should matter.
"""
for row_label, v in attrs.iterrows():
for col_label, col in v.iteritems():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(";").split(";"):
self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy=False):
styler = Styler(self.data, precision=self.precision,
caption=self.caption, uuid=self.uuid,
table_styles=self.table_styles)
if deepcopy:
styler.ctx = copy.deepcopy(self.ctx)
styler._todo = copy.deepcopy(self._todo)
else:
styler.ctx = self.ctx
styler._todo = self._todo
return styler
def __copy__(self):
"""
Shallow copy by default.
"""
return self._copy(deepcopy=False)
def __deepcopy__(self, memo):
return self._copy(deepcopy=True)
def clear(self):
"""
Reset the styler, removing any previously applied styles.
Returns None.
"""
self.ctx.clear()
self._todo = []
def _compute(self):
"""
Execute the style functions built up in `self._todo`.
Relies on the conventions that all style functions go through
.apply or .applymap. These append the styles to apply to ``self._todo``
as tuples of (application method, *args, **kwargs).
"""
r = self
for func, args, kwargs in self._todo:
r = func(self)(*args, **kwargs)
return r
def _apply(self, func, axis=0, subset=None, **kwargs):
subset = slice(None) if subset is None else subset
subset = _non_reducing_slice(subset)
data = self.data.loc[subset]
if axis is not None:
result = data.apply(func, axis=axis,
result_type='expand', **kwargs)
result.columns = data.columns
else:
result = func(data, **kwargs)
if not isinstance(result, pd.DataFrame):
raise TypeError(
"Function {func!r} must return a DataFrame when "
"passed to `Styler.apply` with axis=None"
.format(func=func))
if not (result.index.equals(data.index) and
result.columns.equals(data.columns)):
msg = ('Result of {func!r} must have identical index and '
'columns as the input'.format(func=func))
raise ValueError(msg)
result_shape = result.shape
expected_shape = self.data.loc[subset].shape
if result_shape != expected_shape:
msg = ("Function {func!r} returned the wrong shape.\n"
"Result has shape: {res}\n"
"Expected shape: {expect}".format(func=func,
res=result.shape,
expect=expected_shape))
raise ValueError(msg)
self._update_ctx(result)
return self
def apply(self, func, axis=0, subset=None, **kwargs):
"""
Apply a function column-wise, row-wise, or table-wise,
updating the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a Series or DataFrame (depending
on ``axis``), and return an object with the same shape.
Must return a DataFrame with identical index and
column labels when ``axis=None``
axis : int, str or None
apply to each column (``axis=0`` or ``'index'``)
or to each row (``axis=1`` or ``'columns'``) or
to the entire DataFrame at once with ``axis=None``
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
Notes
-----
The output shape of ``func`` should match the input, i.e. if
``x`` is the input row, column, or table (depending on ``axis``),
then ``func(x).shape == x.shape`` should be true.
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
Examples
--------
>>> def highlight_max(x):
... return ['background-color: yellow' if v == x.max() else ''
...            for v in x]
...
>>> df = pd.DataFrame(np.random.randn(5, 2))
>>> df.style.apply(highlight_max)
"""
self._todo.append((lambda instance: getattr(instance, '_apply'),
(func, axis, subset), kwargs))
return self
def _applymap(self, func, subset=None, **kwargs):
func = partial(func, **kwargs) # applymap doesn't take kwargs?
if subset is None:
subset = pd.IndexSlice[:]
subset = _non_reducing_slice(subset)
result = self.data.loc[subset].applymap(func)
self._update_ctx(result)
return self
def applymap(self, func, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with the result.
Parameters
----------
func : function
``func`` should take a scalar and return a scalar
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
See Also
--------
Styler.where
"""
self._todo.append((lambda instance: getattr(instance, '_applymap'),
(func, subset), kwargs))
return self
def where(self, cond, value, other=None, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with a style which is selected in
accordance with the return value of a function.
.. versionadded:: 0.21.0
Parameters
----------
cond : callable
``cond`` should take a scalar and return a boolean
value : str
applied when ``cond`` returns true
other : str
applied when ``cond`` returns false
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``cond``
Returns
-------
self : Styler
See Also
--------
Styler.applymap
"""
if other is None:
other = ''
return self.applymap(lambda val: value if cond(val) else other,
subset=subset, **kwargs)
def set_precision(self, precision):
"""
Set the precision used to render.
Parameters
----------
precision : int
Returns
-------
self : Styler
"""
self.precision = precision
return self
def set_table_attributes(self, attributes):
"""
Set the table attributes.
These are the items that show up in the opening ``<table>`` tag
in addition to the automatic (by default) id.
Parameters
----------
attributes : string
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_attributes('class="pure-table"')
# ... <table class="pure-table"> ...
"""
self.table_attributes = attributes
return self
def export(self):
"""
Export the styles applied to the current Styler.
Can be applied to a second style with ``Styler.use``.
Returns
-------
styles : list
See Also
--------
Styler.use
"""
return self._todo
def use(self, styles):
"""
Set the styles on the current Styler, possibly using styles
from ``Styler.export``.
Parameters
----------
styles : list
list of style functions
Returns
-------
self : Styler
See Also
--------
Styler.export
"""
self._todo.extend(styles)
return self
def set_uuid(self, uuid):
"""
Set the uuid for a Styler.
Parameters
----------
uuid : str
Returns
-------
self : Styler
"""
self.uuid = uuid
return self
def set_caption(self, caption):
"""
Set the caption on a Styler.
Parameters
----------
caption : str
Returns
-------
self : Styler
"""
self.caption = caption
return self
def set_table_styles(self, table_styles):
"""
Set the table styles on a Styler.
These are placed in a ``<style>`` tag before the generated HTML table.
Parameters
----------
table_styles : list
Each individual table_style should be a dictionary with
``selector`` and ``props`` keys. ``selector`` should be a CSS
selector that the style will be applied to (automatically
prefixed by the table's UUID) and ``props`` should be a list of
tuples with ``(attribute, value)``.
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_styles(
... [{'selector': 'tr:hover',
... 'props': [('background-color', 'yellow')]}]
... )
"""
self.table_styles = table_styles
return self
def hide_index(self):
"""
Hide any indices from rendering.
.. versionadded:: 0.23.0
Returns
-------
self : Styler
"""
self.hidden_index = True
return self
def hide_columns(self, subset):
"""
Hide columns from rendering.
.. versionadded:: 0.23.0
Parameters
----------
subset : IndexSlice
An argument to ``DataFrame.loc`` that identifies which columns
are hidden.
Returns
-------
self : Styler
"""
subset = _non_reducing_slice(subset)
hidden_df = self.data.loc[subset]
self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
return ('background-color: {color}'.format(color=null_color)
if pd.isna(v) else '')
def highlight_null(self, null_color='red'):
"""
Shade the background ``null_color`` for missing values.
Parameters
----------
null_color : str
Returns
-------
self : Styler
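Examples
--------
Illustrative only; the DataFrame below is made up for the example:
>>> df = pd.DataFrame({'A': [1.0, None, 3.0]})
>>> df.style.highlight_null(null_color='lightgray')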
"""
self.applymap(self._highlight_null, null_color=null_color)
return self
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
subset=None, text_color_threshold=0.408):
"""
Color the background in a gradient according to
the data in each column (optionally row).
Requires matplotlib.
Parameters
----------
cmap : str or colormap
matplotlib colormap
low, high : float
compress the range by these values.
axis : int or str
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise
subset : IndexSlice
a valid slice for ``data`` to limit the style application to
text_color_threshold : float or int
luminance threshold for determining text color. Facilitates text
visibility across varying background colors. From 0 to 1.
0 = all text is dark colored, 1 = all text is light colored.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
Raises
------
ValueError
If ``text_color_threshold`` is not a value from 0 to 1.
Notes
-----
Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the
text legible by not using the entire range of the color map. The range
of the data is extended by ``low * (x.max() - x.min())`` and ``high *
(x.max() - x.min())`` before normalizing.
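Examples
--------
An illustrative call (requires matplotlib; the data is made up for the
example):
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.background_gradient(cmap='viridis', low=0.1, high=0.1)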
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._background_gradient, cmap=cmap, subset=subset,
axis=axis, low=low, high=high,
text_color_threshold=text_color_threshold)
return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0,
text_color_threshold=0.408):
"""
Color background in a range according to the data.
"""
if (not isinstance(text_color_threshold, (float, int)) or
not 0 <= text_color_threshold <= 1):
msg = "`text_color_threshold` must be a value from 0 to 1."
raise ValueError(msg)
with _mpl(Styler.background_gradient) as (plt, colors):
smin = s.values.min()
smax = s.values.max()
rng = smax - smin
# extend lower / upper bounds, compresses color range
norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
# matplotlib colors.Normalize modifies inplace?
# https://github.com/matplotlib/matplotlib/issues/5427
rgbas = plt.cm.get_cmap(cmap)(norm(s.values))
def relative_luminance(rgba):
"""
Calculate relative luminance of a color.
The calculation adheres to the W3C standards
(https://www.w3.org/WAI/GL/wiki/Relative_luminance)
Parameters
----------
rgba : rgb or rgba tuple
Returns
-------
float
The relative luminance as a value from 0 to 1
"""
r, g, b = (
x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055) ** 2.4
for x in rgba[:3]
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
def css(rgba):
dark = relative_luminance(rgba) < text_color_threshold
text_color = '#f1f1f1' if dark else '#000000'
return 'background-color: {b};color: {c};'.format(
b=colors.rgb2hex(rgba), c=text_color
)
if s.ndim == 1:
return [css(rgba) for rgba in rgbas]
else:
return pd.DataFrame(
[[css(rgba) for rgba in row] for row in rgbas],
index=s.index, columns=s.columns
)
def set_properties(self, subset=None, **kwargs):
"""
Convenience method for setting one or more non-data dependent
properties for each cell.
Parameters
----------
subset : IndexSlice
a valid slice for ``data`` to limit the style application to
kwargs : dict
property: value pairs to be set for each cell
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_properties(color="white", align="right")
>>> df.style.set_properties(**{'background-color': 'yellow'})
"""
values = ';'.join('{p}: {v}'.format(p=p, v=v)
for p, v in kwargs.items())
f = lambda x: values
return self.applymap(f, subset=subset)
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
"""
Draw bar chart in dataframe cells.
"""
# Get input value range.
smin = s.min() if vmin is None else vmin
if isinstance(smin, ABCSeries):
smin = smin.min()
smax = s.max() if vmax is None else vmax
if isinstance(smax, ABCSeries):
smax = smax.max()
if align == 'mid':
smin = min(0, smin)
smax = max(0, smax)
elif align == 'zero':
# For "zero" mode, we want the range to be symmetrical around zero.
smax = max(abs(smin), abs(smax))
smin = -smax
# Transform to percent-range of linear-gradient
normed = width * (s.values - smin) / (smax - smin + 1e-12)
zero = -width * smin / (smax - smin + 1e-12)
def css_bar(start, end, color):
"""
Generate CSS code to draw a bar from start to end.
"""
css = 'width: 10em; height: 80%;'
if end > start:
css += 'background: linear-gradient(90deg,'
if start > 0:
css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
s=start, c=color
)
css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
e=min(end, width), c=color,
)
return css
def css(x):
if pd.isna(x):
return ''
# avoid deprecated indexing `colors[x > zero]`
color = colors[1] if x > zero else colors[0]
if align == 'left':
return css_bar(0, x, color)
else:
return css_bar(min(x, zero), max(x, zero), color)
if s.ndim == 1:
return [css(x) for x in normed]
else:
return pd.DataFrame(
[[css(x) for x in row] for row in normed],
index=s.index, columns=s.columns
)
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
align='left', vmin=None, vmax=None):
"""
Draw bar chart in the cell backgrounds.
Parameters
----------
subset : IndexSlice, optional
A valid slice for `data` to limit the style application to.
axis : int, str or None, default 0
Apply to each column (`axis=0` or `'index'`)
or to each row (`axis=1` or `'columns'`) or
to the entire DataFrame at once with `axis=None`.
color : str or 2-tuple/list
If a str is passed, the color is the same for both
negative and positive numbers. If 2-tuple/list is used, the
first element is the color_negative and the second is the
color_positive (e.g.: ['#d65f5f', '#5fba7d']).
width : float, default 100
A number between 0 and 100. The largest value will cover `width`
percent of the cell's width.
align : {'left', 'zero', 'mid'}, default 'left'
How to align the bars with the cells.
- 'left' : the min value starts at the left of the cell.
- 'zero' : a value of zero is located at the center of the cell.
- 'mid' : the center of the cell is at (max-min)/2, or
if values are all negative (positive) the zero is aligned
at the right (left) of the cell.
.. versionadded:: 0.20.0
vmin : float, optional
Minimum bar value, defining the left hand limit
of the bar drawing range, lower values are clipped to `vmin`.
When None (default): the minimum value of the data will be used.
.. versionadded:: 0.24.0
vmax : float, optional
Maximum bar value, defining the right hand limit
of the bar drawing range, higher values are clipped to `vmax`.
When None (default): the maximum value of the data will be used.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
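Examples
--------
Illustrative only; the data and colors are made up for the example:
>>> df = pd.DataFrame({'A': [-2, -1, 0, 1, 2]})
>>> df.style.bar(align='mid', color=['#d65f5f', '#5fba7d'])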
"""
if align not in ('left', 'zero', 'mid'):
raise ValueError("`align` must be one of {'left', 'zero', 'mid'}")
if not (is_list_like(color)):
color = [color, color]
elif len(color) == 1:
color = [color[0], color[0]]
elif len(color) > 2:
raise ValueError("`color` must be string or a list-like"
" of length 2: [`color_neg`, `color_pos`]"
" (eg: color=['#d65f5f', '#5fba7d'])")
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._bar, subset=subset, axis=axis,
align=align, colors=color, width=width,
vmin=vmin, vmax=vmax)
return self
def highlight_max(self, subset=None, color='yellow', axis=0):
"""
Highlight the maximum by shading the background.
Parameters
----------
subset : IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color : str, default 'yellow'
axis : int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
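Examples
--------
Illustrative only; the data is made up for the example:
>>> df = pd.DataFrame(np.random.randn(5, 3))
>>> df.style.highlight_max(color='lightgreen', axis=0)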
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
"""
Highlight the minimum by shading the background.
Parameters
----------
subset : IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color : str, default 'yellow'
axis : int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=False)
def _highlight_handler(self, subset=None, color='yellow', axis=None,
max_=True):
subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
self.apply(self._highlight_extrema, color=color, axis=axis,
subset=subset, max_=max_)
return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
"""
Highlight the min or max in a Series or DataFrame.
"""
attr = 'background-color: {0}'.format(color)
if data.ndim == 1: # Series from .apply
if max_:
extrema = data == data.max()
else:
extrema = data == data.min()
return [attr if v else '' for v in extrema]
else: # DataFrame from .apply(axis=None)
if max_:
extrema = data == data.max().max()
else:
extrema = data == data.min().min()
return pd.DataFrame(np.where(extrema, attr, ''),
index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
"""
Factory function for creating a subclass of ``Styler``
with a custom template and Jinja environment.
Parameters
----------
searchpath : str or list
Path or paths of directories containing the templates
name : str
Name of your custom template to use for rendering
Returns
-------
MyStyler : subclass of Styler
Has the correct ``env`` and ``template`` class attributes set.
"""
loader = ChoiceLoader([
FileSystemLoader(searchpath),
cls.loader,
])
class MyStyler(cls):
env = Environment(loader=loader)
template = env.get_template(name)
return MyStyler
def pipe(self, func, *args, **kwargs):
"""
Apply ``func(self, *args, **kwargs)``, and return the result.
.. versionadded:: 0.24.0
Parameters
----------
func : function
Function to apply to the Styler. Alternatively, a
``(callable, keyword)`` tuple where ``keyword`` is a string
indicating the keyword of ``callable`` that expects the Styler.
*args, **kwargs :
Arguments passed to `func`.
Returns
-------
object :
The value returned by ``func``.
See Also
--------
DataFrame.pipe : Analogous method for DataFrame.
Styler.apply : Apply a function row-wise, column-wise, or table-wise to
modify the dataframe's styling.
Notes
-----
Like :meth:`DataFrame.pipe`, this method can simplify the
application of several user-defined functions to a styler. Instead
of writing:
.. code-block:: python
f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)
users can write:
.. code-block:: python
(df.style.set_precision(3)
.pipe(g, arg1=a)
.pipe(f, arg2=b, arg3=c))
In particular, this allows users to define functions that take a
styler object, along with other parameters, and return the styler after
making styling changes (such as calling :meth:`Styler.apply` or
:meth:`Styler.set_properties`). Using ``.pipe``, these user-defined
style "transformations" can be interleaved with calls to the built-in
Styler interface.
Examples
--------
>>> def format_conversion(styler):
... return (styler.set_properties(**{'text-align': 'right'})
... .format({'conversion': '{:.1%}'}))
The user-defined ``format_conversion`` function above can be called
within a sequence of other style modifications:
>>> df = pd.DataFrame({'trial': list(range(5)),
... 'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
>>> (df.style
... .highlight_min(subset=['conversion'], color='yellow')
... .pipe(format_conversion)
... .set_caption("Results with minimum conversion highlighted."))
"""
return com._pipe(self, func, *args, **kwargs)
def _is_visible(idx_row, idx_col, lengths):
"""
Index -> {(idx_row, idx_col): bool}.
"""
return (idx_col, idx_row) in lengths
def _get_level_lengths(index, hidden_elements=None):
"""
Given an index, find the level length for each element.
Optional argument is a list of index positions which
should not be visible.
Result is a dictionary of (level, initial_position): span
"""
sentinel = object()
levels = index.format(sparsify=sentinel, adjoin=False, names=False)
if hidden_elements is None:
hidden_elements = []
lengths = {}
if index.nlevels == 1:
for i, value in enumerate(levels):
if i not in hidden_elements:
lengths[(0, i)] = 1
return lengths
for i, lvl in enumerate(levels):
for j, row in enumerate(lvl):
if not get_option('display.multi_sparse'):
lengths[(i, j)] = 1
elif (row != sentinel) and (j not in hidden_elements):
last_label = j
lengths[(i, last_label)] = 1
elif (row != sentinel):
# even if it's hidden, keep track of it in case
# length >1 and later elements are visible
last_label = j
lengths[(i, last_label)] = 0
elif j not in hidden_elements:
lengths[(i, last_label)] += 1
non_zero_lengths = {
element: length for element, length in lengths.items() if length >= 1}
return non_zero_lengths
def _maybe_wrap_formatter(formatter):
if is_string_like(formatter):
return lambda x: formatter.format(x)
elif callable(formatter):
return formatter
else:
msg = ("Expected a template string or callable, got {formatter} "
"instead".format(formatter=formatter))
raise TypeError(msg)
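def _demo_styler_export_use():
    """
    A minimal usage sketch of the Styler API defined above (illustrative
    only; the DataFrames, column names and style choices are made up for
    the example). It queues style functions on one Styler, exports them,
    and replays them on a second frame of the same shape via ``Styler.use``.
    """
    df1 = pd.DataFrame(np.random.randn(4, 3), columns=['a', 'b', 'c'])
    df2 = pd.DataFrame(np.random.randn(4, 3), columns=['a', 'b', 'c'])
    styled = df1.style.highlight_max(color='lightgreen')
    styled = styled.bar(subset=['a'], align='mid', color=['#d65f5f', '#5fba7d'])
    # ``export`` returns the queued style functions; ``use`` replays them here.
    return df2.style.use(styled.export()).render()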
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains a Google Cloud Storage hook."""
import functools
import gzip as gz
import os
import shutil
import time
import warnings
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from io import BytesIO
from os import path
from tempfile import NamedTemporaryFile
from typing import Callable, List, Optional, Sequence, Set, Tuple, TypeVar, Union, cast
from urllib.parse import urlparse
from google.api_core.exceptions import NotFound
from google.cloud import storage
from google.cloud.exceptions import GoogleCloudError
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
from airflow.utils import timezone
from airflow.version import version
RT = TypeVar('RT')
T = TypeVar("T", bound=Callable)
# Use default timeout from google-cloud-storage
DEFAULT_TIMEOUT = 60
def _fallback_object_url_to_object_name_and_bucket_name(
object_url_keyword_arg_name='object_url',
bucket_name_keyword_arg_name='bucket_name',
object_name_keyword_arg_name='object_name',
) -> Callable[[T], T]:
"""
Decorator factory that converts an object URL parameter to object name and bucket name parameters.
:param object_url_keyword_arg_name: Name of the object URL parameter
:type object_url_keyword_arg_name: str
:param bucket_name_keyword_arg_name: Name of the bucket name parameter
:type bucket_name_keyword_arg_name: str
:param object_name_keyword_arg_name: Name of the object name parameter
:type object_name_keyword_arg_name: str
:return: Decorator
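Example (illustrative; for a method decorated with this factory, such as
``provide_file``, the two calls below are equivalent; the bucket and object
names are made up):

.. code-block:: python

    hook.provide_file(object_url="gs://my-bucket/data/file.txt")
    hook.provide_file(bucket_name="my-bucket", object_name="data/file.txt")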
"""
def _wrapper(func: T):
@functools.wraps(func)
def _inner_wrapper(self: "GCSHook", *args, **kwargs) -> RT:
if args:
raise AirflowException(
"You must use keyword arguments in this methods rather than positional"
)
object_url = kwargs.get(object_url_keyword_arg_name)
bucket_name = kwargs.get(bucket_name_keyword_arg_name)
object_name = kwargs.get(object_name_keyword_arg_name)
if object_url and bucket_name and object_name:
raise AirflowException(
"The mutually exclusive parameters. `object_url`, `bucket_name` together "
"with `object_name` parameters are present. "
"Please provide `object_url` or `bucket_name` and `object_name`."
)
if object_url:
bucket_name, object_name = _parse_gcs_url(object_url)
kwargs[bucket_name_keyword_arg_name] = bucket_name
kwargs[object_name_keyword_arg_name] = object_name
del kwargs[object_url_keyword_arg_name]
# Only raise the combined error when both are missing; otherwise fall
# through to the more specific single-argument errors below.
if not object_name and not bucket_name:
raise TypeError(
f"{func.__name__}() missing 2 required positional arguments: "
f"'{bucket_name_keyword_arg_name}' and '{object_name_keyword_arg_name}' "
f"or {object_url_keyword_arg_name}"
)
if not object_name:
raise TypeError(
f"{func.__name__}() missing 1 required positional argument: "
f"'{object_name_keyword_arg_name}'"
)
if not bucket_name:
raise TypeError(
f"{func.__name__}() missing 1 required positional argument: "
f"'{bucket_name_keyword_arg_name}'"
)
return func(self, *args, **kwargs)
return cast(T, _inner_wrapper)
return _wrapper
class GCSHook(GoogleBaseHook):
"""
Interact with Google Cloud Storage. This hook uses the Google Cloud
connection.
"""
_conn = None # type: Optional[storage.Client]
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
google_cloud_storage_conn_id: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
# To preserve backward compatibility
# TODO: remove one day
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.",
DeprecationWarning,
stacklevel=2,
)
gcp_conn_id = google_cloud_storage_conn_id
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
def get_conn(self) -> storage.Client:
"""Returns a Google Cloud Storage service object."""
if not self._conn:
self._conn = storage.Client(
credentials=self._get_credentials(), client_info=self.client_info, project=self.project_id
)
return self._conn
def copy(
self,
source_bucket: str,
source_object: str,
destination_bucket: Optional[str] = None,
destination_object: Optional[str] = None,
) -> None:
"""
Copies an object from a bucket to another, with renaming if requested.
destination_bucket or destination_object can be omitted, in which case
source bucket/object is used, but not both.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The bucket the object is to be copied to.
Can be omitted; then the same bucket is used.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_bucket = destination_bucket or source_bucket
destination_object = destination_object or source_object
if source_bucket == destination_bucket and source_object == destination_object:
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' % (source_bucket, source_object)
)
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.bucket(source_bucket)
source_object = source_bucket.blob(source_object) # type: ignore[attr-defined]
destination_bucket = client.bucket(destination_bucket)
destination_object = source_bucket.copy_blob( # type: ignore[attr-defined]
blob=source_object, destination_bucket=destination_bucket, new_name=destination_object
)
self.log.info(
'Object %s in bucket %s copied to object %s in bucket %s',
source_object.name, # type: ignore[attr-defined]
source_bucket.name, # type: ignore[attr-defined]
destination_object.name, # type: ignore[union-attr]
destination_bucket.name, # type: ignore[union-attr]
)
def rewrite(
self,
source_bucket: str,
source_object: str,
destination_bucket: str,
destination_object: Optional[str] = None,
) -> None:
"""
Has the same functionality as copy, except that will work on files
over 5 TB, as well as when copying between locations and/or storage
classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The bucket the object is to be copied to.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_object = destination_object or source_object
if source_bucket == destination_bucket and source_object == destination_object:
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' % (source_bucket, source_object)
)
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.bucket(source_bucket)
source_object = source_bucket.blob(blob_name=source_object) # type: ignore[attr-defined]
destination_bucket = client.bucket(destination_bucket)
token, bytes_rewritten, total_bytes = destination_bucket.blob( # type: ignore[attr-defined]
blob_name=destination_object
).rewrite(source=source_object)
self.log.info('Total Bytes: %s | Bytes Written: %s', total_bytes, bytes_rewritten)
while token is not None:
token, bytes_rewritten, total_bytes = destination_bucket.blob( # type: ignore[attr-defined]
blob_name=destination_object
).rewrite(source=source_object, token=token)
self.log.info('Total Bytes: %s | Bytes Written: %s', total_bytes, bytes_rewritten)
self.log.info(
'Object %s in bucket %s rewritten to object %s in bucket %s',
source_object.name, # type: ignore[attr-defined]
source_bucket.name, # type: ignore[attr-defined]
destination_object,
destination_bucket.name, # type: ignore[attr-defined]
)
def download(
self,
bucket_name: str,
object_name: str,
filename: Optional[str] = None,
chunk_size: Optional[int] = None,
timeout: Optional[int] = DEFAULT_TIMEOUT,
num_max_attempts: Optional[int] = 1,
) -> Union[str, bytes]:
"""
Downloads a file from Google Cloud Storage.
When no filename is supplied, this method loads the file into memory and returns its
content. When a filename is supplied, it writes the file to the specified location and
returns the location. For file sizes that exceed the available memory it is recommended
to write to a file.
:param bucket_name: The bucket to fetch from.
:type bucket_name: str
:param object_name: The object to fetch.
:type object_name: str
:param filename: If set, a local file path where the file should be written to.
:type filename: str
:param chunk_size: Blob chunk size.
:type chunk_size: int
:param timeout: Request timeout in seconds.
:type timeout: int
:param num_max_attempts: Number of attempts to download the file.
:type num_max_attempts: int
"""
# TODO: future improvement check file size before downloading,
# to check for local space availability
num_file_attempts = 0
while num_file_attempts < num_max_attempts:
try:
num_file_attempts += 1
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name, chunk_size=chunk_size)
if filename:
blob.download_to_filename(filename, timeout=timeout)
self.log.info('File downloaded to %s', filename)
return filename
else:
return blob.download_as_string()
except GoogleCloudError:
if num_file_attempts == num_max_attempts:
self.log.error(
'Download attempt of object: %s from %s has failed. Attempt: %s, max %s.',
object_name,
bucket_name,
num_file_attempts,
num_max_attempts,
)
raise
# Wait with exponential backoff scheme before retrying.
timeout_seconds = 1.0 * 2 ** (num_file_attempts - 1)
time.sleep(timeout_seconds)
continue
@_fallback_object_url_to_object_name_and_bucket_name()
@contextmanager
def provide_file(
self,
bucket_name: Optional[str] = None,
object_name: Optional[str] = None,
object_url: Optional[str] = None,
):
"""
Downloads the file to a temporary directory and returns a file handle
You can use this method by passing the bucket_name and object_name parameters
or just object_url parameter.
:param bucket_name: The bucket to fetch from.
:type bucket_name: str
:param object_name: The object to fetch.
:type object_name: str
:param object_url: File reference url. Must start with "gs://"
:type object_url: str
:return: File handler
"""
if object_name is None:
raise ValueError("Object name can not be empty")
_, _, file_name = object_name.rpartition("/")
with NamedTemporaryFile(suffix=file_name) as tmp_file:
self.download(bucket_name=bucket_name, object_name=object_name, filename=tmp_file.name)
tmp_file.flush()
yield tmp_file
@_fallback_object_url_to_object_name_and_bucket_name()
@contextmanager
def provide_file_and_upload(
self,
bucket_name: Optional[str] = None,
object_name: Optional[str] = None,
object_url: Optional[str] = None,
):
"""
Creates a temporary file, returns a file handle and uploads the file's content
on close.
You can use this method by passing the bucket_name and object_name parameters
or just object_url parameter.
:param bucket_name: The bucket to fetch from.
:type bucket_name: str
:param object_name: The object to fetch.
:type object_name: str
:param object_url: File reference url. Must start with "gs://"
:type object_url: str
:return: File handler
"""
if object_name is None:
raise ValueError("Object name can not be empty")
_, _, file_name = object_name.rpartition("/")
with NamedTemporaryFile(suffix=file_name) as tmp_file:
yield tmp_file
tmp_file.flush()
self.upload(bucket_name=bucket_name, object_name=object_name, filename=tmp_file.name)
def upload(
self,
bucket_name: str,
object_name: str,
filename: Optional[str] = None,
data: Optional[Union[str, bytes]] = None,
mime_type: Optional[str] = None,
gzip: bool = False,
encoding: str = 'utf-8',
chunk_size: Optional[int] = None,
timeout: Optional[int] = DEFAULT_TIMEOUT,
num_max_attempts: int = 1,
) -> None:
"""
Uploads a local file or file data as string or bytes to Google Cloud Storage.
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param object_name: The object name to set when uploading the file.
:type object_name: str
:param filename: The local file path to the file to be uploaded.
:type filename: str
:param data: The file's data as a string or bytes to be uploaded.
:type data: str
:param mime_type: The file's mime type set when uploading the file.
:type mime_type: str
:param gzip: Option to compress local file or file data for upload
:type gzip: bool
:param encoding: bytes encoding for file data if provided as string
:type encoding: str
:param chunk_size: Blob chunk size.
:type chunk_size: int
:param timeout: Request timeout in seconds.
:type timeout: int
:param num_max_attempts: Number of attempts to try to upload the file.
:type num_max_attempts: int
"""
def _call_with_retry(f: Callable[[], None]) -> None:
"""Helper functions to upload a file or a string with a retry mechanism and exponential back-off.
:param f: Callable that should be retried.
:type f: Callable[[], None]
"""
num_file_attempts = 0
while num_file_attempts < num_max_attempts:
try:
num_file_attempts += 1
f()
except GoogleCloudError as e:
if num_file_attempts == num_max_attempts:
self.log.error(
'Upload attempt of object: %s to %s has failed. Attempt: %s, max %s.',
object_name,
bucket_name,
num_file_attempts,
num_max_attempts,
)
raise e
# Wait with exponential backoff scheme before retrying.
timeout_seconds = 1.0 * 2 ** (num_file_attempts - 1)
time.sleep(timeout_seconds)
continue
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name, chunk_size=chunk_size)
if filename and data:
raise ValueError(
"'filename' and 'data' parameter provided. Please "
"specify a single parameter, either 'filename' for "
"local file uploads or 'data' for file content uploads."
)
elif filename:
if not mime_type:
mime_type = 'application/octet-stream'
if gzip:
filename_gz = filename + '.gz'
with open(filename, 'rb') as f_in:
with gz.open(filename_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
filename = filename_gz
_call_with_retry(
partial(blob.upload_from_filename, filename=filename, content_type=mime_type, timeout=timeout)
)
if gzip:
os.remove(filename)
self.log.info('File %s uploaded to %s in %s bucket', filename, object_name, bucket_name)
elif data:
if not mime_type:
mime_type = 'text/plain'
if gzip:
if isinstance(data, str):
data = bytes(data, encoding)
out = BytesIO()
with gz.GzipFile(fileobj=out, mode="w") as f:
f.write(data)
data = out.getvalue()
_call_with_retry(partial(blob.upload_from_string, data, content_type=mime_type, timeout=timeout))
self.log.info('Data stream uploaded to %s in %s bucket', object_name, bucket_name)
else:
raise ValueError("'filename' and 'data' parameter missing. One is required to upload to gcs.")
def exists(self, bucket_name: str, object_name: str) -> bool:
"""
Checks for the existence of a file in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the blob to check in the Google cloud
storage bucket.
:type object_name: str
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
return blob.exists()
def get_blob_update_time(self, bucket_name: str, object_name: str):
"""
Get the update time of a file in Google Cloud Storage
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the blob to get updated time from the Google cloud
storage bucket.
:type object_name: str
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
if blob is None:
raise ValueError(f"Object ({object_name}) not found in Bucket ({bucket_name})")
return blob.updated
def is_updated_after(self, bucket_name: str, object_name: str, ts: datetime) -> bool:
"""
Checks if a blob was updated after the given timestamp in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param ts: The timestamp to check against.
:type ts: datetime.datetime
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
if not ts.tzinfo:
ts = ts.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s > %s", blob_update_time, ts)
if blob_update_time > ts:
return True
return False
def is_updated_between(
self, bucket_name: str, object_name: str, min_ts: datetime, max_ts: datetime
) -> bool:
"""
Checks if a blob was updated between the given timestamps in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param min_ts: The minimum timestamp to check against.
:type min_ts: datetime.datetime
:param max_ts: The maximum timestamp to check against.
:type max_ts: datetime.datetime
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
if not min_ts.tzinfo:
min_ts = min_ts.replace(tzinfo=timezone.utc)
if not max_ts.tzinfo:
max_ts = max_ts.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s is between %s and %s", blob_update_time, min_ts, max_ts)
if min_ts <= blob_update_time < max_ts:
return True
return False
def is_updated_before(self, bucket_name: str, object_name: str, ts: datetime) -> bool:
"""
Checks if a blob was updated before the given timestamp in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param ts: The timestamp to check against.
:type ts: datetime.datetime
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
if not ts.tzinfo:
ts = ts.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s < %s", blob_update_time, ts)
if blob_update_time < ts:
return True
return False
def is_older_than(self, bucket_name: str, object_name: str, seconds: int) -> bool:
"""
Checks if an object is older than the given time.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param seconds: The time in seconds to check against
:type seconds: int
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
from datetime import timedelta
current_time = timezone.utcnow()
given_time = current_time - timedelta(seconds=seconds)
self.log.info("Verify object date: %s is older than %s", blob_update_time, given_time)
if blob_update_time < given_time:
return True
return False
def delete(self, bucket_name: str, object_name: str) -> None:
"""
Deletes an object from the bucket.
:param bucket_name: name of the bucket, where the object resides
:type bucket_name: str
:param object_name: name of the object to delete
:type object_name: str
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.delete()
self.log.info('Blob %s deleted.', object_name)
def delete_bucket(self, bucket_name: str, force: bool = False) -> None:
"""
Deletes a bucket from Google Cloud Storage.
:param bucket_name: name of the bucket which will be deleted
:type bucket_name: str
:param force: if False (default), the bucket must be empty to be deleted;
set force=True to delete a non-empty bucket
:type force: bool
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
self.log.info("Deleting %s bucket", bucket_name)
try:
bucket.delete(force=force)
self.log.info("Bucket %s has been deleted", bucket_name)
except NotFound:
self.log.info("Bucket %s not exists", bucket_name)
def list(self, bucket_name, versions=None, max_results=None, prefix=None, delimiter=None) -> list:
"""
List all objects from the bucket with the given string prefix in the name
:param bucket_name: bucket name
:type bucket_name: str
:param versions: if true, list all versions of the objects
:type versions: bool
:param max_results: max count of items to return in a single page of responses
:type max_results: int
:param prefix: prefix string which filters objects whose name begin with
this prefix
:type prefix: str
:param delimiter: filters objects based on the delimiter (e.g. '.csv')
:type delimiter: str
:return: a stream of object names matching the filtering criteria
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
ids = []
page_token = None
while True:
blobs = bucket.list_blobs(
max_results=max_results,
page_token=page_token,
prefix=prefix,
delimiter=delimiter,
versions=versions,
)
blob_names = []
for blob in blobs:
blob_names.append(blob.name)
prefixes = blobs.prefixes
if prefixes:
ids += list(prefixes)
else:
ids += blob_names
page_token = blobs.next_page_token
if page_token is None:
# empty next page token
break
return ids
def list_by_timespan(
self,
bucket_name: str,
timespan_start: datetime,
timespan_end: datetime,
versions: bool = None,
max_results: int = None,
prefix: str = None,
delimiter: str = None,
) -> list:
"""
List all objects from the bucket with the given string prefix in the name that were
updated in the time between ``timespan_start`` and ``timespan_end``.
:param bucket_name: bucket name
:type bucket_name: str
:param timespan_start: will return objects that were updated at or after this datetime (UTC)
:type timespan_start: datetime
:param timespan_end: will return objects that were updated before this datetime (UTC)
:type timespan_end: datetime
:param versions: if true, list all versions of the objects
:type versions: bool
:param max_results: max count of items to return in a single page of responses
:type max_results: int
:param prefix: prefix string which filters objects whose name begin with
this prefix
:type prefix: str
:param delimiter: filters objects based on the delimiter (e.g. '.csv')
:type delimiter: str
:return: a stream of object names matching the filtering criteria
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
ids = []
page_token = None
while True:
blobs = bucket.list_blobs(
max_results=max_results,
page_token=page_token,
prefix=prefix,
delimiter=delimiter,
versions=versions,
)
blob_names = []
for blob in blobs:
if timespan_start <= blob.updated.replace(tzinfo=timezone.utc) < timespan_end:
blob_names.append(blob.name)
prefixes = blobs.prefixes
if prefixes:
ids += list(prefixes)
else:
ids += blob_names
page_token = blobs.next_page_token
if page_token is None:
# empty next page token
break
return ids
def get_size(self, bucket_name: str, object_name: str) -> int:
"""
Gets the size of a file in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google
cloud storage bucket_name.
:type object_name: str
"""
self.log.info('Checking the file size of object: %s in bucket_name: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_size = blob.size
self.log.info('The file size of %s is %s bytes.', object_name, blob_size)
return blob_size
def get_crc32c(self, bucket_name: str, object_name: str):
"""
Gets the CRC32c checksum of an object in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info(
'Retrieving the crc32c checksum of object_name: %s in bucket_name: %s',
object_name,
bucket_name,
)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_crc32c = blob.crc32c
self.log.info('The crc32c checksum of %s is %s', object_name, blob_crc32c)
return blob_crc32c
def get_md5hash(self, bucket_name: str, object_name: str) -> str:
"""
Gets the MD5 hash of an object in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info('Retrieving the MD5 hash of object: %s in bucket: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_md5hash = blob.md5_hash
self.log.info('The md5Hash of %s is %s', object_name, blob_md5hash)
return blob_md5hash
@GoogleBaseHook.fallback_to_default_project_id
def create_bucket(
self,
bucket_name: str,
resource: Optional[dict] = None,
storage_class: str = 'MULTI_REGIONAL',
location: str = 'US',
project_id: Optional[str] = None,
labels: Optional[dict] = None,
) -> str:
"""
Creates a new bucket. Google Cloud Storage uses a flat namespace, so
you can't create a bucket with a name that is already in use.
.. seealso::
For more information, see Bucket Naming Guidelines:
https://cloud.google.com/storage/docs/bucketnaming.html#requirements
:param bucket_name: The name of the bucket.
:type bucket_name: str
:param resource: An optional dict with parameters for creating the bucket.
For information on available parameters, see Cloud Storage API doc:
https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
:type resource: dict
:param storage_class: This defines how objects in the bucket are stored
and determines the SLA and the cost of storage. Values include
- ``MULTI_REGIONAL``
- ``REGIONAL``
- ``STANDARD``
- ``NEARLINE``
- ``COLDLINE``.
If this value is not specified when the bucket is
created, it will default to STANDARD.
:type storage_class: str
:param location: The location of the bucket.
Object data for objects in the bucket resides in physical storage
within this region. Defaults to US.
.. seealso::
https://developers.google.com/storage/docs/bucket-locations
:type location: str
:param project_id: The ID of the Google Cloud Project.
:type project_id: str
:param labels: User-provided labels, in key/value pairs.
:type labels: dict
:return: If successful, it returns the ``id`` of the bucket.
"""
self.log.info(
'Creating Bucket: %s; Location: %s; Storage Class: %s', bucket_name, location, storage_class
)
# Add airflow-version label to the bucket
labels = labels or {}
labels['airflow-version'] = 'v' + version.replace('.', '-').replace('+', '-')
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket_resource = resource or {}
for item in bucket_resource:
if item != "name":
bucket._patch_property(name=item, value=resource[item]) # type: ignore[index]
bucket.storage_class = storage_class
bucket.labels = labels
bucket.create(project=project_id, location=location)
return bucket.id
def insert_bucket_acl(
self, bucket_name: str, entity: str, role: str, user_project: Optional[str] = None
) -> None:
"""
Creates a new ACL entry on the specified bucket_name.
See: https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls/insert
:param bucket_name: Name of a bucket_name.
:type bucket_name: str
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers.
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:type entity: str
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER", "WRITER".
:type role: str
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:type user_project: str
"""
self.log.info('Creating a new ACL entry in bucket: %s', bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket.acl.reload()
bucket.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
bucket.acl.user_project = user_project
bucket.acl.save()
self.log.info('A new ACL entry created in bucket: %s', bucket_name)
def insert_object_acl(
self,
bucket_name: str,
object_name: str,
entity: str,
role: str,
generation: Optional[int] = None,
user_project: Optional[str] = None,
) -> None:
"""
Creates a new ACL entry on the specified object.
See: https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls/insert
:param bucket_name: Name of a bucket_name.
:type bucket_name: str
:param object_name: Name of the object. For information about how to URL encode
object names to be path safe, see:
https://cloud.google.com/storage/docs/json_api/#encoding
:type object_name: str
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:type entity: str
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER".
:type role: str
:param generation: Optional. If present, selects a specific revision of this object.
:type generation: long
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:type user_project: str
"""
self.log.info('Creating a new ACL entry for object: %s in bucket: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name, generation=generation)
# Reload fetches the current ACL from Cloud Storage.
blob.acl.reload()
blob.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
blob.acl.user_project = user_project
blob.acl.save()
self.log.info('A new ACL entry created for object: %s in bucket: %s', object_name, bucket_name)
def compose(self, bucket_name: str, source_objects: List, destination_object: str) -> None:
"""
Composes a list of existing objects into a new object in the same storage bucket.
Currently it only supports up to 32 objects that can be concatenated
in a single operation
https://cloud.google.com/storage/docs/json_api/v1/objects/compose
:param bucket_name: The name of the bucket containing the source objects.
This is also the same bucket to store the composed destination object.
:type bucket_name: str
:param source_objects: The list of source objects that will be composed
into a single object.
:type source_objects: list
:param destination_object: The path of the object if given.
:type destination_object: str
"""
if not source_objects:
raise ValueError('source_objects cannot be empty.')
if not bucket_name or not destination_object:
raise ValueError('bucket_name and destination_object cannot be empty.')
self.log.info("Composing %s to %s in the bucket %s", source_objects, destination_object, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
destination_blob = bucket.blob(destination_object)
destination_blob.compose(
sources=[bucket.blob(blob_name=source_object) for source_object in source_objects]
)
self.log.info("Completed successfully.")
def sync(
self,
source_bucket: str,
destination_bucket: str,
source_object: Optional[str] = None,
destination_object: Optional[str] = None,
recursive: bool = True,
allow_overwrite: bool = False,
delete_extra_files: bool = False,
) -> None:
"""
Synchronizes the contents of the buckets.
Parameters ``source_object`` and ``destination_object`` describe the root sync directories. If they
are not passed, the entire bucket will be synchronized. If they are passed, they should point
to directories.
.. note::
The synchronization of individual files is not supported. Only entire directories can be
synchronized.
:param source_bucket: The name of the bucket containing the source objects.
:type source_bucket: str
:param destination_bucket: The name of the bucket containing the destination objects.
:type destination_bucket: str
:param source_object: The root sync directory in the source bucket.
:type source_object: Optional[str]
:param destination_object: The root sync directory in the destination bucket.
:type destination_object: Optional[str]
:param recursive: If True, subdirectories will be considered
:type recursive: bool
:param allow_overwrite: if True, the files will be overwritten if a mismatched file is found.
By default, overwriting files is not allowed
:type allow_overwrite: bool
:param delete_extra_files: if True, deletes extra files from the destination that are not found
in the source. By default extra files are not deleted.
.. note::
This option can delete data quickly if you specify the wrong source/destination combination.
:type delete_extra_files: bool
:return: none
"""
client = self.get_conn()
# Create bucket object
source_bucket_obj = client.bucket(source_bucket)
destination_bucket_obj = client.bucket(destination_bucket)
# Normalize parameters when they are passed
source_object = self._normalize_directory_path(source_object)
destination_object = self._normalize_directory_path(destination_object)
# Calculate the number of characters to remove from the name, because they contain information
# about the parent's path
source_object_prefix_len = len(source_object) if source_object else 0
# Prepare synchronization plan
to_copy_blobs, to_delete_blobs, to_rewrite_blobs = self._prepare_sync_plan(
source_bucket=source_bucket_obj,
destination_bucket=destination_bucket_obj,
source_object=source_object,
destination_object=destination_object,
recursive=recursive,
)
self.log.info(
"Planned synchronization. To delete blobs count: %s, to upload blobs count: %s, "
"to rewrite blobs count: %s",
len(to_delete_blobs),
len(to_copy_blobs),
len(to_rewrite_blobs),
)
# Copy missing object to new bucket
if not to_copy_blobs:
self.log.info("Skipped blobs copying.")
else:
for blob in to_copy_blobs:
dst_object = self._calculate_sync_destination_path(
blob, destination_object, source_object_prefix_len
)
self.copy(
source_bucket=source_bucket_obj.name,
source_object=blob.name,
destination_bucket=destination_bucket_obj.name,
destination_object=dst_object,
)
self.log.info("Blobs copied.")
# Delete redundant files
if not to_delete_blobs:
self.log.info("Skipped blobs deleting.")
elif delete_extra_files:
# TODO: Add batch. I tried to do it, but the Google library is not stable at the moment.
for blob in to_delete_blobs:
self.delete(blob.bucket.name, blob.name)
self.log.info("Blobs deleted.")
# Overwrite files that are different
if not to_rewrite_blobs:
self.log.info("Skipped blobs overwriting.")
elif allow_overwrite:
for blob in to_rewrite_blobs:
dst_object = self._calculate_sync_destination_path(
blob, destination_object, source_object_prefix_len
)
self.rewrite(
source_bucket=source_bucket_obj.name,
source_object=blob.name,
destination_bucket=destination_bucket_obj.name,
destination_object=dst_object,
)
self.log.info("Blobs rewritten.")
self.log.info("Synchronization finished.")
def _calculate_sync_destination_path(
self, blob: storage.Blob, destination_object: Optional[str], source_object_prefix_len: int
) -> str:
return (
path.join(destination_object, blob.name[source_object_prefix_len:])
if destination_object
else blob.name[source_object_prefix_len:]
)
def _normalize_directory_path(self, source_object: Optional[str]) -> Optional[str]:
return source_object + "/" if source_object and not source_object.endswith("/") else source_object
@staticmethod
def _prepare_sync_plan(
source_bucket: storage.Bucket,
destination_bucket: storage.Bucket,
source_object: Optional[str],
destination_object: Optional[str],
recursive: bool,
) -> Tuple[Set[storage.Blob], Set[storage.Blob], Set[storage.Blob]]:
# Calculate the number of characters to remove from the name, because they contain information
# about the parent's path
source_object_prefix_len = len(source_object) if source_object else 0
destination_object_prefix_len = len(destination_object) if destination_object else 0
delimiter = "/" if not recursive else None
# Fetch blobs list
source_blobs = list(source_bucket.list_blobs(prefix=source_object, delimiter=delimiter))
destination_blobs = list(
destination_bucket.list_blobs(prefix=destination_object, delimiter=delimiter)
)
# Create indexes that allow you to identify blobs based on their name
source_names_index = {a.name[source_object_prefix_len:]: a for a in source_blobs}
destination_names_index = {a.name[destination_object_prefix_len:]: a for a in destination_blobs}
# Create sets with names without parent object name
source_names = set(source_names_index.keys())
destination_names = set(destination_names_index.keys())
# Determine objects to copy and delete
to_copy = source_names - destination_names
to_delete = destination_names - source_names
to_copy_blobs = {source_names_index[a] for a in to_copy} # type: Set[storage.Blob]
to_delete_blobs = {destination_names_index[a] for a in to_delete} # type: Set[storage.Blob]
# Find names that are in both buckets
names_to_check = source_names.intersection(destination_names)
to_rewrite_blobs = set() # type: Set[storage.Blob]
# Compare objects based on crc32
for current_name in names_to_check:
source_blob = source_names_index[current_name]
destination_blob = destination_names_index[current_name]
# if the objects are different, save it
if source_blob.crc32c != destination_blob.crc32c:
to_rewrite_blobs.add(source_blob)
return to_copy_blobs, to_delete_blobs, to_rewrite_blobs
def gcs_object_is_directory(bucket: str) -> bool:
"""
Return True if the given Google Cloud Storage URL (gs://<bucket>/<blob>)
is a directory or an empty bucket. Otherwise return False.
"""
_, blob = _parse_gcs_url(bucket)
return len(blob) == 0 or blob.endswith('/')
def _parse_gcs_url(gsurl: str) -> Tuple[str, str]:
"""
Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a
tuple containing the corresponding bucket and blob.
"""
parsed_url = urlparse(gsurl)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket name')
if parsed_url.scheme.lower() != "gs":
raise AirflowException(f"Schema must be to 'gs://': Current schema: '{parsed_url.scheme}://'")
bucket = parsed_url.netloc
# Remove leading '/' but NOT trailing one
blob = parsed_url.path.lstrip('/')
return bucket, blob
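def _demo_gcs_roundtrip():
    """
    A minimal usage sketch of the GCSHook defined above (illustrative only;
    the bucket and object names are made up for the example, and a working
    "google_cloud_default" Airflow connection is assumed).
    """
    hook = GCSHook(gcp_conn_id="google_cloud_default")
    # Upload a small string, verify it exists, then read it back into memory.
    hook.upload(bucket_name="my-bucket", object_name="data/sample.txt", data="hello")
    assert hook.exists(bucket_name="my-bucket", object_name="data/sample.txt")
    # Split a gs:// URL into its bucket and blob components.
    bucket, blob = _parse_gcs_url("gs://my-bucket/data/sample.txt")
    assert (bucket, blob) == ("my-bucket", "data/sample.txt")
    return hook.download(bucket_name="my-bucket", object_name="data/sample.txt")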
|
|
"""
This module exposes everything needed to generate a standard django form class
from a formidable object.
Given a formidable object, you can use :func:`get_dynamic_form_class` to get
its corresponding django form class.
"""
from collections import OrderedDict
from django import forms
from django.db.models import Prefetch
from formidable.forms import field_builder
from formidable.forms.conditions import conditions_register
from formidable.models import Access, Formidable, Item
class FormidableBoundFieldCache(dict):
"""
In Django 1.8, bound fields are handled in the form context (__getitem__).
However, we want to inject our own BoundField for FormatField in order to
handle labels differently.
This can be achieved by implementing the get_bound_field method in our
field (available in Django >= 1.9). For now, if the method exists,
the bound_field is switched-in at the form level.
"""
def __setitem__(self, key, bf):
form, field, name = bf.form, bf.field, bf.name
if hasattr(field, 'get_bound_field'):
bf = field.get_bound_field(form, name)
return super().__setitem__(key, bf)
class BaseDynamicForm(forms.Form):
"""
This class is used to generate the final Django form class corresponding to
the formidable object.
Please do not use this class directly; instead, use the
:func:`get_dynamic_form_class` entry point.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._bound_fields_cache = FormidableBoundFieldCache()
def get_removed_fields(self, cleaned_data):
"""
Build the list of fields to be removed due to conditional displays
"""
# build a catalog of fields **targeted** by the conditions
condition_targets = {}
# For each condition, extract its status (should I display or not)
for condition in self._conditions:
# should we keep these fields?
keep_fields = condition.keep_fields(cleaned_data)
for field_id in condition.fields_ids:
# Fill the catalog
if field_id not in condition_targets:
condition_targets[field_id] = []
condition_targets[field_id].append(keep_fields)
# Here, the catalog contains fields targeted by 1 or many conditions.
# If only one condition says "please display X", we'll keep X
# That's why we gather the conditions using "any"
condition_targets = {k: any(v) for k, v in condition_targets.items()}
# We'll only remove fields that are targeted by conditions **and**
# those conditions are false
return (k for k, v in condition_targets.items() if not v)
def clean(self):
cleaned_data = super().clean()
removed_fields = self.get_removed_fields(cleaned_data)
for field_id in removed_fields:
# Remove field from cleaned_data
cleaned_data.pop(field_id, None)
# Remove from eventual existing errors
self.errors.pop(field_id, None)
# The field might have been removed if it was a file field.
if field_id in self.fields:
del self.fields[field_id]
return cleaned_data
def get_dynamic_form_class_from_schema(schema, field_factory=None):
"""
Return a dynamically generated and contextualized form class
"""
attrs = OrderedDict()
field_factory = field_factory or field_builder.FormFieldFactory()
doc = schema['description']
for field in schema['fields']:
try:
form_field = field_factory.produce(field)
except field_builder.SkipField:
pass
else:
attrs[field['slug']] = form_field
conditions = schema.get('conditions', None) or []
attrs['_conditions'] = conditions_register.build(
attrs,
conditions
)
form_class = type(str('DynamicForm'), (BaseDynamicForm,), attrs)
form_class.__doc__ = doc
return form_class
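# Usage sketch (illustrative; the exact field keys depend on the formidable JSON
# schema consumed by the field factory):
#
#     schema = {
#         "description": "Contact form",
#         "fields": [{"slug": "name", "type_id": "text", "label": "Name"}],
#         "conditions": [],
#     }
#     DynamicForm = get_dynamic_form_class_from_schema(schema)
#     form = DynamicForm(data={"name": "Ada"})
#     form.is_valid()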
def get_dynamic_form_class(formidable, role=None, field_factory=None):
"""
This is the main method for getting a django form class from a formidable
object.
.. code-block:: python
form_obj = Formidable.objects.get(pk=42)
django_form_class = get_dynamic_form_class(form_obj)
The optional ``role`` argument provides a way to get the form class
according to the access rights you specify by role. The ``role`` must
be a role id, as defined by the code pointed to in
settings.FORMIDABLE_ACCESS_RIGHTS_LOADER.
.. code-block:: python
form_obj = Formidable.objects.get(pk=42)
django_form_class = get_dynamic_form_class(form_obj, role='jedi')
"""
attrs = OrderedDict()
field_factory = field_factory or field_builder.FormFieldFactory()
access_qs = Access.objects.all()
if role:
access_qs = access_qs.filter(access_id=role)
fields = formidable.fields.prefetch_related(
Prefetch('items', queryset=Item.objects.order_by('order')),
Prefetch('accesses', queryset=access_qs),
'validations', 'defaults'
)
for field in fields.order_by('order').all():
try:
form_field = field_factory.produce(field, role)
except field_builder.SkipField:
pass
else:
attrs[field.slug] = form_field
conditions_json = formidable.conditions or []
attrs['_conditions'] = conditions_register.build(attrs, conditions_json)
return type(str('DynamicForm'), (BaseDynamicForm,), attrs)
class FormidableForm(forms.Form):
"""
This is the main class available to build a formidable object with Django's
form API syntax.
It provides a class method :meth:`to_formidable` which saves the declared
form as a formidable objects.
Check the formidable.forms.fields module to see what fields are available
when defining your form.
"""
@classmethod
def to_formidable(cls, label=None, description=None, instance=None):
if not instance:
if not label:
raise ValueError("Label is required on creation mode")
description = description or ''
form = Formidable.objects.create(
label=label, description=description
)
else:
form = cls.get_clean_form(instance, label, description)
order = 0
for slug, field in cls.declared_fields.items():
field.to_formidable(form, order, slug)
order += 1
return form
@classmethod
def get_clean_form(cls, form, label, description):
"""
Given a form instance and new label and description values, this method
deletes all fields and validations attached to the form.
If the label or description is not empty, the value is updated
in the database *and* in memory.
The returned object is a form without fields or validations, carrying the
new label and description if provided.
"""
form.fields.all().delete()
if description or label:
kwargs = {
'description': description or form.description,
'label': label or form.label,
}
Formidable.objects.filter(pk=form.pk).update(**kwargs)
form.label = kwargs['label']
form.description = kwargs['description']
return form
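# Usage sketch (hedged: field classes live in formidable.forms.fields; the import
# path below assumes this module is importable as formidable.forms):
#
#     from formidable.forms import FormidableForm, fields
#
#     class ContactForm(FormidableForm):
#         name = fields.CharField(label="Name")
#
#     formidable_obj = ContactForm.to_formidable(label="contact")
#     django_form_class = get_dynamic_form_class(formidable_obj)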
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
subscription_id, # type: str
resource_group_name, # type: str
cache_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{cacheName}/firewallRules')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cacheName": _SERIALIZER.url("cache_name", cache_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request(
resource_group_name, # type: str
cache_name, # type: str
rule_name, # type: str
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{cacheName}/firewallRules/{ruleName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cacheName": _SERIALIZER.url("cache_name", cache_name, 'str'),
"ruleName": _SERIALIZER.url("rule_name", rule_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_group_name, # type: str
cache_name, # type: str
rule_name, # type: str
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{cacheName}/firewallRules/{ruleName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cacheName": _SERIALIZER.url("cache_name", cache_name, 'str'),
"ruleName": _SERIALIZER.url("rule_name", rule_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request(
resource_group_name, # type: str
cache_name, # type: str
rule_name, # type: str
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{cacheName}/firewallRules/{ruleName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cacheName": _SERIALIZER.url("cache_name", cache_name, 'str'),
"ruleName": _SERIALIZER.url("rule_name", rule_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class FirewallRulesOperations(object):
"""FirewallRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.redis.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name, # type: str
cache_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RedisFirewallRuleListResult"]
"""Gets all firewall rules in the specified redis cache.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cache_name: The name of the Redis cache.
:type cache_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RedisFirewallRuleListResult or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.redis.models.RedisFirewallRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RedisFirewallRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
cache_name=cache_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
cache_name=cache_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("RedisFirewallRuleListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{cacheName}/firewallRules'} # type: ignore
@distributed_trace
def create_or_update(
self,
resource_group_name, # type: str
cache_name, # type: str
rule_name, # type: str
parameters, # type: "_models.RedisFirewallRule"
**kwargs # type: Any
):
# type: (...) -> "_models.RedisFirewallRule"
"""Create or update a redis cache firewall rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cache_name: The name of the Redis cache.
:type cache_name: str
:param rule_name: The name of the firewall rule.
:type rule_name: str
:param parameters: Parameters supplied to the create or update redis firewall rule operation.
:type parameters: ~azure.mgmt.redis.models.RedisFirewallRule
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RedisFirewallRule, or the result of cls(response)
:rtype: ~azure.mgmt.redis.models.RedisFirewallRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RedisFirewallRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'RedisFirewallRule')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
cache_name=cache_name,
rule_name=rule_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RedisFirewallRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RedisFirewallRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{cacheName}/firewallRules/{ruleName}'} # type: ignore
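# Usage sketch (hedged; resource names are placeholders, `client` is the generated
# RedisManagementClient that exposes this operations group):
#
#     rule = client.firewall_rules.create_or_update(
#         resource_group_name="my-rg",
#         cache_name="my-cache",
#         rule_name="allow-office",
#         parameters=_models.RedisFirewallRule(start_ip="10.0.0.1", end_ip="10.0.0.32"),
#     )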
@distributed_trace
def get(
self,
resource_group_name, # type: str
cache_name, # type: str
rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RedisFirewallRule"
"""Gets a single firewall rule in a specified redis cache.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cache_name: The name of the Redis cache.
:type cache_name: str
:param rule_name: The name of the firewall rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RedisFirewallRule, or the result of cls(response)
:rtype: ~azure.mgmt.redis.models.RedisFirewallRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RedisFirewallRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
cache_name=cache_name,
rule_name=rule_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RedisFirewallRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{cacheName}/firewallRules/{ruleName}'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name, # type: str
cache_name, # type: str
rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes a single firewall rule in a specified redis cache.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cache_name: The name of the Redis cache.
:type cache_name: str
:param rule_name: The name of the firewall rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
cache_name=cache_name,
rule_name=rule_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{cacheName}/firewallRules/{ruleName}'} # type: ignore
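# Usage sketch (hedged; this class is normally reached through the generated
# management client rather than instantiated directly):
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.redis import RedisManagementClient
#
#     client = RedisManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for rule in client.firewall_rules.list("my-rg", "my-cache"):
#         print(rule.name, rule.start_ip, rule.end_ip)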
|
|
'''
Channels is where we store information for mapping virtual (qubit) channels to
real channels.
Split from Channels.py on Jan 14, 2016.
Moved from Atom to the SQLAlchemy ORM in 2018.
Original Author: Colm Ryan
Modified By: Graham Rowlands
Copyright 2016-2018 Raytheon BBN Technologies
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Include modification to yaml loader (MIT License) from
https://gist.github.com/joshbode/569627ced3076931b02f
Scientific notation fix for yaml from
https://stackoverflow.com/questions/30458977/yaml-loads-5e-6-as-string-and-not-a-number
'''
import sys
import os
import re
import datetime
import traceback
import importlib
import inspect
import operator
from functools import wraps, reduce
import itertools
import numpy as np
from scipy.interpolate import interp1d
import networkx as nx
import logging
import bbndb
from bqplot import Figure, LinearScale, ColorScale, Axis
from bqplot.marks import Graph, Lines, Label
from ipywidgets import Layout, VBox, HBox
from . import config
from . import Channels
from . import PulseShapes
from ipywidgets import HTML
from IPython.display import HTML as IPHTML, display
channelLib = None
logger = logging.getLogger("QGL")
def check_session_dirty(f):
"""Since we can't mix db objects from separate sessions, re-fetch entities by their unique IDs"""
@wraps(f)
def wrapper(cls, *args, **kwargs):
if (len(cls.session.dirty | cls.session.new)) == 0:
if 'force' in kwargs:
kwargs.pop('force')
return f(cls, *args, **kwargs)
elif 'force' in kwargs and kwargs['force']:
kwargs.pop('force')
return f(cls, *args, **kwargs)
else:
raise Exception("Uncommitted transactions for working database. Either use force=True or commit/revert your changes.")
return wrapper
def check_for_duplicates(f):
"""Since we can't mix db objects from separate sessions, re-fetch entities by their unique IDs"""
@wraps(f)
def wrapper(cls, label, *args, **kwargs):
if label in cls.channelDict:
logger.warning(f"A database item with the name {label} already exists. Updating parameters of this existing item instead.")
cls.channelDict[label].__dict__.update(kwargs)
return cls.channelDict[label] #should check for difference in args
else:
return f(cls, label, *args, **kwargs)
return wrapper
class ChannelLibrary(object):
def __init__(self, db_resource_name):
"""Create the channel library.
db_resource_name is the filename (without suffix) of the sqlite database used for the channel library.
The .sqlite suffix will be added automatically. Optionally it can be ":memory:" for a purely in-memory
database.
"""
db_provider="sqlite"
global channelLib
if ".sqlite" not in db_resource_name and db_resource_name != ":memory:":
db_resource_name += ".sqlite"
if db_resource_name != ":memory:":
if not os.path.isabs(db_resource_name):
db_resource_name = os.path.abspath(db_resource_name)
logger.info(f"Intializing database at {db_provider}:///{db_resource_name}")
bbndb.initialize_db(f'{db_provider}:///{db_resource_name}')
self.session = bbndb.get_cl_session()
self.connectivityG = nx.DiGraph()
self.db_provider = db_provider
self.db_resource_name = db_resource_name
# Check to see whether there is already a temp database
working_dbs = self.query(Channels.ChannelDatabase, label="working").all()
if len(working_dbs) > 1:
raise Exception("More than one working database exists!")
elif len(working_dbs) == 1:
self.channelDatabase = working_dbs[0]
elif len(working_dbs) == 0:
self.channelDatabase = Channels.ChannelDatabase(label="working", time=datetime.datetime.now())
self.add_and_update_dict(self.channelDatabase)
self.session.commit()
self.update_channelDict()
# Update the global reference
channelLib = self
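# Usage sketch (illustrative; labels and addresses are placeholders):
#
#     cl = ChannelLibrary(":memory:")
#     q1 = cl.new_qubit("q1")
#     aps2 = cl.new_APS2("BBNAPS1", address="192.168.5.101")
#     cl.set_control(q1, aps2)
#     cl.commit()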
def query(self, obj_type, **kwargs):
return self.session.query(obj_type).filter_by(**kwargs)
def get_current_channels(self):
return (self.channelDatabase.channels +
self.channelDatabase.generators +
self.channelDatabase.transmitters +
self.channelDatabase.receivers +
self.channelDatabase.transceivers +
self.channelDatabase.instruments +
self.channelDatabase.processors +
self.channelDatabase.attenuators +
self.channelDatabase.DCSources +
self.channelDatabase.spectrum_analyzers)
def update_channelDict(self):
self.channelDict = {c.label: c for c in self.get_current_channels()}
self.build_connectivity_graph()
def ls(self):
cdb = Channels.ChannelDatabase
q = self.session.query(cdb.label, cdb.time, cdb.id, cdb.notes).\
order_by(-Channels.ChannelDatabase.id, Channels.ChannelDatabase.label, Channels.ChannelDatabase.notes).all()
table_code = ""
for i, (label, time, id, notes) in enumerate(q):
y, d, t = map(time.strftime, ["%Y", "%b. %d", "%I:%M:%S %p"])
table_code += f"<tr><td>{id}</td><td>{y}</td><td>{d}</td><td>{t}</td><td>{label}</td><td>{notes}</td></tr>"
display(IPHTML(f"<table><tr><th>id</th><th>Year</th><th>Date</th><th>Time</th><th>Name</th><th>Notes</th></tr><tr>{table_code}</tr></table>"))
def ent_by_type(self, obj_type, show=False):
q = self.session.query(obj_type).filter(obj_type.channel_db.has(label="working")).order_by(obj_type.label).all()
if show:
for i, el in enumerate(q):
print(f"[{i}] -> {el.label}")
else:
return q
def show(self, qubits=[]):
# nodes = list(dgraph.nodes())
edges = []
qub_objs = qubits if not qubits == [] else self.qubits()
for q in qub_objs:
edges.append((q, q.measure_chan))
edges.append((q.measure_chan, q.measure_chan.phys_chan))
edges.append((q.measure_chan.phys_chan,q.measure_chan.phys_chan.transmitter))
edges.append((q, q.phys_chan))
edges.append((q.phys_chan, q.phys_chan.transmitter))
#Generators
if q.measure_chan.phys_chan.generator:
edges.append((q.measure_chan.phys_chan, q.measure_chan.phys_chan.generator))
if q.phys_chan.generator:
edges.append((q.phys_chan, q.phys_chan.generator))
# Triggers
if q.measure_chan.trig_chan:
edges.append((q.measure_chan, q.measure_chan.trig_chan))
graph = nx.digraph.DiGraph()
graph.add_edges_from(edges)
indices = {n: i for i, n in enumerate(graph.nodes())}
node_data = [{'label': str(n).replace('(','\r\n(')} for n in graph.nodes()]
link_data = [{'source': indices[s], 'target': indices[t]} for s, t in graph.edges()]
qub_objs.sort(key=lambda x: x.label)
qubit_names = [q.label for q in qub_objs]
loc = {}
def next_level(nodes, iteration=0, offset=0, accum=[]):
if len(accum) == 0:
loc[nodes[0]] = {'x': 0, 'y': 0}
accum = [nodes]
next_gen_nodes = list(reduce(operator.add, [list(graph.successors(n)) for n in nodes]))
l = len(next_gen_nodes)
if l > 0:
for k,n in enumerate(next_gen_nodes):
loc[n] = {'x': k, 'y': -(iteration+1)}
accum.append(next_gen_nodes)
return next_level(next_gen_nodes, iteration=iteration+1, offset=2.5*l, accum=accum)
else:
return accum
hierarchy = [next_level([q]) for q in qub_objs]
widest = [max([len(row) for row in qh]) for qh in hierarchy]
for i in range(1, len(qub_objs)):
offset = sum(widest[:i])
loc[qub_objs[i]]['x'] += offset*3
for n in nx.descendants(graph, qub_objs[i]):
loc[n]['x'] += offset*3
x = [loc[n]['x'] for n in graph.nodes()]
y = [loc[n]['y'] for n in graph.nodes()]
xs = LinearScale(min=min(x)-0.5, max=max(x)+0.6)
ys = LinearScale(min=min(y)-0.5, max=max(y)+0.6)
fig_layout = Layout(width='960px', height='500px')
bq_graph = Graph(node_data=node_data, link_data=link_data, x=x, y=y, scales={'x': xs, 'y': ys},
link_type='line', colors=['orange'] * len(node_data), directed=False)
bgs_lines = []
middles = []
for i in range(len(qub_objs)):
if i==0:
start = -0.4
end = widest[0]-0.6
elif i == len(qub_objs):
start = sum(widest)-0.4
end = max(x)+0.4
else:
start = sum(widest[:i])-0.4
end = sum(widest[:i+1])-0.6
fig = Figure(marks=[bq_graph], layout=fig_layout)
return fig
def show_connectivity(self, verbose=False):
graph_edges = []
qub_objs = self.qubits()
edges = self.edges()
for e in edges:
graph_edges.append((e.source.label, e.target.label))
table = HTML("<b>Re-evaluate this plot to see information about qubits. Otherwise it will be stale.</b>")
table.add_class("hover_tooltip")
display(IPHTML("""
<style>
.hover_tooltip table { border-collapse: collapse; padding: 8px; }
.hover_tooltip th, .hover_tooltip td { text-align: left; padding: 8px; }
.hover_tooltip tr:nth-child(even) { background-color: #cccccc; padding: 8px; }
</style>
"""))
graph = nx.digraph.DiGraph()
for q in qub_objs:
graph.add_node(q.label, node_obj = q)
graph.add_edges_from(graph_edges)
indices = {n: i for i, n in enumerate(graph.nodes())}
node_data = [{'label': n, 'data': v['node_obj'].print(show=False, verbose=verbose), 'edge_data': v['node_obj'].print_edges(show=False, verbose=verbose, edges = [e for e in self.edges() if e.source.label == n or e.target.label == n]
)} for n,v in graph.nodes(True)] # fix edges
link_data = [{'source': indices[s], 'target': indices[t]} for s, t in graph.edges()]
qub_objs.sort(key=lambda x: x.label)
qubit_names = [q.label for q in qub_objs]
loc = {}
nqubits = len(qub_objs)
dtheta = 2*np.pi/nqubits
rho = 4
x = [rho*np.cos(dtheta*ind) for ind,n in enumerate(qub_objs)]
y = [rho*np.sin(dtheta*ind) for ind,n in enumerate(qub_objs)]
hovered_symbol = ''
def hover_handler(self, content, hovered_symbol=hovered_symbol, table=table):
symbol = content.get('data', '')
if(symbol != hovered_symbol):
hovered_symbol = symbol
table.value = symbol['data']
def click_handler(self, content, hovered_symbol=hovered_symbol, table=table):
symbol = content.get('data', '')
if(symbol != hovered_symbol):
hovered_symbol = symbol
table.value = symbol['edge_data']
xs = LinearScale(min=min(x)-0.5, max=max(x)+0.6)
ys = LinearScale(min=min(y)-0.5, max=max(y)+0.6)
fig_layout = Layout(width='500px', height='500px')
cs = ColorScale(scheme = 'PuBuGn')
bq_graph = Graph(node_data=node_data, link_data=link_data, x=x, y=y,scales={'x':xs, 'y':ys, 'color': cs},
link_type='line', color=np.linspace(0,1,len(node_data)), directed=True)
bgs_lines = []
middles = []
bq_graph.tooltip = table
bq_graph.on_hover(hover_handler)
bq_graph.on_element_click(click_handler)
fig = Figure(marks=[bq_graph], layout=fig_layout)
return fig
def show_frequency_plan(self):
c_freqs = {}
m_freqs = {}
for qubit in self.qubits():
c_freqs[qubit.label] = qubit.frequency*1e-9
if qubit.phys_chan.generator:
c_freqs[qubit.label] += qubit.phys_chan.generator.frequency*1e-9
m_freqs[qubit.label] = qubit.measure_chan.frequency*1e-9
if qubit.measure_chan.phys_chan.generator:
m_freqs[qubit.label] += qubit.measure_chan.phys_chan.generator.frequency*1e-9
def spike_at(f):
fs = np.linspace(f-0.02,f+0.02,50)
return fs, np.exp(-(fs-f)**2/0.01**2)
figs = []
for freqs, ss in zip([c_freqs, m_freqs],["Control","Measure"]):
sx = LinearScale()
sy = LinearScale()
ax = Axis(scale=sx, label="Frequency (GHz)")
ay = Axis(scale=sy, orientation='vertical')
lines = []
for k,f in freqs.items():
fs, a = spike_at(f)
lines.append(Lines(x=fs, y=a, scales={'x': sx, 'y': sy}))
labels = Label(x=list(freqs.values()), y=[1.1 for f in freqs], text=list(freqs.keys()), align='middle', scales= {'x': sx, 'y': sy},
default_size=14, font_weight='bolder', colors=['#4f6367'])
figs.append(Figure(marks=lines+[labels], axes=[ax, ay], title=f"{ss} Frequency Plan"))
return HBox(figs)
def diff(self, name1, name2, index1=1, index2=1):
'''
Compare two channel library versions. Print the differences between the two libraries, including parameter values and channel allocations. Both versions must be saved in the same sqlite database.
Args
name1: name of the first version to compare
name2: name of the second version to compare
index1, index2: by default, the most recent instance for each name is loaded. Specifying index1/index2 = 2 selects the second most recent instance, etc.
'''
cdb = Channels.ChannelDatabase
db1 = self.session.query(cdb).filter(cdb.label==name1).order_by(cdb.time.asc())[-1*index1]
db2 = self.session.query(cdb).filter(cdb.label==name2).order_by(cdb.time.asc())[-1*index2]
copied_db1 = bbndb.deepcopy_sqla_object(db1)
copied_db2 = bbndb.deepcopy_sqla_object(db2)
dict_1 = {c.label: c for c in copied_db1.channels + copied_db1.all_instruments()}
dict_2 = {c.label: c for c in copied_db2.channels + copied_db2.all_instruments()}
def iter_diff(value_iter1, value_iter2, ct, label=''):
table_code = ''
for key, key2 in zip(value_iter1, value_iter2):
if key in ['_sa_instance_state', 'channel_db']:
continue
if isinstance(value_iter1, dict):
cmp1 = value_iter1[key]
cmp2 = value_iter2[key]
if label in value_iter1:
label = value_iter1['label']
elif isinstance(value_iter1, list):
cmp1 = key
cmp2 = key2 #TODO fix. why would they be in any order?
else:
cmp1 = getattr(value_iter1, key)
cmp2 = getattr(value_iter2, key)
if (cmp1 == None) ^ (cmp2 == None):
table_code += f"<tr><td>{label}</td><td>{key}</td><td>{cmp1}</td><td>{cmp2}</td></tr>"
continue
if (cmp1 == None) or (cmp2 == None) or ((isinstance(cmp1, dict) or isinstance(cmp1, list)) and len(cmp1) == 0):
continue
if isinstance(cmp1, (bbndb.qgl.DatabaseItem, bbndb.qgl.Channel, bbndb.qgl.Instrument)):
cmp1 = cmp1.__dict__
cmp2 = cmp2.__dict__
if isinstance(cmp1, (dict, list, bbndb.qgl.DatabaseItem, bbndb.qgl.Channel, bbndb.qgl.Instrument)):
if ct<1: # up to 2 recursion levels for now, to avoid infinite loops for bidirectional relations
ct+=1
table_code += iter_diff(cmp1, cmp2, ct, label=label)
continue
if cmp1 != cmp2:
table_code += f"<tr><td>{label}</td><td>{key}</td><td>{cmp1}</td><td>{cmp2}</td></tr>"
return table_code
table_code = ''
for chan in set(list(dict_1.keys()) + list(dict_2.keys())):
if chan not in dict_1 or chan not in dict_2: # don't display differences of unique channels
continue
this_dict1 = dict_1[chan].__dict__
this_dict2 = dict_2[chan].__dict__
ct = 0
table_code += iter_diff(this_dict1, this_dict2, ct, chan)
display(HTML(f"<table><tr><th>Object</th><th>Parameter</th><th>{name1}</th><th>{name2}</th></tr><tr>{table_code}</tr></table>"))
def receivers(self):
return self.ent_by_type(Channels.Receiver)
def transmitters(self):
return self.ent_by_type(Channels.Transmitter)
def transceivers(self):
return self.ent_by_type(Channels.Transceiver)
def qubits(self):
return self.ent_by_type(Channels.Qubit)
def edges(self):
return self.ent_by_type(Channels.Edge)
def meas(self):
return self.ent_by_type(Channels.Measurement)
def markers(self):
return self.ent_by_type(Channels.LogicalMarkerChannel)
@check_session_dirty
def load(self, name, index=1):
"""Load the latest instance for a particular name. Specifying index = 2 will select the second most recent instance """
cdb = Channels.ChannelDatabase
items = self.session.query(cdb).filter(cdb.label==name).order_by(cdb.time.asc()).all()
self.load_obj(items[-index])
@check_session_dirty
def load_by_id(self, id_num):
item = self.session.query(Channels.ChannelDatabase).filter_by(id=id_num).first()
self.load_obj(item)
def clear(self, channel_db=None, create_new=True):
# If no database is specified, clear self.database
channel_db = channel_db if channel_db else self.channelDatabase
self.session.delete(channel_db)
self.session.commit()
if create_new:
self.channelDatabase = Channels.ChannelDatabase(label="working", time=datetime.datetime.now())
self.add_and_update_dict(self.channelDatabase)
self.session.commit()
channelLib = self
def rm(self, library_name, keep_id=-1):
"""Remove the channel library named `library_name`. If no `keep_version` is specified then
all versions are removed. Otherwise """
cdb = Channels.ChannelDatabase
items = self.session.query(cdb).filter(cdb.label==library_name, cdb.id!=keep_id).all()
for item in items:
self.session.delete(item)
def rm_by_id(self, id_num):
"""Remove the channel library version with id `id_num`"""
item = self.session.query(Channels.ChannelDatabase).filter_by(id=id_num).first()
self.session.delete(item)
def load_obj(self, obj):
self.clear(create_new=False)
self.channelDatabase = bbndb.deepcopy_sqla_object(obj, self.session)
self.channelDatabase.label = "working"
self.session.commit()
self.update_channelDict()
def commit(self):
self.session.commit()
self.update_channelDict()
def revert(self):
self.session.rollback()
@check_session_dirty
def save_as(self, name, notes = ''):
if name == "working":
raise ValueError("Cannot save as `working` since that is the default working environment name...")
self.commit()
new_channelDatabase = bbndb.deepcopy_sqla_object(self.channelDatabase, self.session)
new_channelDatabase.label = name
new_channelDatabase.time = datetime.datetime.now()
new_channelDatabase.notes = notes
self.commit()
def add_and_update_dict(self, el):
if isinstance(el, list):
self.session.add_all(el)
else:
self.session.add(el)
self.update_channelDict()
#Dictionary methods
def __getitem__(self, key):
return self.channelDict[key]
def __setitem__(self, key, value):
self.channelDict[key] = value
def __delitem__(self, key):
del self.channelDict[key]
def __contains__(self, key):
return key in self.channelDict
def keys(self):
return self.channelDict.keys()
def values(self):
return self.channelDict.values()
def build_connectivity_graph(self):
# build connectivity graph
for chan in [q for q in self.session.query(Channels.Qubit).all() if q not in self.connectivityG]:
self.connectivityG.add_node(chan)
for chan in self.session.query(Channels.Edge): #select(e for e in Channels.Edge):
self.connectivityG.add_edge(chan.source, chan.target)
self.connectivityG[chan.source][chan.target]['channel'] = chan
@check_for_duplicates
def new_APS3(self, label, address, serial_port, dac, **kwargs):
chan1 = Channels.PhysicalQuadratureChannel(label=f"{label}-1", channel=0, instrument=label, translator="APS3Pattern", sampling_rate=2.5e9, channel_db=self.channelDatabase)
m1 = Channels.PhysicalMarkerChannel(label=f"{label}-m1", channel=0, instrument=label, translator="APS3Pattern", sampling_rate=2.5e9, channel_db=self.channelDatabase)
this_transmitter = Channels.Transmitter(label=label, model="APS3", address=address, serial_port=serial_port, dac=dac, channels=[chan1, m1], channel_db=self.channelDatabase, **kwargs)
this_transmitter.trigger_source = 'external' if 'trigger_source' not in kwargs else kwargs['trigger_source']
self.add_and_update_dict(this_transmitter)
return this_transmitter
@check_for_duplicates
def new_APS2(self, label, address, **kwargs):
chan1 = Channels.PhysicalQuadratureChannel(label=f"{label}-1", channel=0, instrument=label, translator="APS2Pattern", channel_db=self.channelDatabase)
m1 = Channels.PhysicalMarkerChannel(label=f"{label}-m1", channel=0, instrument=label, translator="APS2Pattern", channel_db=self.channelDatabase)
m2 = Channels.PhysicalMarkerChannel(label=f"{label}-m2", channel=1, instrument=label, translator="APS2Pattern", channel_db=self.channelDatabase)
m3 = Channels.PhysicalMarkerChannel(label=f"{label}-m3", channel=2, instrument=label, translator="APS2Pattern", channel_db=self.channelDatabase)
m4 = Channels.PhysicalMarkerChannel(label=f"{label}-m4", channel=3, instrument=label, translator="APS2Pattern", channel_db=self.channelDatabase)
this_transmitter = Channels.Transmitter(label=label, model="APS2", address=address, channels=[chan1, m1, m2, m3, m4], channel_db=self.channelDatabase, **kwargs)
this_transmitter.trigger_source = "external"
this_transmitter.address = address
self.add_and_update_dict(this_transmitter)
return this_transmitter
@check_for_duplicates
def new_APS(self, label, address, **kwargs):
chan1 = Channels.PhysicalQuadratureChannel(label=f"{label}-12", channel = 0, instrument=label, translator="APSPattern", channel_db=self.channelDatabase)
chan2 = Channels.PhysicalQuadratureChannel(label=f"{label}-34", channel = 1, instrument=label, translator="APSPattern", channel_db=self.channelDatabase)
m1 = Channels.PhysicalMarkerChannel(label=f"{label}-1m1", channel=0, instrument=label, translator="APSPattern", channel_db=self.channelDatabase)
m2 = Channels.PhysicalMarkerChannel(label=f"{label}-2m1", channel=1, instrument=label, translator="APSPattern", channel_db=self.channelDatabase)
m3 = Channels.PhysicalMarkerChannel(label=f"{label}-3m1", channel=2, instrument=label, translator="APSPattern", channel_db=self.channelDatabase)
m4 = Channels.PhysicalMarkerChannel(label=f"{label}-4m1", channel=3, instrument=label, translator="APSPattern", channel_db=self.channelDatabase)
this_transmitter = Channels.Transmitter(label=label, model="APS", address=address, channels=[chan1, chan2, m1, m2, m3, m4], channel_db=self.channelDatabase)
this_transmitter.trigger_source = "external"
this_transmitter.address = address
self.add_and_update_dict(this_transmitter)
return this_transmitter
@check_for_duplicates
def new_TDM(self, label, address, trigger_interval=250e-6, **kwargs):
chans = []
for k in range(7): # TDM has 7 digital inputs
chans.append(Channels.DigitalInput(label=f"DigitalInput-{label}-{k}", channel=k, channel_db=self.channelDatabase))
tdm = Channels.Processor(label=label, model="TDM", address=address, trigger_interval=trigger_interval, channels=chans, channel_db=self.channelDatabase)
self.add_and_update_dict(tdm)
return tdm
@check_for_duplicates
def new_spectrum_analyzer(self, label, address, source, model="SpectrumAnalyzer", **kwargs):
sa = Channels.SpectrumAnalyzer(label=label, model=model, address=address, LO_source=source, channel_db=self.channelDatabase, **kwargs)
self.add_and_update_dict(sa)
return sa
@check_for_duplicates
def new_DC_source(self, label, address, **kwargs):
dcsource = Channels.DCSource(label=label, model="YokogawaGS200", address=address, standalone=True, channel_db=self.channelDatabase, **kwargs)
self.add_and_update_dict(dcsource)
return dcsource
@check_for_duplicates
def new_attenuator(self,label,address,attenuation=0):
chan1 = Channels.AttenuatorChannel(label=f"AttenChan-{label}-1", channel=1, attenuation=attenuation, channel_db=self.channelDatabase)
chan2 = Channels.AttenuatorChannel(label=f"AttenChan-{label}-2", channel=2, attenuation=attenuation, channel_db=self.channelDatabase)
chan3 = Channels.AttenuatorChannel(label=f"AttenChan-{label}-3", channel=3, attenuation=attenuation, channel_db=self.channelDatabase)
thing = Channels.Attenuator(label=label,model="DigitalAttenuator",address=address,channels=[chan1, chan2, chan3], standalone=True, channel_db=self.channelDatabase)
self.add_and_update_dict(thing)
return thing
@check_for_duplicates
def new_APS2_rack(self, label, ip_addresses, tdm_ip=None, **kwargs):
transmitters = [self.new_APS2(f"{label}_U{n+1}", f"{ip}") for n, ip in enumerate(ip_addresses)]
this_transceiver = Channels.Transceiver(label=label, model="APS2", master=True, address=ip_addresses[0], transmitters=transmitters, channel_db=self.channelDatabase, **kwargs)
for t in transmitters:
t.transceiver = this_transceiver
if tdm_ip:
tdm = self.new_TDM(f"{label}_TDM", tdm_ip)
this_transceiver.processors = [tdm]
for t in transmitters:
t.trigger_source = 'system'
self.add_and_update_dict(this_transceiver)
return this_transceiver
@check_for_duplicates
def new_transceiver(self, model, label, address, numtx=1, numrx=1, nummark=4,
record_length = 1024, reference_freq=10e6, tx_sampling_rate=500e6, rx_sampling_rate=1e9, **kwargs):
translator = model+"Pattern"
stream_sel = model+"StreamSelector"
chans = []
for i in range(numtx):
chan = Channels.PhysicalQuadratureChannel(label=f"{label}-Tx{i+1:02d}-1", instrument=label, channel=i,
sampling_rate=tx_sampling_rate, translator=translator, channel_db=self.channelDatabase)
chans.append(chan)
for i in range(nummark):
chan = Channels.PhysicalMarkerChannel(label=f"{label}-Tx{i+1:02d}-M", channel=i, instrument=label,
translator=translator, channel_db=self.channelDatabase)
chans.append(chan)
transmitter = Channels.Transmitter(label=f"{label}-Tx", model=model, address=address, channels=chans,
channel_db=self.channelDatabase)
transmitter.trigger_source = "external"
transmitter.address = address
chans = []
for i in range(numrx):
chan = Channels.ReceiverChannel(label=f"RecvChan-{label}-{i+1:02d}", channel=i, channel_db=self.channelDatabase)
chans.append(chan)
receiver = Channels.Receiver(label=f"{label}-Rx", model=model, address=address, channels=chans,
sampling_rate=rx_sampling_rate, reference_freq=reference_freq, record_length=record_length, channel_db=self.channelDatabase)
receiver.trigger_source = "external"
receiver.stream_types = "raw"
receiver.address = address
receiver.stream_sel = stream_sel
transceiver = Channels.Transceiver(label=label, address=address, model=model, transmitters=[transmitter],
receivers = [receiver], initialize_separately=False, channel_db=self.channelDatabase)
transmitter.transceiver = transceiver
receiver.transceiver = transceiver
transceiver.master = True
transceiver._locked = False
self.add_and_update_dict(transceiver)
return transceiver
@check_for_duplicates
def new_X6(self, label, address, dsp_channel=0, record_length=1024, **kwargs):
phys_channels = (1, 2)
chans = []
for phys_chan in (1,2):
chans.append(Channels.ReceiverChannel(label=f"RecvChan-{label}-{phys_chan}",
channel=phys_chan, channel_db=self.channelDatabase))
this_receiver = Channels.Receiver(label=label, model="X6", address=address, channels=chans,
record_length=record_length, channel_db=self.channelDatabase, **kwargs)
this_receiver.trigger_source = "external"
this_receiver.stream_types = "raw, demodulated, integrated"
this_receiver.address = address
this_receiver.stream_sel = "X6StreamSelector"
self.add_and_update_dict(this_receiver)
return this_receiver
@check_for_duplicates
def new_Alazar(self, label, address, record_length=1024, **kwargs):
chan1 = Channels.ReceiverChannel(label=f"RecvChan-{label}-1", channel=1, channel_db=self.channelDatabase)
chan2 = Channels.ReceiverChannel(label=f"RecvChan-{label}-2", channel=2, channel_db=self.channelDatabase)
this_receiver = Channels.Receiver(label=label, model="AlazarATS9870", address=address, channels=[chan1, chan2],
record_length=record_length, channel_db=self.channelDatabase, **kwargs)
this_receiver.trigger_source = "external"
this_receiver.stream_types = "raw"
this_receiver.address = address
this_receiver.stream_sel = "AlazarStreamSelector"
self.add_and_update_dict(this_receiver)
return this_receiver
@check_for_duplicates
def new_qubit(self, label, **kwargs):
thing = Channels.Qubit(label=label, channel_db=self.channelDatabase, **kwargs)
self.add_and_update_dict(thing)
return thing
@check_for_duplicates
def new_logical_channel(self, label, **kwargs):
thing = Channels.LogicalChannel(label=label, channel_db=self.channelDatabase, **kwargs)
self.add_and_update_dict(thing)
return thing
@check_for_duplicates
def new_marker(self, label, phys_chan, **kwargs):
thing = Channels.LogicalMarkerChannel(label=label, phys_chan = phys_chan, channel_db=self.channelDatabase, **kwargs)
self.add_and_update_dict(thing)
return thing
@check_for_duplicates
def new_source(self, label, model, address, power=-30.0, frequency=5.0e9, reference='10MHz', **kwargs):
thing = Channels.Generator(label=label, model=model, address=address, power=power,
frequency=frequency, reference=reference,
channel_db=self.channelDatabase, **kwargs)
self.add_and_update_dict(thing)
return thing
def set_control(self, qubit_or_edge, transmitter, generator=None):
if isinstance(transmitter, Channels.Transmitter):
quads = [c for c in transmitter.channels if isinstance(c, Channels.PhysicalQuadratureChannel)]
markers = [c for c in transmitter.channels if isinstance(c, Channels.PhysicalMarkerChannel)]
if len(quads) > 1:
raise ValueError("In set_control the Transmitter must have a single quadrature channel or a specific channel must be passed instead")
elif len(quads) == 1:
phys_chan = quads[0]
elif isinstance(transmitter, Channels.PhysicalQuadratureChannel):
phys_chan = transmitter
markers = [c for c in transmitter.transmitter.channels if isinstance(c, Channels.PhysicalMarkerChannel)]
else:
raise ValueError("In set_control the Transmitter must have a single quadrature channel or a specific channel must be passed instead")
qubit_or_edge.phys_chan = phys_chan
if generator:
qubit_or_edge.phys_chan.generator = generator
self.update_channelDict()
def set_bias(self, qubit, bias=None, frequency=None):
"""
Set either qubit frequency or DC bias given the other, reading the values or interpolating from qubit.bias_pairs.
qubit: qubit bias/frequency to be set
bias (option 1): set the DC bias of the associated qubit.DCsource and the qubit control generator to the corresponding frequency
frequency (option 2): set the qubit control generator (accounting for SSB) and the associated DCsource to the corresponding value
"""
if not isinstance(qubit, Channels.Qubit):
raise ValueError("Set DC bias for a qubit only")
if not qubit.bias_pairs:
raise ValueError("Bias - frequency pairs not defined")
if bool(bias) and bool(frequency):
raise ValueError("Choose either DC bias or qubit frequency")
bias_pairs = sorted(qubit.bias_pairs.items())
biases = [k[0] for k in bias_pairs]
freqs_q = [k[1]['freq_q'] for k in bias_pairs]
freqs_r = [k[1]['freq_r'] for k in bias_pairs]
qubit.phys_chan.generator.frequency = frequency if frequency else interp1d(biases, freqs_q)([bias])[0]
qubit.phys_chan.generator.frequency -= qubit.frequency
qubit.bias_source.level = bias if bias else interp1d(freqs_q, biases)([frequency])[0]
qubit.measure_chan.phys_chan.generator.frequency = interp1d(biases, freqs_r)([qubit.bias_source.level])[0]
qubit.measure_chan.phys_chan.generator.frequency -= qubit.measure_chan.autodyne_freq
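# Usage sketch (hedged; assumes bias_pairs, bias_source and the control/readout
# generators have already been configured on the qubit):
#
#     cl.set_bias(cl["q1"], bias=0.25)        # interpolate generator frequencies from the bias
#     cl.set_bias(cl["q1"], frequency=5.1e9)  # ...or specify the qubit frequency instead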
def new_edge(self, source, target, cnot_impl=None):
"""
Create a new edge connecting two qubits
source (Qubit): logical channel for source qubit
target (Qubit): logical channel for target qubit
cnot_impl (string, optional): function name for CNOT implementation, overriding the default in QGL/config.py
"""
label = f"{source.label}->{target.label}"
if label in self.channelDict:
edge = self.channelDict[f"{source.label}->{target.label}"]
logger.warning(f"The edge {source.label}->{target.label} already exists: using this edge.")
else:
edge = Channels.Edge(label=f"{source.label}->{target.label}", source=source, target=target, channel_db=self.channelDatabase, cnot_impl=cnot_impl)
self.add_and_update_dict(edge)
return edge
def set_qubit_connectivity(self, graph):
"""
Graph is a networkx DiGraph consisting of edges (source qubit, target qubit)
"""
new_edges = [Channels.Edge(label=f"{source.label}->{target.label}", source=source, target=target) for source, target in graph.edges()]
self.add_and_update_dict(new_edges)
return new_edges
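# Usage sketch (illustrative):
#
#     import networkx as nx
#     g = nx.DiGraph()
#     g.add_edge(cl["q1"], cl["q2"])
#     cl.set_qubit_connectivity(g)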
def set_measure(self, qubit, transmitter, receivers, generator=None, trig_channel=None, gate=False, gate_channel=None, trigger_length=1e-7, tdm_chan=None):
if isinstance(transmitter, Channels.Transmitter):
quads = [c for c in transmitter.channels if isinstance(c, Channels.PhysicalQuadratureChannel)]
markers = [c for c in transmitter.channels if isinstance(c, Channels.PhysicalMarkerChannel)]
if len(quads) > 1:
raise ValueError("In set_measure the Transmitter must have a single quadrature channel or a specific channel must be passed instead")
elif len(quads) == 1:
phys_chan = quads[0]
elif isinstance(transmitter, Channels.PhysicalQuadratureChannel):
phys_chan = transmitter
markers = [c for c in transmitter.transmitter.channels if isinstance(c, Channels.PhysicalMarkerChannel)]
else:
raise ValueError("In set_measure the Transmitter must have a single quadrature channel or a specific channel must be passed instead")
if f"M-{qubit.label}" in self.channelDict:
logger.warning(f"The measurement M-{qubit.label} already exists: using this measurement.")
meas = self.channelDict[f"M-{qubit.label}"]
else:
meas = Channels.Measurement(label=f"M-{qubit.label}", channel_db=self.channelDatabase)
meas.phys_chan = phys_chan
if generator:
meas.phys_chan.generator = generator
phys_trig_channel = trig_channel if trig_channel else transmitter.get_chan("m1")
if f"ReceiverTrig-{qubit.label}" in self.channelDict:
logger.warning(f"The Receiver trigger ReceiverTrig-{qubit.label} already exists: using this channel.")
trig_chan = self.channelDict[f"ReceiverTrig-{qubit.label}"]
else:
trig_chan = Channels.LogicalMarkerChannel(label=f"ReceiverTrig-{qubit.label}", channel_db=self.channelDatabase)
self.session.add(trig_chan)
trig_chan.phys_chan = phys_trig_channel
trig_chan.pulse_params = {"length": trigger_length, "shape_fun": "constant"}
meas.trig_chan = trig_chan
qubit.measure_chan = meas
if isinstance(receivers, Channels.Receiver) and len(receivers.channels) > 1:
raise ValueError("In set_measure the Receiver must have a single receiver channel or a specific channel must be passed instead")
elif isinstance(receivers, Channels.Receiver) and len(receivers.channels) == 1:
rcv_chan = receivers.channels[0]
elif isinstance(receivers, Channels.ReceiverChannel):
rcv_chan = receivers
else:
raise ValueError("In set_measure the Transmitter must have a single quadrature channel or a specific channel must be passed instead")
meas.receiver_chan = rcv_chan
self.add_and_update_dict([meas, trig_chan])
if gate:
phys_gate_channel = gate_channel if gate_channel else transmitter.get_chan("m2")
if f"M-{qubit.label}-gate" in self.channelDict:
logger.warning(f"The gate channel M-{qubit.label}-gate already exists: using this channel.")
gate_chan = self.channelDict[f"M-{qubit.label}-gate"]
gate_chan = Channels.LogicalMarkerChannel(label=f"M-{qubit.label}-gate", channel_db=self.channelDatabase)
gate_chan.phys_chan = phys_gate_channel
meas.gate_chan = gate_chan
self.add_and_update_dict([gate_chan])
if tdm_chan:
if isinstance(tdm_chan, Channels.DigitalInput):
phys_tdm_channel = tdm_chan
else:
if not hasattr(self.channelDatabase, 'processors') or not self.channelDatabase.processors:
raise ValueError(f"No processor is defined")
elif len(self.channelDatabase.processors) > 1:
raise ValueError(f"Multiple processors are defined. Please specify digital input channel.")
else:
tdm = self.channelDatabase.processors[0]
phys_tdm_channel = tdm.get_chan(tdm_chan)
meas.processor_chan = phys_tdm_channel
self.add_and_update_dict([meas, phys_tdm_channel])
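# Usage sketch (hedged; instrument labels are illustrative and assume the objects
# created in the ChannelLibrary example above):
#
#     x6 = cl.new_X6("X6_1", address="1")
#     cl.set_measure(q1, aps2, x6.channels[0])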
def set_master(self, master_instrument, trig_channel=None, pulse_length=1e-7):
if isinstance(master_instrument, Channels.Processor):
master_instrument.master = True
elif trig_channel:
if not isinstance(trig_channel, Channels.PhysicalMarkerChannel):
raise ValueError("In set_master the trigger channel must be an instance of PhysicalMarkerChannel")
if "slave_trig" in self.channelDict:
logger.warning(f"The slave trigger slave_trig already exists: using this trigger.")
st = self.channelDict["slave_trig"]
else:
st = Channels.LogicalMarkerChannel(label="slave_trig", channel_db=self.channelDatabase)
st.phys_chan = trig_channel
st.pulse_params = {"length": pulse_length, "shape_fun": "constant"}
master_instrument.master = True
master_instrument.trigger_source = "internal"
self.add_and_update_dict([st])
else:
raise ValueError(f"Could not determine which transmitter to set as master for {master_instrument}:{trig_channel}")
# Used by QGL2, which needs a non-class member function to
# retrieve a Qubit from the CL without accessing the CL directly
def QubitFactory(label):
''' Return a saved qubit channel'''
if channelLib is None:
raise Exception("No channel library initialized")
channelLib.update_channelDict()
# cs = [c for c in channelLib.channelDatabase.channels if c.label==label]
cs = [c for c in channelLib.channelDatabase.channels if c.label==label and isinstance(c, Channels.Qubit)]
# q = channelLib.session.query(Channels.Qubit).filter(Channels.Qubit.label==label and Channels.Qubit.channel_db==channelLib.channelDatabase).all()
if len(cs) == 1:
return cs[0]
else:
raise Exception(f"Expected to find a single qubit '{label}' but found {len(cs)} qubits with the same label instead.")
def MeasFactory(label):
''' Return a saved measurement channel.'''
if channelLib is None:
raise Exception("No channel library initialized")
channelLib.update_channelDict()
# cs = [c for c in channelLib.channelDatabase.channels if c.label==label]
cs = [c for c in channelLib.channelDatabase.channels if c.label==label and isinstance(c, Channels.Measurement)]
# q = channelLib.session.query(Channels.Qubit).filter(Channels.Qubit.label==label and Channels.Qubit.channel_db==channelLib.channelDatabase).all()
if len(cs) == 1:
return cs[0]
else:
raise Exception(f"Expected to find a single measurement '{label}' but found {len(cs)} measurements with the same label instead.")
def MarkerFactory(label):
''' Return a saved Marker channel with this label. '''
if channelLib is None:
raise Exception("No channel library initialized")
# cs = [c for c in channelLib.channelDatabase.channels if c.label==label]
cs = [c for c in channelLib.channelDatabase.channels if c.label==label and isinstance(c, Channels.LogicalMarkerChannel)]
channelLib.update_channelDict()
# q = channelLib.session.query(Channels.Qubit).filter(Channels.Qubit.label==label and Channels.Qubit.channel_db==channelLib.channelDatabase).all()
if len(cs) == 1:
return cs[0]
else:
raise Exception(f"Expected to find a single marker '{label}' but found {len(cs)} markers with the same label instead.")
def EdgeFactory(source, target):
if channelLib is None:
raise Exception("No channel library initialized")
channelLib.update_channelDict()
if channelLib.connectivityG.has_edge(source, target):
return channelLib.connectivityG[source][target]['channel']
elif channelLib.connectivityG.has_edge(target, source):
return channelLib.connectivityG[target][source]['channel']
else:
raise ValueError('Edge {0} not found in connectivity graph'.format((source, target)))
|
|
#!/usr/bin/env python
#
# Utility script for ESP-IDF developers to work with the CODEOWNERS file.
#
# SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import subprocess
import sys
from idf_ci_utils import IDF_PATH
CODEOWNERS_PATH = os.path.join(IDF_PATH, '.gitlab', 'CODEOWNERS')
CODEOWNER_GROUP_PREFIX = '@esp-idf-codeowners/'
def get_all_files():
"""
Get list of all file paths in the repository.
"""
# only split on newlines, since file names may contain spaces
return subprocess.check_output(['git', 'ls-files'], cwd=IDF_PATH).decode('utf-8').strip().split('\n')
def pattern_to_regex(pattern):
"""
Convert the CODEOWNERS path pattern into a regular expression string.
"""
orig_pattern = pattern # for printing errors later
# Replicates the logic from normalize_pattern function in Gitlab ee/lib/gitlab/code_owners/file.rb:
if not pattern.startswith('/'):
pattern = '/**/' + pattern
if pattern.endswith('/'):
pattern = pattern + '**/*'
# Convert the glob pattern into a regular expression:
# first into intermediate tokens
pattern = (pattern.replace('**/', ':REGLOB:')
.replace('**', ':INVALID:')
.replace('*', ':GLOB:')
.replace('.', ':DOT:')
.replace('?', ':ANY:'))
if pattern.find(':INVALID:') >= 0:
raise ValueError("Likely invalid pattern '{}': '**' should be followed by '/'".format(orig_pattern))
# then into the final regex pattern:
re_pattern = (pattern.replace(':REGLOB:', '(?:.*/)?')
.replace(':GLOB:', '[^/]*')
.replace(':DOT:', '[.]')
.replace(':ANY:', '.') + '$')
if re_pattern.startswith('/'):
re_pattern = '^' + re_pattern
return re_pattern
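# Illustrative conversions (not part of the original script):
#   pattern_to_regex('*.py')   -> '^/(?:.*/)?[^/]*[.]py$'
#   pattern_to_regex('/docs/') -> '^/docs/(?:.*/)?[^/]*$'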
def files_by_regex(all_files, regex):
"""
Return all files in the repository matching the given regular expression.
"""
return [file for file in all_files if regex.search('/' + file)]
def files_by_pattern(all_files, pattern=None):
"""
Return all the files in the repository matching the given CODEOWNERS pattern.
"""
if not pattern:
return all_files
return files_by_regex(all_files, re.compile(pattern_to_regex(pattern)))
def action_identify(args):
best_match = []
all_files = get_all_files()
with open(CODEOWNERS_PATH) as f:
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
tokens = line.split()
path_pattern = tokens[0]
owners = tokens[1:]
files = files_by_pattern(all_files, path_pattern)
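# The last matching line wins (mirroring GitLab's CODEOWNERS semantics),
# so keep overwriting best_match.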
if args.path in files:
best_match = owners
for owner in best_match:
print(owner)
def action_test_pattern(args):
re_pattern = pattern_to_regex(args.pattern)
if args.regex:
print(re_pattern)
return
files = files_by_regex(get_all_files(), re.compile(re_pattern))
for f in files:
print(f)
def action_ci_check(args):
errors = []
def add_error(msg):
errors.append('{}:{}: {}'.format(CODEOWNERS_PATH, line_no, msg))
all_files = get_all_files()
prev_path_pattern = ''
with open(CODEOWNERS_PATH) as f:
for line_no, line in enumerate(f, start=1):
# Skip empty lines and comments
line = line.strip()
if line.startswith('# sort-order-reset'):
prev_path_pattern = ''
if (not line
or line.startswith('#') # comment
or line.startswith('[') # file group
or line.startswith('^[')): # optional file group
continue
# Each line has a form of "<path> <owners>+"
tokens = line.split()
path_pattern = tokens[0]
owners = tokens[1:]
if not owners:
add_error('no owners specified for {}'.format(path_pattern))
# Check that the file is sorted by path patterns
if not in_order(prev_path_pattern, path_pattern):
add_error('file is not sorted: {} < {}'.format(path_pattern, prev_path_pattern))
prev_path_pattern = path_pattern
# Check that the pattern matches at least one file
files = files_by_pattern(all_files, path_pattern)
if not files:
add_error('no files matched by pattern {}'.format(path_pattern))
for o in owners:
# Sanity-check the owner group name
if not o.startswith(CODEOWNER_GROUP_PREFIX):
add_error("owner {} doesn't start with {}".format(o, CODEOWNER_GROUP_PREFIX))
if not errors:
print('No errors found.')
else:
print('Errors found!')
for e in errors:
print(e)
raise SystemExit(1)
def in_order(prev, current):
"""
Return True if the ordering of these two lines is correct ('prev' should come before 'current').
CODEOWNERS entries should be ordered alphabetically, except that order is also significant for the
CODEOWNERS syntax itself (the last matching line has priority).
This means that wildcards are allowed in either order: placed first, a wildcard acts as a catch-all
fallback before a more specific pattern; placed second, it overrides the match made on a previous
line (e.g. '/xyz/**/*.py' to override the owner of the Python files inside /xyz/).
"""
if not prev:
return True # first element in file
def is_separator(c):
return c in '-_/' # ignore differences between separators for ordering purposes
def is_wildcard(c):
return c in '?*'
# looping until we see a different character
for a, b in zip(prev, current):
if is_separator(a) and is_separator(b):
continue
if is_wildcard(a) or is_wildcard(b):
return True # if the strings matched up to one of them having a wildcard, treat as in order
if a != b:
return b > a
assert a == b
# common substrings up to the common length are the same, so the longer string should be after
return len(current) >= len(prev)
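# Illustrative behaviour (not part of the original script):
#   in_order('/components/app_update/', '/components/bootloader/')  -> True
#   in_order('/components/bt/', '/components/app_update/')          -> False
#   in_order('/**/*.py', '/components/app_trace/')                  -> True (wildcard)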
def main():
parser = argparse.ArgumentParser(
sys.argv[0], description='Internal helper script for working with the CODEOWNERS file.'
)
subparsers = parser.add_subparsers(dest='action')
identify = subparsers.add_parser(
'identify',
help='List the owners of the specified path within IDF.'
"This command doesn't support files inside submodules, or files not added to git repository.",
)
identify.add_argument('path', help='Path of the file relative to the root of the repository')
subparsers.add_parser(
'ci-check',
help='Check CODEOWNERS file: every line should match at least one file, sanity-check group names, '
'check that the file is sorted by paths',
)
test_pattern = subparsers.add_parser(
'test-pattern',
help='Print files in the repository for a given CODEOWNERS pattern. Useful when adding new rules.'
)
test_pattern.add_argument('--regex', action='store_true', help='Print the equivalent regular expression instead of the file list.')
test_pattern.add_argument('pattern', help='Path pattern to get the list of files for')
args = parser.parse_args()
if args.action is None:
parser.print_help()
parser.exit(1)
action_func_name = 'action_' + args.action.replace('-', '_')
action_func = globals()[action_func_name]
action_func(args)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
"""Tests for API client and hunts-related API calls."""
import csv
import io
import os
import stat
import zipfile
from absl import app
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import timeline as rdf_timeline
from grr_response_core.lib.util import chunked
from grr_response_proto import jobs_pb2
from grr_response_proto.api import hunt_pb2
from grr_response_proto.api import timeline_pb2
from grr_response_server import data_store
from grr_response_server import flow
from grr_response_server.databases import db
from grr_response_server.databases import db_test_utils
from grr_response_server.flows.general import processes as flows_processes
from grr_response_server.flows.general import timeline
from grr_response_server.gui import api_integration_test_lib
from grr_response_server.output_plugins import csv_plugin
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import hunt_objects as rdf_hunt_objects
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import hunt_test_lib
from grr.test_lib import test_lib
class ApiClientLibHuntTest(
hunt_test_lib.StandardHuntTestMixin,
api_integration_test_lib.ApiIntegrationTest,
):
"""Tests flows-related part of GRR Python API client library."""
def testListHunts(self):
hunt_id = self.StartHunt()
hs = list(self.api.ListHunts())
self.assertLen(hs, 1)
self.assertEqual(hs[0].hunt_id, hunt_id)
self.assertEqual(hs[0].data.client_limit, 100)
def testGetHunt(self):
hunt_id = self.StartHunt()
h = self.api.Hunt(hunt_id).Get()
self.assertEqual(h.hunt_id, hunt_id)
self.assertEqual(h.data.name, "GenericHunt")
def testModifyHunt(self):
hunt_id = self.StartHunt(paused=True)
h = self.api.Hunt(hunt_id).Get()
self.assertEqual(h.data.client_limit, 100)
h = h.Modify(client_limit=200)
self.assertEqual(h.data.client_limit, 200)
h = self.api.Hunt(hunt_id).Get()
self.assertEqual(h.data.client_limit, 200)
def testDeleteHunt(self):
hunt_id = self.StartHunt(paused=True)
self.api.Hunt(hunt_id).Delete()
with self.assertRaises(db.UnknownHuntError):
data_store.REL_DB.ReadHuntObject(hunt_id)
def testStartHunt(self):
hunt_id = self.StartHunt(paused=True)
h = self.api.Hunt(hunt_id).Get()
self.assertEqual(h.data.state, h.data.PAUSED)
h = h.Start()
self.assertEqual(h.data.state, h.data.STARTED)
h = self.api.Hunt(hunt_id).Get()
self.assertEqual(h.data.state, h.data.STARTED)
def testStopHunt(self):
hunt_id = self.StartHunt()
h = self.api.Hunt(hunt_id).Get()
self.assertEqual(h.data.state, h.data.STARTED)
h = h.Stop()
self.assertEqual(h.data.state, h.data.STOPPED)
h = self.api.Hunt(hunt_id).Get()
self.assertEqual(h.data.state, h.data.STOPPED)
def testListResults(self):
self.client_ids = self.SetupClients(5)
with test_lib.FakeTime(42):
hunt_id = self.StartHunt()
self.RunHunt(failrate=-1)
h = self.api.Hunt(hunt_id).Get()
results = list(h.ListResults())
client_ids = set(r.client.client_id for r in results)
self.assertEqual(client_ids, set(self.client_ids))
for r in results:
self.assertEqual(r.timestamp, 42000000)
self.assertEqual(r.payload.pathspec.path, "/tmp/evil.txt")
def testListLogsWithoutClientIds(self):
hunt_id = self.StartHunt()
client_ids = self.SetupClients(2)
self.AssignTasksToClients(client_ids)
data_store.REL_DB.WriteFlowLogEntries([
rdf_flow_objects.FlowLogEntry(
client_id=client_ids[0],
flow_id=hunt_id,
hunt_id=hunt_id,
message="Sample message: foo."),
rdf_flow_objects.FlowLogEntry(
client_id=client_ids[1],
flow_id=hunt_id,
hunt_id=hunt_id,
message="Sample message: bar.")
])
logs = list(self.api.Hunt(hunt_id).ListLogs())
self.assertLen(logs, 2)
self.assertEqual(logs[0].data.log_message, "Sample message: foo.")
self.assertEqual(logs[1].data.log_message, "Sample message: bar.")
def testListLogsWithClientIds(self):
self.client_ids = self.SetupClients(2)
hunt_id = self.StartHunt()
self.RunHunt(failrate=-1)
logs = list(self.api.Hunt(hunt_id).ListLogs())
client_ids = set()
for l in logs:
client_ids.add(l.client.client_id)
self.assertEqual(client_ids, set(self.client_ids))
def testListErrors(self):
hunt_id = self.StartHunt()
client_ids = self.SetupClients(2)
with test_lib.FakeTime(52):
flow_id = flow_test_lib.StartFlow(
flows_processes.ListProcesses,
client_id=client_ids[0],
parent=flow.FlowParent.FromHuntID(hunt_id))
flow_obj = data_store.REL_DB.ReadFlowObject(client_ids[0], flow_id)
flow_obj.flow_state = flow_obj.FlowState.ERROR
flow_obj.error_message = "Error foo."
data_store.REL_DB.UpdateFlow(client_ids[0], flow_id, flow_obj=flow_obj)
with test_lib.FakeTime(55):
flow_id = flow_test_lib.StartFlow(
flows_processes.ListProcesses,
client_id=client_ids[1],
parent=flow.FlowParent.FromHuntID(hunt_id))
flow_obj = data_store.REL_DB.ReadFlowObject(client_ids[1], flow_id)
flow_obj.flow_state = flow_obj.FlowState.ERROR
flow_obj.error_message = "Error bar."
flow_obj.backtrace = "<some backtrace>"
data_store.REL_DB.UpdateFlow(client_ids[1], flow_id, flow_obj=flow_obj)
errors = list(self.api.Hunt(hunt_id).ListErrors())
self.assertLen(errors, 2)
self.assertEqual(errors[0].log_message, "Error foo.")
self.assertEqual(errors[0].client.client_id, client_ids[0])
self.assertEqual(errors[0].backtrace, "")
self.assertEqual(errors[1].log_message, "Error bar.")
self.assertEqual(errors[1].client.client_id, client_ids[1])
self.assertEqual(errors[1].backtrace, "<some backtrace>")
def testListCrashes(self):
hunt_id = self.StartHunt()
client_ids = self.SetupClients(2)
client_mocks = dict([(client_id, flow_test_lib.CrashClientMock(client_id))
for client_id in client_ids])
self.AssignTasksToClients(client_ids)
hunt_test_lib.TestHuntHelperWithMultipleMocks(client_mocks)
crashes = list(self.api.Hunt(hunt_id).ListCrashes())
self.assertLen(crashes, 2)
self.assertCountEqual([x.client.client_id for x in crashes], client_ids)
for c in crashes:
self.assertEqual(c.crash_message, "Client killed during transaction")
def testListClients(self):
hunt_id = self.StartHunt()
client_ids = self.SetupClients(5)
self.AssignTasksToClients(client_ids=client_ids[:-1])
self.RunHunt(client_ids=[client_ids[-1]], failrate=0)
h = self.api.Hunt(hunt_id)
clients = list(h.ListClients(h.CLIENT_STATUS_STARTED))
self.assertLen(clients, 5)
clients = list(h.ListClients(h.CLIENT_STATUS_OUTSTANDING))
self.assertLen(clients, 4)
clients = list(h.ListClients(h.CLIENT_STATUS_COMPLETED))
self.assertLen(clients, 1)
self.assertEqual(clients[0].client_id, client_ids[-1])
def testGetClientCompletionStats(self):
hunt_id = self.StartHunt(paused=True)
client_ids = self.SetupClients(5)
self.AssignTasksToClients(client_ids=client_ids)
client_stats = self.api.Hunt(hunt_id).GetClientCompletionStats()
self.assertEmpty(client_stats.start_points)
self.assertEmpty(client_stats.complete_points)
def testGetStats(self):
hunt_id = self.StartHunt()
self.client_ids = self.SetupClients(5)
self.RunHunt(failrate=-1)
stats = self.api.Hunt(hunt_id).GetStats()
self.assertLen(stats.worst_performers, 5)
def testGetFilesArchive(self):
hunt_id = self.StartHunt()
zip_stream = io.BytesIO()
self.api.Hunt(hunt_id).GetFilesArchive().WriteToStream(zip_stream)
zip_fd = zipfile.ZipFile(zip_stream)
namelist = zip_fd.namelist()
self.assertTrue(namelist)
def testExportedResults(self):
hunt_id = self.StartHunt()
zip_stream = io.BytesIO()
self.api.Hunt(hunt_id).GetExportedResults(
csv_plugin.CSVInstantOutputPlugin.plugin_name).WriteToStream(zip_stream)
zip_fd = zipfile.ZipFile(zip_stream)
namelist = zip_fd.namelist()
self.assertTrue(namelist)
def testGetCollectedTimelinesBody(self):
client_id = db_test_utils.InitializeClient(data_store.REL_DB)
fqdn = "foo.bar.quux"
snapshot = rdf_objects.ClientSnapshot()
snapshot.client_id = client_id
snapshot.knowledge_base.fqdn = fqdn
data_store.REL_DB.WriteClientSnapshot(snapshot)
hunt_id = "B1C2E3D4"
flow_id = "1B2C3E4D"
hunt_obj = rdf_hunt_objects.Hunt()
hunt_obj.hunt_id = hunt_id
hunt_obj.args.standard.client_ids = [client_id]
hunt_obj.args.standard.flow_name = timeline.TimelineFlow.__name__
hunt_obj.hunt_state = rdf_hunt_objects.Hunt.HuntState.PAUSED
data_store.REL_DB.WriteHuntObject(hunt_obj)
flow_obj = rdf_flow_objects.Flow()
flow_obj.client_id = client_id
flow_obj.flow_id = flow_id
flow_obj.flow_class_name = timeline.TimelineFlow.__name__
flow_obj.create_time = rdfvalue.RDFDatetime.Now()
flow_obj.parent_hunt_id = hunt_id
data_store.REL_DB.WriteFlowObject(flow_obj)
entry_1 = rdf_timeline.TimelineEntry()
entry_1.path = "/bar/baz/quux".encode("utf-8")
entry_1.ino = 5926273453
entry_1.size = 13373
entry_1.atime_ns = 111 * 10**9
entry_1.mtime_ns = 222 * 10**9
entry_1.ctime_ns = 333 * 10**9
entry_1.mode = 0o664
entry_2 = rdf_timeline.TimelineEntry()
entry_2.path = "/bar/baz/quuz".encode("utf-8")
entry_2.ino = 6037384564
entry_2.size = 13374
entry_2.atime_ns = 777 * 10**9
entry_2.mtime_ns = 888 * 10**9
entry_2.ctime_ns = 999 * 10**9
entry_2.mode = 0o777
entries = [entry_1, entry_2]
blobs = list(rdf_timeline.TimelineEntry.SerializeStream(iter(entries)))
blob_ids = data_store.BLOBS.WriteBlobsWithUnknownHashes(blobs)
result = rdf_timeline.TimelineResult()
result.entry_batch_blob_ids = [blob_id.AsBytes() for blob_id in blob_ids]
flow_result = rdf_flow_objects.FlowResult()
flow_result.client_id = client_id
flow_result.flow_id = flow_id
flow_result.payload = result
data_store.REL_DB.WriteFlowResults([flow_result])
buffer = io.BytesIO()
self.api.Hunt(hunt_id).GetCollectedTimelines(
timeline_pb2.ApiGetCollectedTimelineArgs.Format.BODY).WriteToStream(
buffer)
with zipfile.ZipFile(buffer, mode="r") as archive:
with archive.open(f"{client_id}_{fqdn}.body", mode="r") as file:
content_file = file.read().decode("utf-8")
rows = list(csv.reader(io.StringIO(content_file), delimiter="|"))
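# Each line of the (Sleuth Kit style) body format is
# MD5|name|inode|mode|UID|GID|size|atime|mtime|ctime|crtime, hence the
# column indices used in the assertions below.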
self.assertLen(rows, 2)
self.assertEqual(rows[0][1], "/bar/baz/quux")
self.assertEqual(rows[0][2], "5926273453")
self.assertEqual(rows[0][3], stat.filemode(0o664))
self.assertEqual(rows[0][6], "13373")
self.assertEqual(rows[0][7], "111")
self.assertEqual(rows[0][8], "222")
self.assertEqual(rows[0][9], "333")
self.assertEqual(rows[1][1], "/bar/baz/quuz")
self.assertEqual(rows[1][2], "6037384564")
self.assertEqual(rows[1][3], stat.filemode(0o777))
self.assertEqual(rows[1][6], "13374")
self.assertEqual(rows[1][7], "777")
self.assertEqual(rows[1][8], "888")
self.assertEqual(rows[1][9], "999")
def testGetCollectedTimelinesGzchunked(self):
client_id = db_test_utils.InitializeClient(data_store.REL_DB)
fqdn = "foo.bar.baz"
snapshot = rdf_objects.ClientSnapshot()
snapshot.client_id = client_id
snapshot.knowledge_base.fqdn = fqdn
data_store.REL_DB.WriteClientSnapshot(snapshot)
hunt_id = "A0B1D2C3"
flow_id = "0A1B2D3C"
hunt_obj = rdf_hunt_objects.Hunt()
hunt_obj.hunt_id = hunt_id
hunt_obj.args.standard.client_ids = [client_id]
hunt_obj.args.standard.flow_name = timeline.TimelineFlow.__name__
hunt_obj.hunt_state = rdf_hunt_objects.Hunt.HuntState.PAUSED
data_store.REL_DB.WriteHuntObject(hunt_obj)
flow_obj = rdf_flow_objects.Flow()
flow_obj.client_id = client_id
flow_obj.flow_id = flow_id
flow_obj.flow_class_name = timeline.TimelineFlow.__name__
flow_obj.create_time = rdfvalue.RDFDatetime.Now()
flow_obj.parent_hunt_id = hunt_id
data_store.REL_DB.WriteFlowObject(flow_obj)
entry_1 = rdf_timeline.TimelineEntry()
entry_1.path = "/foo/bar".encode("utf-8")
entry_1.ino = 7890178901
entry_1.size = 4815162342
entry_1.atime_ns = 123 * 10**9
entry_1.mtime_ns = 234 * 10**9
entry_1.ctime_ns = 567 * 10**9
entry_1.mode = 0o654
entry_2 = rdf_timeline.TimelineEntry()
entry_2.path = "/foo/baz".encode("utf-8")
entry_2.ino = 8765487654
entry_2.size = 1337
entry_2.atime_ns = 987 * 10**9
entry_2.mtime_ns = 876 * 10**9
entry_2.ctime_ns = 765 * 10**9
entry_2.mode = 0o757
entries = [entry_1, entry_2]
blobs = list(rdf_timeline.TimelineEntry.SerializeStream(iter(entries)))
blob_ids = data_store.BLOBS.WriteBlobsWithUnknownHashes(blobs)
result = rdf_timeline.TimelineResult()
result.entry_batch_blob_ids = [blob_id.AsBytes() for blob_id in blob_ids]
flow_result = rdf_flow_objects.FlowResult()
flow_result.client_id = client_id
flow_result.flow_id = flow_id
flow_result.payload = result
data_store.REL_DB.WriteFlowResults([flow_result])
buffer = io.BytesIO()
fmt = timeline_pb2.ApiGetCollectedTimelineArgs.Format.RAW_GZCHUNKED
self.api.Hunt(hunt_id).GetCollectedTimelines(fmt).WriteToStream(buffer)
with zipfile.ZipFile(buffer, mode="r") as archive:
with archive.open(f"{client_id}_{fqdn}.gzchunked", mode="r") as file:
chunks = chunked.ReadAll(file)
entries = list(rdf_timeline.TimelineEntry.DeserializeStream(chunks))
self.assertEqual(entries, [entry_1, entry_2])
def testCreatePerClientFileCollectionHunt(self):
client_ids = self.SetupClients(1)
args = hunt_pb2.ApiCreatePerClientFileCollectionHuntArgs(
description="test hunt")
pca = args.per_client_args.add()
pca.client_id = client_ids[0]
pca.path_type = jobs_pb2.PathSpec.OS
path = os.path.join(self.base_path, "numbers.txt")
pca.paths.append(path)
h = self.api.CreatePerClientFileCollectionHunt(args)
h.Start()
self.RunHunt(
client_ids=client_ids,
client_mock=action_mocks.MultiGetFileClientMock())
results = list(h.ListResults())
self.assertLen(results, 1)
self.assertEqual(results[0].client.client_id, client_ids[0])
self.assertEqual(results[0].payload.pathspec.path, path)
self.assertEqual(results[0].payload.pathspec.pathtype, jobs_pb2.PathSpec.OS)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
|
import os
import struct
import re
import sys
from xml.dom.minidom import parseString
generalDir = 'E:\\programming\\Node\\POESkillTree\\'
spellDir = generalDir + "spellsHTML\\"
attackDir = generalDir + "attacksHTML\\"
spells = []
attacks = []
dirs = [spellDir, attackDir]
f_escape = open(generalDir + "POEspellDmgCalculator\\escape.txt", "rb")
escape = f_escape.read(1)
def printErr(*objs):
print(*objs, file=sys.stderr)
def ignore_exception(IgnoreException=Exception, DefaultVal=None):
""" Decorator for ignoring exception from a function
e.g. @ignore_exception(DivideByZero)
e.g.2. ignore_exception(DivideByZero)(Divide)(2/0)
"""
def dec(function):
def _dec(*args, **kwargs):
try:
return function(*args, **kwargs)
except IgnoreException:
return DefaultVal
return _dec
return dec
sint = ignore_exception(ValueError)(int)
sfloat = ignore_exception(ValueError)(float)
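# Illustrative behaviour of the lenient wrappers above:
#   sint("12")     -> 12
#   sint("12-15")  -> None  (the ValueError is swallowed)
#   sfloat("3.5%") -> None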
def strToFloat(s):
return float(re.search("([\d.]*)", s).group(1))
def getXmlVal(s):
return re.search("(<?\<.*?\>)(.*?)\<\/", s.lower()).group(2).strip()
def stripXml(xml):
return re.sub('\<[^>]+>', '', xml)
def getNodeVal(node):
return getXmlVal(node.toxml())
dmgTypes = ['fire', 'cold', 'light', 'phys', 'chaos', 'burning_from_fire']
class dmg:
def __init__(self):
self.lvlStages = []
self.mana = list(1 for i in range(0, 35))
self.APS = list(1 for i in range(0, 35))
self.chains = list(1 for i in range(0, 35))
self.hasAPS = False
self.hasChain = False
self.crit = 0
self.radius = -1
self.effectiveness = 1
self.castTime = 1
def getStage(self, lvl):
for stage, n in self.lvlStages:
if n >= lvl:
return stage
return 0
def setDmg(self, stage, typeStr, dmgStr):
isDot = False
if re.match("\s?\d+?\s*-\s*\d+?\s?", dmgStr):
dmgVal = list(int(strToFloat(n)) for n in dmgStr.split("-"))
elif re.match("\s?\d+.*", dmgStr):
try:
val = float(dmgStr.replace(",", ""))
dmgVal = [val, val]
isDot = True
except Exception:
dmgVal = [0, 0]
else:
dmgVal = [0, 0]
if "fire" in typeStr:
if isDot or 'dot' in typeStr:
if not hasattr(self, 'burning_from_fire'):
self.burning_from_fire = list([0, 0] for i in range(0, 35))
self.burning_from_fire[stage] = dmgVal
else:
if not hasattr(self, 'fire'):
self.fire = list([0, 0] for i in range(0, 35))
self.fire[stage] = dmgVal
elif "cold" in typeStr or "ice" in typeStr:
if not hasattr(self, 'cold'):
self.cold = list([0, 0] for i in range(0, 35))
self.cold[stage] = dmgVal
elif "light" in typeStr:
if not hasattr(self, 'light'):
self.light = list([0, 0] for i in range(0, 35))
self.light[stage] = dmgVal
elif "chaos" in typeStr:
if not hasattr(self, 'chaos'):
self.chaos = list([0, 0] for i in range(0, 35))
self.chaos[stage] = dmgVal
elif "phys" in typeStr or "damage" == typeStr:
if not hasattr(self, 'phys'):
self.phys = list([0, 0] for i in range(0, 35))
self.phys[stage] = dmgVal
elif "mana" in typeStr:
self.mana[stage] = dmgVal[0]
elif "APS" in typeStr:
self.APS[stage] = dmgVal[0]
self.hasAPS = True
elif 'chains' in typeStr:
self.chains[stage] = dmgVal[0]
self.hasChain = True
#else:
#print("rejected dmg type {}:{}".format(typeStr, dmgStr))
def hatred(self, mult=0.36):
key = 0
for minMax in self.phys:
for val in minMax:
self.cold[key].append(val * mult)
key += 1
def addedFire(self, mult=0.39):
key = 0
for minMax in self.phys:
for val in minMax:
self.fire[key].append(val * mult)
key += 1
def getAvgDmg(self, lvl=19):
stage = lvl  # self.getStage(lvl)
return (self.effectiveness * (1 + self.crit) / 2 / self.castTime *
(sum(self.fire[stage]) +
sum(self.cold[stage]) +
sum(self.light[stage]) +
sum(self.phys[stage]) +
sum(self.chaos[stage])))
def getDmgPerMana(self, lvl=19):
return self.getAvgDmg(lvl) / self.mana[lvl]
def tryGetTitle(node):
try:
nodeTitle = re.search('title="([^"]*)"', node).group(1)
except Exception:
nodeTitle = node
return nodeTitle.replace('category:', '').replace(' skills', '')
def fixUnclosedTags(content):
return re.sub('(\<img [^>]*?)/?\>', '\g<1> />', content).replace('<br>', '<br />')
def matchClosingTag(content, reStart, open, close):
openStr = re.search(reStart, content)
if not openStr:
return False
offset = content.find(openStr.group(0))
innerTag = content.find(open, offset + open.__len__())
closing = content.find(close, offset) + close.__len__()
openTags = 0
if innerTag < closing:
openTags = 1
while innerTag > 0 and openTags > 0:
closing = content.find(close, closing) + close.__len__()
innerTag = content.find(open, innerTag + open.__len__())
if innerTag > closing:
openTags = 0
return content[offset:closing]
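# Illustrative call on hypothetical markup with one level of nesting:
#   matchClosingTag('<div a><div b></div></div>', '\<div a', '<div', '</div>')
#   -> '<div a><div b></div></div>'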
class skill:
def __init__(self, fileName, dir):
print(fileName)
self.name = fileName.split(".")[0]
self.dmg = dmg()
self.keywords = []
self.modifiers = []
self.qualityBonus = ""
f_spell = open(dir + file, "rb")
content = ""
byte = f_spell.read(1)
prev = 0;
prevPrev = 0
while byte:
if escape == byte:
content += "-"
else:
content += byte.decode("utf-8", "ignore")
byte = f_spell.read(1)
f_spell.close()
content = fixUnclosedTags(content.replace("–", "-").replace("\r", "").replace("\n", ""))
self.content = content.lower()
self.getDmgTable(content)
self.parseMetadata()
def getDmgTable(self, content):
tableStr = matchClosingTag(content, '\<table [^>]*?class="[^"]*?GemLevelTable', "<table", "</table>")
try:
table = parseString(tableStr).getElementsByTagName("table")[0]
except Exception:
print('table str:', tableStr)
table = parseString(tableStr).getElementsByTagName("table")[0]
if "GemLevelTable" in table.attributes["class"].value :
self.parseDmg(table)
def parseDmg(self, table):
charLvlColumn = -1
dmgColumnNames = {}
rows = table.getElementsByTagName("tr")
rowId = 0
for row in rows:
i = 0
if 0 == rowId :
for td in row.getElementsByTagName("th"):
tdTxt = getNodeVal(td)
if "required level" in tdTxt:
charLvlColumn = i - 1
elif "dot" in tdTxt:
dmgColumnNames[i - 1] = tdTxt
elif "damage" in tdTxt:
if not "absorption" in tdTxt:
dmgColumnNames[i - 1] = tdTxt
elif "cost" in tdTxt:
dmgColumnNames[i - 1] = tdTxt
elif 'aps' in tdTxt.lower():
dmgColumnNames[i - 1] = 'APS'
elif 'chains' in tdTxt.lower():
dmgColumnNames[i - 1] = 'chains'
i += 1
else:
for td in row.getElementsByTagName("td"):
if i == charLvlColumn:
val = stripXml(getNodeVal(td))
if not val:
val = -1
val = sint(val)
self.dmg.lvlStages.append(val)
elif i in dmgColumnNames.keys():
dmgVal = stripXml(getNodeVal(td))
if None is dmgVal or None is re.match('\d+', dmgVal):
if val in self.dmg.lvlStages:
self.dmg.lvlStages.remove(val)
else:
self.dmg.setDmg(rowId, dmgColumnNames[i], dmgVal)
i += 1
rowId += 1
def parseMetadata(self):
eff = "100%"
crit = "0%"
self.metaString = matchClosingTag(self.content, '\<table [^>]*?class="[^"]*?GemInfoboxContainer'.lower(), "<table", "</table>")
try:
eff = self.getMeta("effectiveness")
self.dmg.effectiveness = self.percentToFloat(eff)
except Exception: pass
try:
crit = self.getMeta("crit.*?chance")
self.dmg.crit = self.percentToFloat(crit)
except Exception: pass
try:
castTime = self.getMeta("cast.*?time")
self.dmg.castTime = self.strToFloat(castTime)
except Exception: pass
try:
qualityBonus = stripXml(self.getMeta("per.*?quality"))
self.qualityBonus = self.getBonus(qualityBonus)
except Exception: pass
try:
keywords = self.getMeta("keywords")
self.keywords = [tryGetTitle(word) for word in keywords.split(',')]
except Exception: pass
try:
radius = self.getMeta("radius")
self.dmg.radius = self.strToFloat(radius)
except Exception: pass
modifiers = []
offset = 1
while offset > 0:
modifier = matchClosingTag(self.metaString[offset + 1:], '\<div [^>]*?class="[^"]*?GemInfoboxModifier'.lower(), "<div", "</div>")
if not modifier:
offset = -1
else:
offset += 1 + self.metaString[offset + 1:].find(modifier)
modifiers.append(getXmlVal(modifier))
self.modifiers = [modifier.replace("'", "") for modifier in modifiers]
def getBonus(self, s):
return s
def strToFloat(self, s):
return float(re.search("([\d.]*)", s).group(1))
def percentToFloat(self, s):
return self.strToFloat(s) / 100
def getMeta(self, name):
regexStr = ("\<tr\>\s*\<td\>.*?" +
name.lower() +
".*?\<\/td\>\s*\<td\>\s*(.*?)\<\/td\>")
return re.search(regexStr, self.metaString).group(1).strip()
isSpells = True
for dir in dirs:
for file in os.listdir(dir):
newSkill = skill(file, dir)
if isSpells:
spells.append(newSkill)
else:
attacks.append(newSkill)
isSpells = False
# s = "Name\tavgPhys\tavgFire\tavgCold\tavglight\teff\tcrit\taddedFire\taddedCold\taddedLight\tshocked multi\tburnDmg\tMana\ttotal dmg\n"
# row = 2
# for skill in skills:
# s += "{}\n".format(
# "\t".join(str(x) for x in [
# skill.name, #D
# "{}".format(sum(skill.dmg.phys[19]) / 2, row),#E
# "={}+K{}".format(sum(skill.dmg.fire[19]) / 2, row),#F
# "={}+L{}".format(sum(skill.dmg.cold[19]) / 2, row),#G
# "={}+M{}".format(sum(skill.dmg.light[19]) / 2, row),#H
# skill.dmg.effectiveness,#I
# skill.dmg.crit,#J
# "=E{} * $B$2".format(row),#K
# "=E{} * $B$1".format(row),#L
# "=E{} * $B$3".format(row),#M
# "=(1 + (J{0} * (H{0} > 0)))".format(row),#N shock multi
# "=0.8 * J{0} * F{0}".format(row),#O burn dmg
# skill.dmg.mana[19],#P
# "=(E{0}+F{0}+G{0}+H{0}) * I{0} * (1 + J{0}) * N{0}".format(row),#Q
# "=(Q{0} + O{0}) * N{0}".format(row)#R
# ]))
# row += 1
# f = open("skillAvgDmgTest.txt", "w")
# f.write(s)
# f.close()
def printMinMaxDmg(dmg):
return "{{'min': {}, 'max': {}}}".format(dmg[0], dmg[1])
for parse_spells in [True, False]:
skillStrs = []
if parse_spells:
skills = spells
fileName = 'parsedSpells.json'
for skill in skills:
skillStr = "'{}': {{'crit': {}, 'eff': {}, 'castTime': {}, 'radius': {}, 'qualityBonus': '{}', 'keywords': [{}], 'modifiers': [{}], 'hasAPS': {}, 'chains': {}, 'dmg': [".format(
skill.name,
skill.dmg.crit,
skill.dmg.effectiveness,
skill.dmg.castTime,
skill.dmg.radius,
skill.qualityBonus,
', '.join(["'{}'".format(word) for word in skill.keywords]),
', '.join(["'{}'".format(modifier) for modifier in skill.modifiers]),
'true' if skill.dmg.hasAPS else 'false',
'true' if skill.dmg.hasChain else 'false')
i = 1
dmgStrs = []
for lvl in skill.dmg.lvlStages:
dmgStr = "{"
dmgStr += "'lvl': '{}'".format(lvl)
for type in dmgTypes:
if hasattr(skill.dmg, type):
dmgStr += ", '{}': {}".format(type, printMinMaxDmg(getattr(skill.dmg, type)[i]))
dmgStr += ", 'mana': {}".format(skill.dmg.mana[i])
if skill.dmg.hasAPS:
dmgStr += ", 'APS': {}".format(skill.dmg.APS[i])
if skill.dmg.hasChain:
dmgStr += ", 'chain': {}".format(skill.dmg.chains[i])
dmgStr += "}";
dmgStrs.append(dmgStr)
i += 1
skillStr += ', '.join(dmgStrs)
skillStr += "]}"
skillStrs.append(skillStr)
else:
skills = attacks
fileName = 'parsedAttacks.json'
for skill in skills:
skillStr = "'{}': {{'eff': {}, 'qualityBonus': '{}', 'keywords': [{}], 'modifiers': [{}], 'dmg': [".format(
skill.name,
skill.dmg.effectiveness,
skill.qualityBonus,
', '.join(["'{}'".format(word) for word in skill.keywords]),
', '.join(["'{}'".format(modifier) for modifier in skill.modifiers]))
i = 1
dmgStrs = []
for lvl in skill.dmg.lvlStages:
dmgStr = "{"
dmgStr += "'lvl': '{}'".format(lvl)
for type in dmgTypes:
if hasattr(skill.dmg, type):
dmgStr += ", '{}': {}".format(type, printMinMaxDmg(getattr(skill.dmg, type)[i]))
dmgStr += ", 'mana': {}".format(skill.dmg.mana[i])
dmgStr += "}";
dmgStrs.append(dmgStr)
i += 1
skillStr += ', '.join(dmgStrs)
skillStr += "]}"
skillStrs.append(skillStr)
f = open(generalDir + fileName, "w")
f.write('{' + (', '.join(skillStrs)) + '}')
f.close()
|
|
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module defines the types of the formulae handled by pySMT.
In the current version these are:
* Bool
* Int
* Real
* BVType
* FunctionType
Types are represented by singletons. Basic types (Bool, Int and Real)
are constructed here by default, while BVType and FunctionType rely
on a factory service. Each BitVector width is represented by a
different instance of BVType.
"""
# Global dictionary of types, used to store the singletons
__CUSTOM_TYPES__ = {}
__BV_TYPES__ = {}
class PySMTType(object):
"""Abstract class for representing a type within pySMT."""
def __init__(self, type_id=-1):
self.type_id = type_id
def is_bool_type(self):
return False
def is_int_type(self):
return False
def is_real_type(self):
return False
def is_bv_type(self):
return False
def is_function_type(self):
return False
def __hash__(self):
return self.type_id
def __eq__(self, other):
if other is None:
return False
return self.type_id == other.type_id
def __ne__(self, other):
return not (self == other)
class BooleanType(PySMTType):
def __init__(self):
PySMTType.__init__(self, type_id = 0)
def is_bool_type(self):
return True
def as_smtlib(self, funstyle=True):
if funstyle:
return "() Bool"
else:
return "Bool"
def __str__(self):
return "Bool"
class RealType(PySMTType):
def __init__(self):
PySMTType.__init__(self, type_id = 1)
def is_real_type(self):
return True
def as_smtlib(self, funstyle=True):
if funstyle:
return "() Real"
else:
return "Real"
def __str__(self):
return "Real"
class IntType(PySMTType):
def __init__(self):
PySMTType.__init__(self, type_id = 2)
def is_int_type(self):
return True
def as_smtlib(self, funstyle=True):
if funstyle:
return "() Int"
else:
return "Int"
def __str__(self):
return "Int"
def BVType(width=32):
"""Returns the singleton associated to the BV type for the given width.
This function takes care of building and registering the type
whenever needed. To see the functions provided by the type look at
_BVType.
"""
key = width
if key in __BV_TYPES__:
return __BV_TYPES__[key]
res = _BVType(width=width)
__BV_TYPES__[key] = res
return res
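# Illustrative behaviour: repeated calls with the same width return the same
# singleton instance.
#   assert BVType(8) is BVType(8)
#   assert str(BVType(8)) == "BV8"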
class _BVType(PySMTType):
"""Internal class to represent a BitVector type.
This class should not be instantiated directly, but the factory
method BVType should be used instead.
"""
def __init__(self, width=32):
PySMTType.__init__(self, type_id = 3)
self.width = width
def is_bv_type(self, width=None):
if width:
return self.width == width
return True
def as_smtlib(self, funstyle=True):
if funstyle:
return "() (_ BitVec %d)" % self.width
else:
return "(_ BitVec %d)" % self.width
def __str__(self):
return "BV%d" % self.width
def __eq__(self, other):
if other is None:
return False
if self.type_id != other.type_id:
return False
if self.width != other.width:
return False
return True
def __hash__(self):
return hash(self.type_id + self.width)
# FunctionType is a Factory that returns a _FunctionType
def FunctionType(return_type, param_types):
"""Returns the singleton associated to the Function type with the given arguments.
This function takes care of building and registering the type
whenever needed. To see the functions provided by the type look at
_FunctionType
"""
param_types = tuple(param_types)
key = (return_type, param_types)
if key in __CUSTOM_TYPES__:
return __CUSTOM_TYPES__[key]
res = _FunctionType(return_type=return_type,
param_types=param_types)
__CUSTOM_TYPES__[key] = res
return res
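# Illustrative behaviour, using the INT/REAL singletons defined at the end of
# this module:
#   ftype = FunctionType(REAL, [INT])
#   str(ftype)         -> "Int -> Real"
#   ftype.as_smtlib()  -> "(Int) Real"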
class _FunctionType(PySMTType):
"""Internal class used to represent a Function type.
This class should not be instantiated directly, but the factory
method FunctionType should be used instead.
"""
def __init__(self, return_type, param_types):
PySMTType.__init__(self, type_id = 4)
self.return_type = return_type
self.param_types = param_types
self._hash = hash(str(self))
return
def as_smtlib(self, funstyle=True):
args = [p.as_smtlib(False)
for p in self.param_types]
rtype = self.return_type.as_smtlib(False)
if funstyle:
res = "(%s) %s" % (" ".join(args), rtype)
else:
res = " -> ".join(args+[rtype])
return res
def __str__(self):
return " -> ".join([str(p) for p in self.param_types] +
[str(self.return_type)])
def is_function_type(self):
return True
def __eq__(self, other):
if other is None:
return False
if self.type_id != other.type_id:
return False
if id(self) == id(other):
return True
return str(self) == str(other)
def __hash__(self):
return self._hash
# Singletons for the basic types
BOOL = BooleanType()
REAL = RealType()
INT = IntType()
# Helper Constants
PYSMT_TYPES = frozenset([BOOL, REAL, INT])
BV1, BV8, BV16, BV32, BV64, BV128 = [BVType(i) for i in [1, 8, 16, 32, 64, 128]]
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import sys
from gpu_tests import gpu_helper
from gpu_tests import gpu_integration_test
from gpu_tests import path_util
from gpu_tests import webgl_test_util
conformance_harness_script = r"""
var testHarness = {};
testHarness._allTestSucceeded = true;
testHarness._messages = '';
testHarness._failures = 0;
testHarness._finished = false;
testHarness._originalLog = window.console.log;
testHarness.log = function(msg) {
testHarness._messages += msg + "\n";
testHarness._originalLog.apply(window.console, [msg]);
}
testHarness.reportResults = function(url, success, msg) {
testHarness._allTestSucceeded = testHarness._allTestSucceeded && !!success;
if(!success) {
testHarness._failures++;
if(msg) {
testHarness.log(msg);
}
}
};
testHarness.notifyFinished = function(url) {
testHarness._finished = true;
};
testHarness.navigateToPage = function(src) {
var testFrame = document.getElementById("test-frame");
testFrame.src = src;
};
window.webglTestHarness = testHarness;
window.parent.webglTestHarness = testHarness;
window.console.log = testHarness.log;
window.onerror = function(message, url, line) {
testHarness.reportResults(null, false, message);
testHarness.notifyFinished(null);
};
window.quietMode = function() { return true; }
"""
extension_harness_additional_script = r"""
window.onload = function() { window._loaded = true; }
"""
def _CompareVersion(version1, version2):
ver_num1 = [int(x) for x in version1.split('.')]
ver_num2 = [int(x) for x in version2.split('.')]
size = min(len(ver_num1), len(ver_num2))
return cmp(ver_num1[0:size], ver_num2[0:size])
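# Illustrative behaviour (only the shared prefix of the two versions is compared):
#   _CompareVersion('2.0.0', '1.0.4') -> 1
#   _CompareVersion('1.0', '1.0.4')   -> 0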
class WebGLConformanceIntegrationTest(gpu_integration_test.GpuIntegrationTest):
_webgl_version = None
_is_asan = False
_crash_count = 0
_gl_backend = ""
_angle_backend = ""
_command_decoder = ""
_verified_flags = False
@classmethod
def Name(cls):
return 'webgl_conformance'
@classmethod
def AddCommandlineArgs(cls, parser):
super(WebGLConformanceIntegrationTest, cls).AddCommandlineArgs(parser)
parser.add_option(
'--webgl-conformance-version',
help='Version of the WebGL conformance tests to run.',
default='1.0.4')
parser.add_option(
'--webgl2-only',
help='Whether we include webgl 1 tests if version is 2.0.0 or above.',
default='false')
parser.add_option(
'--is-asan',
help='Indicates whether currently running an ASAN build',
action='store_true',
default=False)
@classmethod
def GenerateGpuTests(cls, options):
#
# Conformance tests
#
test_paths = cls._ParseTests('00_test_list.txt',
options.webgl_conformance_version,
(options.webgl2_only == 'true'), None)
cls._webgl_version = [
int(x) for x in options.webgl_conformance_version.split('.')
][0]
cls._is_asan = options.is_asan
for test_path in test_paths:
test_path_with_args = test_path
if cls._webgl_version > 1:
test_path_with_args += '?webglVersion=' + str(cls._webgl_version)
yield (test_path.replace(os.path.sep, '/'),
os.path.join(webgl_test_util.conformance_relpath,
test_path_with_args), ('_RunConformanceTest',))
#
# Extension tests
#
extension_tests = cls._GetExtensionList()
# Coverage test.
yield ('WebglExtension_TestCoverage',
os.path.join(webgl_test_util.extensions_relpath,
'webgl_extension_test.html'),
('_RunExtensionCoverageTest', extension_tests, cls._webgl_version))
# Individual extension tests.
for extension in extension_tests:
yield ('WebglExtension_%s' % extension,
os.path.join(webgl_test_util.extensions_relpath,
'webgl_extension_test.html'),
('_RunExtensionTest', extension, cls._webgl_version))
@classmethod
def _GetExtensionList(cls):
if cls._webgl_version == 1:
return [
'ANGLE_instanced_arrays',
'EXT_blend_minmax',
'EXT_color_buffer_half_float',
'EXT_disjoint_timer_query',
'EXT_float_blend',
'EXT_frag_depth',
'EXT_shader_texture_lod',
'EXT_sRGB',
'EXT_texture_compression_bptc',
'EXT_texture_compression_rgtc',
'EXT_texture_filter_anisotropic',
'KHR_parallel_shader_compile',
'OES_element_index_uint',
'OES_fbo_render_mipmap',
'OES_standard_derivatives',
'OES_texture_float',
'OES_texture_float_linear',
'OES_texture_half_float',
'OES_texture_half_float_linear',
'OES_vertex_array_object',
'WEBGL_color_buffer_float',
'WEBGL_compressed_texture_astc',
'WEBGL_compressed_texture_etc',
'WEBGL_compressed_texture_etc1',
'WEBGL_compressed_texture_pvrtc',
'WEBGL_compressed_texture_s3tc',
'WEBGL_compressed_texture_s3tc_srgb',
'WEBGL_debug_renderer_info',
'WEBGL_debug_shaders',
'WEBGL_depth_texture',
'WEBGL_draw_buffers',
'WEBGL_lose_context',
'WEBGL_multi_draw',
'WEBGL_video_texture',
]
else:
return [
'EXT_color_buffer_float',
'EXT_disjoint_timer_query_webgl2',
'EXT_float_blend',
'EXT_texture_compression_bptc',
'EXT_texture_compression_rgtc',
'EXT_texture_filter_anisotropic',
'EXT_texture_norm16',
'KHR_parallel_shader_compile',
'OES_texture_float_linear',
'OVR_multiview2',
'WEBGL_compressed_texture_astc',
'WEBGL_compressed_texture_etc',
'WEBGL_compressed_texture_etc1',
'WEBGL_compressed_texture_pvrtc',
'WEBGL_compressed_texture_s3tc',
'WEBGL_compressed_texture_s3tc_srgb',
'WEBGL_debug_renderer_info',
'WEBGL_debug_shaders',
'WEBGL_draw_instanced_base_vertex_base_instance',
'WEBGL_lose_context',
'WEBGL_multi_draw',
'WEBGL_multi_draw_instanced_base_vertex_base_instance',
'WEBGL_video_texture',
]
def RunActualGpuTest(self, test_path, *args):
# This indirection allows these tests to trampoline through
# _RunGpuTest.
test_name = args[0]
getattr(self, test_name)(test_path, *args[1:])
def _VerifyGLBackend(self, gpu_info):
# Verify that Chrome's GL backend matches if a specific one was requested
if self._gl_backend:
if (self._gl_backend == 'angle'
and gpu_helper.GetANGLERenderer(gpu_info) == 'no_angle'):
self.fail('requested GL backend (' + self._gl_backend + ')' +
' had no effect on the browser: ' +
_GetGPUInfoErrorString(gpu_info))
return False
return True
def _VerifyANGLEBackend(self, gpu_info):
if self._angle_backend:
# GPU expectations use slightly different names for the ANGLE backends
# than the Chrome flags
known_backend_flag_map = {
'd3d11': ['d3d11'],
'd3d9': ['d3d9'],
'opengl': ['gl'],
'opengles': ['gles'],
'vulkan': ['vulkan'],
# Support setting VK_ICD_FILENAMES for swiftshader when requesting
# the 'vulkan' backend.
'swiftshader': ['swiftshader', 'vulkan'],
}
current_angle_backend = gpu_helper.GetANGLERenderer(gpu_info)
if (current_angle_backend not in known_backend_flag_map or
self._angle_backend not in \
known_backend_flag_map[current_angle_backend]):
self.fail('requested ANGLE backend (' + self._angle_backend + ')' +
' had no effect on the browser: ' +
_GetGPUInfoErrorString(gpu_info))
return False
return True
def _VerifyCommandDecoder(self, gpu_info):
if self._command_decoder:
# GPU expectations use slightly different names for the command decoders
# than the Chrome flags
known_command_decoder_flag_map = {
'passthrough': 'passthrough',
'no_passthrough': 'validating',
}
current_command_decoder = gpu_helper.GetCommandDecoder(gpu_info)
if (current_command_decoder not in known_command_decoder_flag_map or
known_command_decoder_flag_map[current_command_decoder] != \
self._command_decoder):
self.fail('requested command decoder (' + self._command_decoder + ')' +
' had no effect on the browser: ' +
_GetGPUInfoErrorString(gpu_info))
return False
return True
def _NavigateTo(self, test_path, harness_script):
gpu_info = self.browser.GetSystemInfo().gpu
self._crash_count = gpu_info.aux_attributes['process_crash_count']
if not self._verified_flags:
# If the user specified any flags for ANGLE or the command decoder,
# verify that the browser is actually using the requested configuration
if (self._VerifyGLBackend(gpu_info) and self._VerifyANGLEBackend(gpu_info)
and self._VerifyCommandDecoder(gpu_info)):
self._verified_flags = True
url = self.UrlOfStaticFilePath(test_path)
self.tab.Navigate(url, script_to_evaluate_on_commit=harness_script)
def _CheckTestCompletion(self):
self.tab.action_runner.WaitForJavaScriptCondition(
'webglTestHarness._finished', timeout=self._GetTestTimeout())
if self._crash_count != self.browser.GetSystemInfo().gpu \
.aux_attributes['process_crash_count']:
self.fail('GPU process crashed during test.\n' +
self._WebGLTestMessages(self.tab))
elif not self._DidWebGLTestSucceed(self.tab):
self.fail(self._WebGLTestMessages(self.tab))
def _RunConformanceTest(self, test_path, *args):
del args # Unused in conformance tests.
self._NavigateTo(test_path, conformance_harness_script)
self._CheckTestCompletion()
def _RunExtensionCoverageTest(self, test_path, *args):
self._NavigateTo(test_path, _GetExtensionHarnessScript())
self.tab.action_runner.WaitForJavaScriptCondition(
'window._loaded', timeout=self._GetTestTimeout())
extension_list = args[0]
webgl_version = args[1]
context_type = "webgl2" if webgl_version == 2 else "webgl"
extension_list_string = "["
for extension in extension_list:
extension_list_string = extension_list_string + extension + ", "
extension_list_string = extension_list_string + "]"
self.tab.action_runner.EvaluateJavaScript(
'checkSupportedExtensions({{ extensions_string }}, {{context_type}})',
extensions_string=extension_list_string,
context_type=context_type)
self._CheckTestCompletion()
def _RunExtensionTest(self, test_path, *args):
self._NavigateTo(test_path, _GetExtensionHarnessScript())
self.tab.action_runner.WaitForJavaScriptCondition(
'window._loaded', timeout=self._GetTestTimeout())
extension = args[0]
webgl_version = args[1]
context_type = "webgl2" if webgl_version == 2 else "webgl"
self.tab.action_runner.EvaluateJavaScript(
'checkExtension({{ extension }}, {{ context_type }})',
extension=extension,
context_type=context_type)
self._CheckTestCompletion()
def _GetTestTimeout(self):
timeout = 300
if self._is_asan:
# Asan runs much slower and needs a longer timeout
timeout *= 2
return timeout
@classmethod
def SetupWebGLBrowserArgs(cls, browser_args):
# --test-type=gpu is used only to suppress the "Google API Keys are missing"
# infobar, which causes flakiness in tests.
browser_args += [
'--autoplay-policy=no-user-gesture-required',
'--disable-domain-blocking-for-3d-apis',
'--disable-gpu-process-crash-limit',
'--test-type=gpu',
'--enable-webgl-draft-extensions',
# Try disabling the GPU watchdog to see if this affects the
# intermittent GPU process hangs that have been seen on the
# waterfall. crbug.com/596622 crbug.com/609252
'--disable-gpu-watchdog',
# TODO(http://crbug.com/832952): Remove this when WebXR spec is more
# stable and setCompatibleXRDevice is part of the conformance test.
'--disable-blink-features=WebXR',
# TODO(crbug.com/830901): see whether disabling this feature
# makes the WebGL video upload tests reliable again.
'--disable-features=UseSurfaceLayerForVideo',
]
# Note that the overriding of the default --js-flags probably
# won't interact well with RestartBrowserIfNecessaryWithArgs, but
# we don't use that in this test.
browser_options = cls._finder_options.browser_options
builtin_js_flags = '--js-flags=--expose-gc'
found_js_flags = False
user_js_flags = ''
if browser_options.extra_browser_args:
for o in browser_options.extra_browser_args:
if o.startswith('--js-flags'):
found_js_flags = True
user_js_flags = o
break
if o.startswith('--use-gl='):
cls._gl_backend = o[len('--use-gl='):]
if o.startswith('--use-angle='):
cls._angle_backend = o[len('--use-angle='):]
if o.startswith('--use-cmd-decoder='):
cls._command_decoder = o[len('--use-cmd-decoder='):]
if found_js_flags:
logging.warning('Overriding built-in JavaScript flags:')
logging.warning(' Original flags: ' + builtin_js_flags)
logging.warning(' New flags: ' + user_js_flags)
else:
browser_args += [builtin_js_flags]
cls.CustomizeBrowserArgs(browser_args)
@classmethod
def SetUpProcess(cls):
super(WebGLConformanceIntegrationTest, cls).SetUpProcess()
cls.SetupWebGLBrowserArgs([])
cls.StartBrowser()
# By setting multiple server directories, the root of the server
# implicitly becomes the common base directory, i.e., the Chromium
# src dir, and all URLs have to be specified relative to that.
cls.SetStaticServerDirs([
os.path.join(path_util.GetChromiumSrcDir(),
webgl_test_util.conformance_relpath),
os.path.join(path_util.GetChromiumSrcDir(),
webgl_test_util.extensions_relpath)
])
# Helper functions.
@staticmethod
def _DidWebGLTestSucceed(tab):
return tab.EvaluateJavaScript('webglTestHarness._allTestSucceeded')
@staticmethod
def _WebGLTestMessages(tab):
return tab.EvaluateJavaScript('webglTestHarness._messages')
@classmethod
def _ParseTests(cls, path, version, webgl2_only, folder_min_version):
def _ParseTestNameAndVersions(line):
"""Parses any min/max versions and the test name on the given line.
Args:
line: A string containing the line to be parsed.
Returns:
A tuple (test_name, min_version, max_version) containing the test name
and parsed minimum/maximum versions found as strings. Min/max values can
be None if no version was found.
"""
line_tokens = line.split(' ')
test_name = line_tokens[-1]
i = 0
min_version = None
max_version = None
while i < len(line_tokens):
token = line_tokens[i]
if token == '--min-version':
i += 1
min_version = line_tokens[i]
elif token == '--max-version':
i += 1
max_version = line_tokens[i]
i += 1
return test_name, min_version, max_version
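# Illustrative parse of a hypothetical test-list line:
#   '--min-version 2.0.0 conformance2/attribs/00_test_list.txt'
#   -> ('conformance2/attribs/00_test_list.txt', '2.0.0', None)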
test_paths = []
full_path = os.path.normpath(
os.path.join(webgl_test_util.conformance_path, path))
if not os.path.exists(full_path):
raise Exception('The WebGL conformance test path specified ' +
'does not exist: ' + full_path)
with open(full_path, 'r') as f:
for line in f:
line = line.strip()
if not line:
continue
if line.startswith('//') or line.startswith('#'):
continue
test_name, min_version, max_version = _ParseTestNameAndVersions(line)
min_version_to_compare = min_version or folder_min_version
if (min_version_to_compare
and _CompareVersion(version, min_version_to_compare) < 0):
continue
if max_version and _CompareVersion(version, max_version) > 0:
continue
if (webgl2_only and not '.txt' in test_name
and (not min_version_to_compare
or not min_version_to_compare.startswith('2'))):
continue
include_path = os.path.join(os.path.dirname(path), test_name)
if '.txt' in test_name:
# We only check min-version >= 2.0.0 for the top level list.
test_paths += cls._ParseTests(include_path, version, webgl2_only,
min_version_to_compare)
else:
test_paths.append(include_path)
return test_paths
@classmethod
def GetPlatformTags(cls, browser):
tags = super(WebGLConformanceIntegrationTest, cls).GetPlatformTags(browser)
tags.extend([['no-asan', 'asan'][cls._is_asan],
'webgl-version-%d' % cls._webgl_version])
if gpu_helper.EXPECTATIONS_DRIVER_TAGS:
system_info = browser.GetSystemInfo()
if system_info:
gpu_info = system_info.gpu
driver_vendor = gpu_helper.GetGpuDriverVendor(gpu_info)
driver_version = gpu_helper.GetGpuDriverVersion(gpu_info)
if driver_vendor and driver_version:
driver_vendor = driver_vendor.lower()
driver_version = driver_version.lower()
# Extract the string of vendor from 'angle (vendor)'
matcher = re.compile(r'^angle \(([a-z]+)\)$')
match = matcher.match(driver_vendor)
if match:
driver_vendor = match.group(1)
# Extract the substring before first space/dash/underscore
matcher = re.compile(r'^([a-z\d]+)([\s\-_]+[a-z\d]+)+$')
match = matcher.match(driver_vendor)
if match:
driver_vendor = match.group(1)
for tag in gpu_helper.EXPECTATIONS_DRIVER_TAGS:
match = gpu_helper.MatchDriverTag(tag)
assert match
if (driver_vendor == match.group(1)
and gpu_helper.EvaluateVersionComparison(
driver_version, match.group(2), match.group(3),
browser.platform.GetOSName(), driver_vendor)):
tags.append(tag)
return tags
@classmethod
def ExpectationsFiles(cls):
assert cls._webgl_version == 1 or cls._webgl_version == 2
if cls._webgl_version == 1:
file_name = 'webgl_conformance_expectations.txt'
else:
file_name = 'webgl2_conformance_expectations.txt'
return [
os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'test_expectations',
file_name)
]
def _GetGPUInfoErrorString(gpu_info):
primary_gpu = gpu_info.devices[0]
error_str = 'primary gpu=' + primary_gpu.device_string
if gpu_info.aux_attributes:
gl_renderer = gpu_info.aux_attributes.get('gl_renderer')
if gl_renderer:
error_str += ', gl_renderer=' + gl_renderer
return error_str
def _GetExtensionHarnessScript():
return conformance_harness_script + extension_harness_additional_script
def load_tests(loader, tests, pattern):
del loader, tests, pattern # Unused.
return gpu_integration_test.LoadAllTestsInModule(sys.modules[__name__])
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import pytest
from fenrir.http.parser import HTTPParser, ParserError
ParserResult = collections.namedtuple(
"ParserResult",
["http_version", "method", "path", "query", "headers"],
)
class TestHTTPParser:
@pytest.mark.parametrize(
("lines", "expected"),
[
(
[b"GET / HTTP/1.1\r\n", b"\r\n"],
ParserResult(b"HTTP/1.1", b"GET", b"/", None, []),
),
(
[b"GET /?q=wat HTTP/1.1\r\n", b"\r\n"],
ParserResult(b"HTTP/1.1", b"GET", b"/", b"q=wat", []),
),
(
[b"GET / HTTP/1.1\r\n", b"Foo: Bar\r\n", b"\r\n"],
ParserResult(
b"HTTP/1.1",
b"GET",
b"/",
None,
[(b"Foo", b"Bar")],
),
),
(
[
b"GET / HTTP/1.1\r\n",
b"Foo: Bar\r\n",
b"Wat: Ok\r\n",
b"Foo: BarTwo\r\n",
b"\r\n",
],
ParserResult(
b"HTTP/1.1",
b"GET",
b"/",
None,
[(b"Foo", b"Bar"), (b"Wat", b"Ok"), (b"Foo", b"BarTwo")],
),
),
],
)
def test_parse_success(self, lines, expected):
parser = HTTPParser()
for line in lines:
assert parser.parse(line) == len(line)
assert parser.finished
assert parser.http_version == expected.http_version
assert parser.method == expected.method
assert parser.path == expected.path
assert parser.query == expected.query
assert parser.headers == expected.headers
@pytest.mark.parametrize(
"lines",
[
[b"GET / HTTP/1.1\r\n", b"Foo : Bar\r\n"],
],
)
def test_parse_error(self, lines):
parser = HTTPParser()
for line in lines[:-1]:
assert parser.parse(line) == len(line)
with pytest.raises(ParserError):
parser.parse(lines[-1])
def test_parse_offset_length(self):
msg = (
b"GET / HTTP/1.1\r\n"
b"Foo: Bar\r\n"
b"\r\n"
)
parser = HTTPParser()
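# The calls below exercise what appears to be a parse(data, length=None,
# offset=0) signature; msg is 28 bytes long, so the final call (offset=15)
# consumes the remaining 13 bytes.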
assert parser.parse(msg, 5) == 5
assert parser.parse(msg, 10, 5) == 10
assert parser.parse(msg, offset=15) == 13
assert parser.finished
assert parser.http_version == b"HTTP/1.1"
assert parser.method == b"GET"
assert parser.path == b"/"
assert parser.query is None
assert parser.headers == [(b"Foo", b"Bar")]
def test_parse_past_data_end(self):
msg = (
b"GET / HTTP/1.1\r\n"
b"Foo: Bar\r\n"
b"\r\n"
)
parser = HTTPParser()
with pytest.raises(ValueError):
parser.parse(msg, 10000)
def test_parser_error_improperly_escaped_urls(self):
"""
RFC 7230 Section 3.1.1 states:
Recipients of an invalid request-line SHOULD respond with either a 400
(Bad Request) error or a 301 (Moved Permanently) redirect with the
request-target properly encoded. A recipient SHOULD NOT attempt to
autocorrect and then process the request without a redirect, since the
invalid request-line might be deliberately crafted to bypass security
filters along the request chain.
"""
parser = HTTPParser()
with pytest.raises(ParserError):
parser.parse(b"GET /foo bar/ HTTP/1.1\r\n\r\n")
def test_parser_error_space_in_field_name(self):
"""
RFC 7230 Section 3.2.4 states:
No whitespace is allowed between the header field-name and colon. In
the past, differences in the handling of such whitespace have led to
security vulnerabilities in request routing and response handling. A
server MUST reject any received request message that contains
whitespace between a header field-name and colon with a response code
of 400 (Bad Request).
"""
parser = HTTPParser()
with pytest.raises(ParserError):
parser.parse(b"GET / HTTP/1.1\r\nFoo : Bar\r\n\r\n")
def test_header_values_ignore_leading_trailing_whitespace(self):
"""
RFC 7230 Section 3.2.4 states:
A field value might be preceded and/or followed by optional whitespace
(OWS); a single SP preceding the field-value is preferred for
consistent readability by humans. The field value does not include any
leading or trailing whitespace: OWS occurring before the first
non-whitespace octet of the field value or after the last
non-whitespace octet of the field value ought to be excluded by parsers
when extracting the field value from a header field.
"""
parser = HTTPParser()
parser.parse(b"GET / HTTP/1.1\r\nFoo: Bar \r\n\r\n")
assert parser.headers == [(b"Foo", b"Bar")]
def test_parser_accepts_extraneous_newlines(self):
"""
RFC 7230 Section 3.5 states:
In the interest of robustness, a server that is expecting to receive
and parse a request-line SHOULD ignore at least one empty line (CRLF)
received prior to the request-line.
"""
parser = HTTPParser()
parser.parse(b"\r\n\r\n")
parser.parse(b"\r\n\nGET / HTTP/1.1\r\n\r\n")
assert parser.http_version == b"HTTP/1.1"
assert parser.method == b"GET"
assert parser.path == b"/"
assert parser.query is None
assert parser.headers == []
def test_parser_handles_newlines(self):
"""
RFC 7230 Section 3.5 states:
Although the line terminator for the start-line and header fields is
the sequence CRLF, a recipient MAY recognize a single LF as a line
terminator and ignore any preceding CR.
"""
parser = HTTPParser()
parser.parse(b"GET / HTTP/1.1\nFoo: Bar\n\n")
assert parser.http_version == b"HTTP/1.1"
assert parser.method == b"GET"
assert parser.path == b"/"
assert parser.query is None
assert parser.headers == [(b"Foo", b"Bar")]
|
|
import datetime
from django import http
from django.contrib import messages
from django.shortcuts import render, redirect, get_object_or_404
from django.utils import timezone
from django.db import transaction
from django.core.urlresolvers import reverse
from airmozilla.main.models import (
Event,
Template,
SuggestedEvent,
SuggestedEventComment,
LocationDefaultEnvironment,
Approval,
)
from airmozilla.manage import forms
from airmozilla.manage import sending
from airmozilla.comments.models import Discussion, SuggestedDiscussion
from .decorators import staff_required, permission_required
@staff_required
@permission_required('main.add_event')
def suggestions(request):
context = {}
events = (
SuggestedEvent.objects
.filter(accepted=None)
.exclude(first_submitted=None)
.order_by('submitted')
)
context['include_old'] = request.GET.get('include_old')
if not context['include_old']:
now = timezone.now()
then = now - datetime.timedelta(days=30)
events = events.filter(first_submitted__gte=then)
context['events'] = events
return render(request, 'manage/suggestions.html', context)
@staff_required
@permission_required('main.add_event')
@transaction.atomic
def suggestion_review(request, id):
event = get_object_or_404(SuggestedEvent, pk=id)
real_event_form = None
comment_form = forms.SuggestedEventCommentForm()
if request.method == 'POST':
if request.POST.get('unbounce'):
event.submitted = timezone.now()
event.save()
return redirect('manage:suggestion_review', event.pk)
if not event.submitted:
return http.HttpResponseBadRequest('Not submitted')
form = forms.AcceptSuggestedEventForm(
request.POST,
instance=event,
)
if request.POST.get('save_comment'):
comment_form = forms.SuggestedEventCommentForm(data=request.POST)
if comment_form.is_valid():
comment = SuggestedEventComment.objects.create(
comment=comment_form.cleaned_data['comment'].strip(),
user=request.user,
suggested_event=event
)
sending.email_about_suggestion_comment(
comment,
request.user,
request
)
messages.info(
request,
'Comment added and %s notified.' % comment.user.email
)
return redirect('manage:suggestion_review', event.pk)
reject = request.POST.get('reject')
if reject:
form.fields['review_comments'].required = True
if not request.POST.get('save_comment') and form.is_valid():
form.save()
if reject:
event.submitted = None
event.status = SuggestedEvent.STATUS_REJECTED
event.save()
sending.email_about_rejected_suggestion(
event,
request.user,
request
)
messages.info(
request,
'Suggested event bounced back and %s has been emailed'
% (event.user.email,)
)
url = reverse('manage:suggestions')
return redirect(url)
else:
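                # Accepting the suggestion: seed a real Event form with the
                # suggested event's data so it can be validated and saved.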
dict_event = {
'title': event.title,
'description': event.description,
'short_description': event.short_description,
'start_time': event.start_time,
'timezone': event.location.timezone,
'location': event.location.pk,
'channels': [x.pk for x in event.channels.all()],
'call_info': event.call_info,
'privacy': event.privacy,
'popcorn_url': event.popcorn_url,
'estimated_duration': event.estimated_duration,
'topics': [x.pk for x in event.topics.all()],
}
if dict_event['popcorn_url'] == 'https://':
dict_event['popcorn_url'] = ''
real_event_form = forms.EventRequestForm(
data=dict_event,
)
real_event_form.fields['placeholder_img'].required = False
if real_event_form.is_valid():
real = real_event_form.save(commit=False)
real.placeholder_img = event.placeholder_img
real.picture = event.picture
real.slug = event.slug
real.additional_links = event.additional_links
real.remote_presenters = event.remote_presenters
real.creator = request.user
if real.popcorn_url and not event.upcoming:
real.archive_time = real.start_time
if event.upcoming:
real.status = Event.STATUS_SUBMITTED
# perhaps we have a default location template
# environment
if real.location:
try:
default = (
LocationDefaultEnvironment.objects
.get(
location=real.location,
privacy=real.privacy
)
)
real.template = default.template
real.template_environment = (
default.template_environment
)
except LocationDefaultEnvironment.DoesNotExist:
pass
else:
real.status = Event.STATUS_PENDING
real.save()
                    for x in event.tags.all():
                        real.tags.add(x)
                    for x in event.channels.all():
                        real.channels.add(x)
                    for x in event.topics.all():
                        real.topics.add(x)
event.accepted = real
event.save()
# create the necessary approval bits
if event.privacy == Event.PRIVACY_PUBLIC:
groups = []
for topic in real.topics.filter(is_active=True):
for group in topic.groups.all():
if group not in groups:
groups.append(group)
for group in groups:
Approval.objects.create(
event=real,
group=group,
)
sending.email_about_approval_requested(
real,
group,
request
)
try:
discussion = SuggestedDiscussion.objects.get(
event=event,
enabled=True
)
real_discussion = Discussion.objects.create(
enabled=True,
event=real,
notify_all=discussion.notify_all,
moderate_all=discussion.moderate_all,
)
for moderator in discussion.moderators.all():
real_discussion.moderators.add(moderator)
except SuggestedDiscussion.DoesNotExist:
pass
# if this is a popcorn event, and there is a default
# popcorn template, then assign that
if real.popcorn_url:
real.status = Event.STATUS_SCHEDULED
templates = Template.objects.filter(
default_popcorn_template=True
)
for template in templates[:1]:
real.template = template
real.save()
sending.email_about_accepted_suggestion(
event,
real,
request
)
messages.info(
request,
'New event created from suggestion.'
)
if real.popcorn_url or not event.upcoming:
url = reverse('manage:events')
else:
url = reverse('manage:event_edit', args=(real.pk,))
return redirect(url)
else:
print real_event_form.errors
else:
form = forms.AcceptSuggestedEventForm(instance=event)
# we don't need the label for this form layout
comment_form.fields['comment'].label = ''
comments = (
SuggestedEventComment.objects
.filter(suggested_event=event)
        .select_related('user')
.order_by('created')
)
discussion = None
for each in SuggestedDiscussion.objects.filter(event=event):
discussion = each
context = {
'event': event,
'form': form,
'real_event_form': real_event_form,
'comment_form': comment_form,
'comments': comments,
'discussion': discussion,
}
return render(request, 'manage/suggestion_review.html', context)
|
|
#!/usr/local/bin/python
import sys, os, time, traceback
from pyomo.environ import *
from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition
def add_relative_path(*parts):
""" Adds a new path to sys.path.
The path should be specified relative to the current module,
and specified as a list of directories to traverse to reach the
final destination."""
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), *parts)))
# note: switch and switch-hawaii should normally be cloned
# into the directory for each scenario, and then during active
# development they should be refreshed periodically and the scenario
# files should also be updated to address any changes. This way,
# when a scenario is archived, it retains the appropriate versions
# of switch and switch-hawaii, so it can be re-run if needed.
add_relative_path('switch') # standard switch model
import switch_mod.utilities as utilities
from switch_mod.utilities import define_AbstractModel
add_relative_path('switch-hawaii-core') # common components of switch-hawaii
import util
from util import tic, toc, log
add_relative_path('.') # components for this particular study
opt = SolverFactory("cplex", solver_io="nl")
# tell cplex to find an irreducible infeasible set (and report it)
#opt.options['iisfind'] = 1
# tell cplex to accept solutions within 0.1% of optimal (relative MIP gap tolerance)
opt.options['mipgap'] = 0.001
# display more information during solve
opt.options['display'] = 1
opt.options['bardisplay'] = 1
# define global variables for convenient access in interactive session
switch_model = None
switch_instance = None
results = None
def solve(
inputs='inputs',
rps=True, demand_response=True, renewables=True, ev=None, pumped_hydro=True,
ph_year=None, ph_mw=None,
tag=None
):
global switch_model, switch_instance, results
modules = ['switch_mod', 'fuel_cost', 'project.no_commit', 'switch_patch', 'batteries']
if rps:
modules.append('rps')
if not renewables:
modules.append('no_renewables')
if demand_response:
modules.append('simple_dr')
if ev is None:
# not specified, leave out ev's
pass
elif ev:
# user asked for ev
modules.append('ev')
else:
# user asked for no_ev
modules.append('no_ev')
if pumped_hydro:
modules.append('pumped_hydro')
log('using modules: {m}\n'.format(m=modules))
log("defining model... "); tic()
switch_model = define_AbstractModel(*modules)
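    # suffixes let the solver report dual values and any irreducible infeasible set (IIS) back to the model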
switch_model.iis = Suffix(direction=Suffix.IMPORT)
switch_model.dual = Suffix(direction=Suffix.IMPORT)
# force construction of a fixed amount of pumped hydro
if pumped_hydro and ph_mw is not None:
print "Forcing construction of {m} MW of pumped hydro.".format(m=ph_mw)
switch_model.Build_Pumped_Hydro_MW = Constraint(switch_model.LOAD_ZONES, rule=lambda m, z:
m.Pumped_Hydro_Capacity_MW[z, m.PERIODS.last()] == ph_mw
)
# force construction of pumped hydro only in a certain period
if pumped_hydro and ph_year is not None:
print "Allowing construction of pumped hydro only in {p}.".format(p=ph_year)
switch_model.Build_Pumped_Hydro_Year = Constraint(
switch_model.LOAD_ZONES, switch_model.PERIODS,
rule=lambda m, z, p:
m.BuildPumpedHydroMW[z, p] == 0 if p != ph_year else Constraint.Skip
)
toc() # done defining model
log("loading model data from {} dir... ".format(inputs)); tic()
switch_instance = switch_model.load_inputs(inputs_dir=inputs)
toc()
log("solving model...\n"); tic()
results = opt.solve(switch_instance, keepfiles=False, tee=True,
symbolic_solver_labels=True, suffixes=['dual', 'iis'])
log("Solver finished; "); toc()
# results.write()
log("loading solution... "); tic()
# Pyomo changed their interface for loading results somewhere
# between 4.0.x and 4.1.x in a way that was not backwards compatible.
# Make the code accept either version
if hasattr(switch_instance, 'solutions'):
# Works in Pyomo version 4.1.x
switch_instance.solutions.load_from(results)
else:
# Works in Pyomo version 4.0.9682
switch_instance.load(results)
toc()
if results.solver.termination_condition == TerminationCondition.infeasible:
print "Model was infeasible; Irreducible Infeasible Set (IIS) returned by solver:"
print "\n".join(c.cname() for c in switch_instance.iis)
if util.interactive_session:
print "Unsolved model is available as switch_instance."
raise RuntimeError("Infeasible model")
if util.interactive_session:
print "Model solved successfully."
print "Solved model is available as switch_instance."
print "\n\n======================================================="
print "Solved model"
print "======================================================="
print "Total cost: ${v:,.0f}".format(v=value(switch_instance.Minimize_System_Cost))
if pumped_hydro:
switch_instance.BuildPumpedHydroMW.pprint()
write_results(switch_instance, tag=tag)
def write_results(m, tag=None):
# format the tag to append to file names (if any)
if tag is not None:
t = "_"+str(tag)
else:
t = ""
# make sure there's a valid output directory
output_dir = "outputs"
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.isdir(output_dir):
raise RuntimeError("Unable to create output directory {dir}.".format(dir=output_dir))
# write out results
util.write_table(m, m.TIMEPOINTS,
output_file=os.path.join(output_dir, "dispatch{t}.txt".format(t=t)),
headings=("timepoint_label",)+tuple(m.PROJECTS),
values=lambda m, t: (m.tp_timestamp[t],) + tuple(
m.DispatchProj_AllTimePoints[p, t]
for p in m.PROJECTS
)
)
util.write_table(
m, m.LOAD_ZONES, m.TIMEPOINTS,
output_file=os.path.join(output_dir, "energy_sources{t}.txt".format(t=t)),
headings=
("load_zone", "timepoint_label")
+tuple(m.FUELS)
+tuple(m.NON_FUEL_ENERGY_SOURCES)
+tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES)
+tuple(m.LZ_Energy_Components_Produce)
+tuple(m.LZ_Energy_Components_Consume)
+("marginal_cost",),
values=lambda m, z, t:
(z, m.tp_timestamp[t])
+tuple(
sum(m.DispatchProj_AllTimePoints[p, t] for p in m.PROJECTS_BY_FUEL[f])
for f in m.FUELS
)
+tuple(
sum(m.DispatchProj_AllTimePoints[p, t] for p in m.PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s])
for s in m.NON_FUEL_ENERGY_SOURCES
)
+tuple(
sum(
m.DispatchUpperLimit_AllTimePoints[p, t] - m.DispatchProj_AllTimePoints[p, t]
for p in m.PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s]
)
for s in m.NON_FUEL_ENERGY_SOURCES
)
+tuple(sum(getattr(m, component)[lz, t] for lz in m.LOAD_ZONES)
for component in m.LZ_Energy_Components_Produce)
+tuple(sum(getattr(m, component)[lz, t] for lz in m.LOAD_ZONES)
for component in m.LZ_Energy_Components_Consume)
+(m.dual[m.Energy_Balance[z, t]]/m.bring_timepoint_costs_to_base_year[t],)
)
built_proj = tuple(set(
pr for pe in m.PERIODS for pr in m.PROJECTS if value(m.ProjCapacity[pr, pe]) > 0.001
))
util.write_table(m, m.PERIODS,
output_file=os.path.join(output_dir, "capacity{t}.txt".format(t=t)),
headings=("period",)+built_proj,
values=lambda m, pe: (pe,) + tuple(m.ProjCapacity[pr, pe] for pr in built_proj)
)
# import pprint
# b=[(pr, pe, value(m.BuildProj[pr, pe]), m.proj_gen_tech[pr], m.proj_overnight_cost[pr, pe]) for (pr, pe) in m.BuildProj if value(m.BuildProj[pr, pe]) > 0]
# bt=set(x[3] for x in b) # technologies
# pprint([(t, sum(x[2] for x in b if x[3]==t), sum(x[4] for x in b if x[3]==t)/sum(1.0 for x in b if x[3]==t)) for t in bt])
###############
if __name__ == '__main__':
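    # each scenario is a list of argument strings: "name=value" becomes a keyword argument
    # for solve() (values coerced to int/float where possible), "no_<flag>" sets <flag>=False,
    # and a bare name sets it to True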
scenarios=[
['no_rps', 'no_renewables', 'no_pumped_hydro', 'tag=base'],
['no_pumped_hydro', 'tag=base_rps'],
['ph_year=2021', 'ph_mw=200', 'tag=ph2021_200'],
['ph_year=2021', 'tag=ph2021'],
['ph_year=2045', 'ph_mw=200', 'tag=ph2045_200'],
['ph_year=2045', 'tag=ph2045'],
['ph_year=2029', 'ph_mw=200', 'tag=ph2029_200'],
['ph_year=2029', 'tag=ph2029'],
['ph_year=2037', 'ph_mw=200', 'tag=ph2037_200'],
['ph_year=2037', 'tag=ph2037'],
]
# catch errors so the user can continue with a solved model
try:
for scenario in scenarios:
args=dict() # have to create a new dict, not just assign an empty one, which would get reused
for arg in sys.argv[1:] + scenario: # evaluate command line arguments, then scenario arguments
if '=' in arg: # e.g., tag=base
(label, val) = arg.split('=', 1)
for t in [int, float, str]: # try to convert the value to these types, in order
try:
# convert the value to the specified type
val=t(val)
break
except ValueError:
# ignore value errors, move on to the next
pass
if label=='tag' and 'tag' in args:
# concatenate tags, otherwise override previous values
val = args['tag'] + '_' + val
args[label]=val
elif arg.startswith('no_'): # e.g., no_pumped_hydro
args[arg[3:]] = False
else: # e.g., ev
args[arg] = True
# for each scenario:
print 'arguments: {}'.format(args)
# if args['tag'] == 'test_base':
# print "skipping base scenario"
# continue
solve(**args)
except Exception, e:
traceback.print_exc()
print "ERROR:", e
|
|
# coding: utf-8
# In[ ]:
from __future__ import print_function
import numpy as np
from datetime import datetime as dt
from sklearn.linear_model import LogisticRegression
import pickle
import sys
import os
import errno
from rankpruning import RankPruning, other_pnlearning_methods
from util import get_dataset, downsample, get_metrics, make_sure_path_exists
# In[ ]:
def get_model(key = None, rh1 = None, rh0 = None, clf = None):
models = {
"Rank Pruning" : RankPruning(clf = clf),
"Baseline" : other_pnlearning_methods.BaselineNoisyPN(clf = clf),
"True Classifier": clf,
"Rank Pruning (noise rates given)": RankPruning(rh1, rh0, clf = clf),
"Elk08 (noise rates given)": other_pnlearning_methods.Elk08(e1 = 1 - rh1, clf = clf),
"Liu16 (noise rates given)": other_pnlearning_methods.Liu16(rh1, rh0, clf = clf),
"Nat13 (noise rates given)": other_pnlearning_methods.Nat13(rh1, rh0, clf = clf),
}
try:
model = models[key]
    except KeyError:
model = None
return model
# In[ ]:
def run_test(
dataset,
clf_type,
epochs,
true_rh1,
downsample_ratio,
ordered_models_keys,
list_of_images = range(10),
suppress_error = False,
verbose = False,
pi1 = 0.0,
one_vs_rest = True,
cv_n_folds = 3,
early_stopping = True,
pulearning = None,
):
# Cast types to ensure consistency for 1 and 1.0, 0 and 0.0
true_rh1 = float(true_rh1)
downsample_ratio = float(downsample_ratio)
pi1 = float(pi1)
# Load MNIST or CIFAR data
(X_train_original, y_train_original), (X_test_original, y_test_original) = get_dataset(dataset = dataset)
X_train_original, y_train_original = downsample(X_train_original, y_train_original, downsample_ratio)
# Initialize models and result storage
metrics = {key:[] for key in ordered_models_keys}
data_all = {"metrics": metrics, "calculated": {}, "errors": {}}
start_time = dt.now()
    # Run through the ten image classes 0, 1, ..., 9
for image in list_of_images:
if one_vs_rest:
# X_train and X_test will not be modified. All data will be used. Adjust pointers.
X_train = X_train_original
X_test = X_test_original
# Relabel the image data. Make label 1 only for given image.
y_train = np.array(y_train_original == image, dtype=int)
y_test = np.array(y_test_original == image, dtype=int)
else: # one_vs_other
# Reducing the dataset to just contain our image and image = 4
other_image = 4 if image != 4 else 7
X_train = X_train_original[(y_train_original == image) | (y_train_original == other_image)]
y_train = y_train_original[(y_train_original == image) | (y_train_original == other_image)]
X_test = X_test_original[(y_test_original == image) | (y_test_original == other_image)]
y_test = y_test_original[(y_test_original == image) | (y_test_original == other_image)]
# Relabel the data. Make label 1 only for given image.
y_train = np.array(y_train == image, dtype=int)
y_test = np.array(y_test == image, dtype=int)
print()
print("Evaluating image:", image)
print("Number of positives in y:", sum(y_train))
print()
sys.stdout.flush()
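        # Create the noisy label vector s: only the first (1 - true_rh1) fraction of the
        # positives (in dataset order) keep their positive label, so a fraction true_rh1
        # of true positives is flipped to 0.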
s = y_train * (np.cumsum(y_train) < (1 - true_rh1) * sum(y_train))
        # In the presence of mislabeled negatives (negatives incorrectly labeled positive):
        # pi1 is the fraction of mislabeled negatives in the positively labeled set:
num_mislabeled = int(sum(y_train) * (1 - true_rh1) * pi1 / (1 - pi1))
if num_mislabeled > 0:
negative_set = s[y_train==0]
mislabeled = np.random.choice(len(negative_set), num_mislabeled, replace = False)
negative_set[mislabeled] = 1
s[y_train==0] = negative_set
print("image = {0}".format(image))
print("Training set: total = {0}, positives = {1}, negatives = {2}, P_noisy = {3}, N_noisy = {4}"
.format(len(X_train), sum(y_train), len(y_train)-sum(y_train), sum(s), len(s)-sum(s)))
print("Testing set: total = {0}, positives = {1}, negatives = {2}"
.format(len(X_test), sum(y_test), len(y_test) - sum(y_test)))
# Fit different models for PU learning
for key in ordered_models_keys:
fit_start_time = dt.now()
print("\n\nFitting {0} classifier. Default classifier is {1}.".format(key, clf_type))
if clf_type == "logreg":
clf = LogisticRegression()
elif clf_type == "cnn":
from classifier_cnn import CNN
from keras import backend as K
K.clear_session()
clf = CNN(
dataset_name = dataset,
num_category = 2,
epochs = epochs,
early_stopping = early_stopping,
verbose = 1,
)
else:
raise ValueError("clf_type must be either logreg or cnn for this testing file.")
ps1 = sum(s) / float(len(s))
py1 = sum(y_train) / float(len(y_train))
true_rh0 = pi1 * ps1 / float(1 - py1)
model = get_model(
key = key,
rh1 = true_rh1,
rh0 = true_rh0,
clf = clf,
)
try:
if key == "True Classifier":
model.fit(X_train, y_train)
elif key in ["Rank Pruning", "Rank Pruning (noise rates given)", "Liu16 (noise rates given)"]:
model.fit(X_train, s, pulearning = pulearning, cv_n_folds = cv_n_folds)
elif key in ["Nat13 (noise rates given)"]:
model.fit(X_train, s, pulearning = pulearning)
else: # Elk08, Baseline
model.fit(X_train, s)
pred = model.predict(X_test)
# Produces only P(y=1|x) for pulearning models because they are binary
pred_prob = model.predict_proba(X_test)
pred_prob = pred_prob[:,1] if key == "True Classifier" else pred_prob
# Compute metrics
metrics_dict = get_metrics(pred, pred_prob, y_test)
elapsed = (dt.now() - fit_start_time).total_seconds()
if verbose:
print("\n{0} Model Performance at image {1}:\n=================\n".format(key, image))
print("Time Required", elapsed)
print("AUC:", metrics_dict["AUC"])
print("Error:", metrics_dict["Error"])
print("Precision:", metrics_dict["Precision"])
print("Recall:", metrics_dict["Recall"])
print("F1 score:", metrics_dict["F1 score"])
print("rh1:", model.rh1 if hasattr(model, 'rh1') else None)
print("rh0:", model.rh0 if hasattr(model, 'rh0') else None)
print()
metrics_dict["image"] = image
metrics_dict["time_seconds"] = elapsed
metrics_dict["rh1"] = model.rh1 if hasattr(model, 'rh1') else None
metrics_dict["rh0"] = model.rh0 if hasattr(model, 'rh0') else None
# Append dictionary of error and loss metrics
if key not in data_all["metrics"]:
data_all["metrics"][key] = [metrics_dict]
else:
data_all["metrics"][key].append(metrics_dict)
data_all["calculated"][(key, image)] = True
except Exception as e:
msg = "Error in {0}, image {1}, rh1 {2}, m {3}: {4}\n".format(key, image, true_rh1, pi1, e)
print(msg)
make_sure_path_exists("failed_models/")
with open("failed_models/" + key + ".txt", "ab") as f:
f.write(msg)
if suppress_error:
continue
else:
raise
return data_all
# In[ ]:
try:
image_index = int(sys.argv[1])
except:
image_index = None
try:
model_index = int(sys.argv[2])
except:
model_index = None
image_list = range(10)
ordered_models_keys = [
"Rank Pruning",
"Rank Pruning (noise rates given)",
"Elk08 (noise rates given)",
"Nat13 (noise rates given)",
"Liu16 (noise rates given)",
"Baseline",
"True Classifier",
]
if image_index is not None:
# Select only the single element
# otherwise all images are tested.
image_list = [image_list[image_index]]
if model_index is not None:
# Select only the single model
# otherwise all models are tested.
ordered_models_keys = [ordered_models_keys[model_index]]
for image in image_list:
for pi1, true_rh1 in [(0.5, 0.5), (0.25, 0.25), (0.5, 0.0), (0.0, 0.5)]:
for model in ordered_models_keys:
# Parameter settings:
dataset = "mnist" # choose between mnist and cifar
downsample_ratio = 0.5 # What fraction of data to keep for speed increase
# clf specific settings:
clf_type = "logreg" # "logreg" or "cnn"
epochs = 50
cv_n_folds = 3
early_stopping = True
# Other settings (currently need not change):
suppress_error = False
verbose = True
one_vs_rest = True # Default is True, False -> test one vs other
pulearning = (pi1 == 0)
print("[***]", "true_rh1 =", true_rh1)
print("[***]", "image =", image)
print("[***]", "pi1 =", pi1)
print("[***]", "downsample_ratio =", downsample_ratio)
print("[***] {0} TEST: One vs.".format(dataset), "Rest" if one_vs_rest else "Other")
data_all = run_test(
dataset = dataset,
clf_type = clf_type,
epochs = epochs,
true_rh1 = true_rh1,
downsample_ratio = downsample_ratio,
ordered_models_keys = [model],
list_of_images = [image],
suppress_error = suppress_error,
verbose = verbose,
pi1 = pi1,
one_vs_rest = one_vs_rest,
cv_n_folds = cv_n_folds,
early_stopping = early_stopping,
pulearning = pulearning,
)
print("Completed: model", model, "and image", image)
# Before we store results, create folder if needed.
make_sure_path_exists("data/")
pickle.dump(data_all, open("data/metrics_{0}_{1}_{2}_epochs_rh1_{3}_downsample_{4}_model_{5}_image_{6}_pi1_{7}.p".format(dataset, clf_type, epochs, true_rh1, downsample_ratio, model, image, pi1),"wb"))
|
|
from collections.abc import Iterable
from jsonschema.compat import str_types
from loguru import logger
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
logger = logger.bind(name='from_imdb')
class FromIMDB:
"""
This plugin enables generating entries based on an entity, an entity being a person, character or company.
It's based on IMDBpy which is required (pip install imdbpy). The basic config required just an IMDB ID of the
required entity.
For example:
from_imdb: ch0001354
Schema description:
Other than ID, all other properties are meant to filter the full list that the entity generates.
id: string that relates to a supported entity type. For example: 'nm0000375'. Required.
job_types: a string or list with job types from job_types. Default is 'actor'.
content_types: A string or list with content types from content_types. Default is 'movie'.
max_entries: The maximum number of entries that can return. This value's purpose is basically flood protection
against unruly configurations that will return too many results. Default is 200.
Advanced config example:
dynamic_movie_queue:
from_imdb:
id: co0051941
job_types:
- actor
- director
content_types: tv series
accept_all: yes
movie_queue: add
"""
job_types = [
'actor',
'actress',
'director',
'producer',
'writer',
'self',
'editor',
'miscellaneous',
'editorial department',
'cinematographer',
'visual effects',
'thanks',
'music department',
'in development',
'archive footage',
'soundtrack',
]
content_types = [
'movie',
'tv series',
'tv mini series',
'video game',
'video movie',
'tv movie',
'episode',
]
content_type_conversion = {
'movie': 'movie',
'tv series': 'tv',
'tv mini series': 'tv',
'tv movie': 'tv',
'episode': 'tv',
'video movie': 'video',
'video game': 'video game',
}
character_content_type_conversion = {
'movie': 'feature',
'tv series': 'tv',
'tv mini series': 'tv',
'tv movie': 'tv',
'episode': 'tv',
'video movie': 'video',
'video game': 'video-game',
}
jobs_without_content_type = ['actor', 'actress', 'self', 'in development', 'archive footage']
imdb_pattern = one_or_more(
{
'type': 'string',
'pattern': r'(nm|co|ch)\d{7,8}',
'error_pattern': 'Get the id from the url of the person/company you want to use,'
' e.g. http://imdb.com/text/<id here>/blah',
},
unique_items=True,
)
schema = {
'oneOf': [
imdb_pattern,
{
'type': 'object',
'properties': {
'id': imdb_pattern,
'job_types': one_or_more(
{'type': 'string', 'enum': job_types}, unique_items=True
),
'content_types': one_or_more(
{'type': 'string', 'enum': content_types}, unique_items=True
),
'max_entries': {'type': 'integer'},
'match_type': {'type': 'string', 'enum': ['strict', 'loose']},
},
'required': ['id'],
'additionalProperties': False,
},
]
}
def prepare_config(self, config):
"""
Converts config to dict form and sets defaults if needed
"""
if isinstance(config, str):
config = {'id': [config]}
elif isinstance(config, list):
config = {'id': config}
if isinstance(config, dict) and not isinstance(config['id'], list):
config['id'] = [config['id']]
config.setdefault('content_types', [self.content_types[0]])
config.setdefault('job_types', [self.job_types[0]])
config.setdefault('max_entries', 200)
config.setdefault('match_type', 'strict')
if isinstance(config.get('content_types'), str_types):
logger.debug('Converted content type from string to list.')
config['content_types'] = [config['content_types']]
if isinstance(config['job_types'], str_types):
logger.debug('Converted job type from string to list.')
config['job_types'] = [config['job_types']]
# Special case in case user meant to add actress instead of actor (different job types in IMDB)
if 'actor' in config['job_types'] and 'actress' not in config['job_types']:
config['job_types'].append('actress')
return config
def get_items(self, config):
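        # Resolve each configured IMDB id to an entity (person, company or character)
        # and collect that entity's items.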
items = []
for id in config['id']:
try:
entity_type, entity_object = self.get_entity_type_and_object(id)
except Exception as e:
logger.error(
'Could not resolve entity via ID: {}. '
'Either error in config or unsupported entity. Error:{}',
id,
e,
)
continue
items += self.get_items_by_entity(
entity_type,
entity_object,
config.get('content_types'),
config.get('job_types'),
config.get('match_type'),
)
return set(items)
def get_entity_type_and_object(self, imdb_id):
"""
Return a tuple of entity type and entity object
:param imdb_id: string which contains IMDB id
:return: entity type, entity object (person, company, etc.)
"""
if imdb_id.startswith('nm'):
person = self.ia.get_person(imdb_id[2:])
logger.info('Starting to retrieve items for person: {}', person)
return 'Person', person
elif imdb_id.startswith('co'):
company = self.ia.get_company(imdb_id[2:])
logger.info('Starting to retrieve items for company: {}', company)
return 'Company', company
elif imdb_id.startswith('ch'):
character = self.ia.get_character(imdb_id[2:])
logger.info('Starting to retrieve items for Character: {}', character)
return 'Character', character
def get_items_by_entity(
self, entity_type, entity_object, content_types, job_types, match_type
):
"""
Gets entity object and return movie list using relevant method
"""
if entity_type == 'Company':
return self.items_by_company(entity_object)
if entity_type == 'Character':
return self.items_by_character(entity_object, content_types, match_type)
elif entity_type == 'Person':
return self.items_by_person(entity_object, job_types, content_types, match_type)
def flatten_list(self, _list):
"""
Gets a list of lists and returns a flat list
"""
for el in _list:
if isinstance(el, Iterable) and not isinstance(el, str):
for sub in self.flatten_list(el):
yield sub
else:
yield el
def flat_list(self, non_flat_list, remove_none=False):
flat_list = self.flatten_list(non_flat_list)
if remove_none:
flat_list = [_f for _f in flat_list if _f]
return flat_list
def filtered_items(self, unfiltered_items, content_types, match_type):
items = []
unfiltered_items = set(unfiltered_items)
for item in sorted(unfiltered_items):
if match_type == 'strict':
logger.debug(
'Match type is strict, verifying item type to requested content types'
)
self.ia.update(item)
if item['kind'] in content_types:
logger.verbose(
'Adding item "{}" to list. Item kind is "{}"', item, item['kind']
)
items.append(item)
else:
logger.verbose('Rejecting item "{}". Item kind is "{}', item, item['kind'])
else:
logger.debug('Match type is loose, all items are being added')
items.append(item)
return items
def items_by_person(self, person, job_types, content_types, match_type):
"""
Return item list for a person object
"""
unfiltered_items = self.flat_list(
[self.items_by_job_type(person, job_type, content_types) for job_type in job_types],
remove_none=True,
)
return self.filtered_items(unfiltered_items, content_types, match_type)
def items_by_content_type(self, person, job_type, content_type):
return [
_f
for _f in (person.get(job_type + ' ' + self.content_type_conversion[content_type], []))
if _f
]
def items_by_job_type(self, person, job_type, content_types):
items = (
person.get(job_type, [])
if job_type in self.jobs_without_content_type
else [
person.get(job_type + ' ' + 'documentary', [])
and person.get(job_type + ' ' + 'short', [])
and self.items_by_content_type(person, job_type, content_type)
if content_type == 'movie'
else self.items_by_content_type(person, job_type, content_type)
for content_type in content_types
]
)
return [_f for _f in items if _f]
def items_by_character(self, character, content_types, match_type):
"""
Return items list for a character object
:param character: character object
:param content_types: content types as defined in config
:return:
"""
unfiltered_items = self.flat_list(
[
character.get(self.character_content_type_conversion[content_type])
for content_type in content_types
],
remove_none=True,
)
return self.filtered_items(unfiltered_items, content_types, match_type)
def items_by_company(self, company):
"""
Return items list for a company object
:param company: company object
:return: company items list
"""
return company.get('production companies')
@cached('from_imdb', persist='2 hours')
def on_task_input(self, task, config):
try:
from imdb import IMDb
self.ia = IMDb()
except ImportError:
logger.error(
'IMDBPY is required for this plugin. Please install using "pip install imdbpy"'
)
return
entries = []
config = self.prepare_config(config)
items = self.get_items(config)
if not items:
logger.error('Could not get IMDB item list, check your configuration.')
return
for item in items:
entry = Entry(
title=item['title'],
imdb_id='tt' + self.ia.get_imdbID(item),
url='',
imdb_url=self.ia.get_imdbURL(item),
)
if entry.isvalid():
if entry not in entries:
entries.append(entry)
if entry and task.options.test:
logger.info("Test mode. Entry includes:")
for key, value in list(entry.items()):
logger.info(' {}: {}', key.capitalize(), value)
else:
logger.error('Invalid entry created? {}', entry)
if len(entries) <= config.get('max_entries'):
return entries
else:
logger.warning(
'Number of entries ({}) exceeds maximum allowed value {}. '
'Edit your filters or raise the maximum value by entering a higher "max_entries"',
len(entries),
config.get('max_entries'),
)
return
@event('plugin.register')
def register_plugin():
plugin.register(FromIMDB, 'from_imdb', api_ver=2)
|
|
#!/usr/bin/env python
"""Preprocess Amazon movies data to use as training or test data in the LSTM NN"""
import pandas as pd
import numpy as np
import gc
from nltk.tokenize.casual import TweetTokenizer
import emoji
from usherwood_ds.nlp.processing.word_embedding import WordEmbedding
__author__ = "Peter J Usherwood"
__python_version__ = "3.5"
def parse_big_file_to_batches(file,
gen_words,
gen_emoji,
sequence_length=250,
size=300,
path='E:/data_sets/sentiments/train_sets/amazon_movies_full/',
text_field='Cleaned',
score_field='Score',
file_n='0',
classes=[1, 2, 3, 4, 5],
return_df=False):
for start in range(0, 500000, 100000):
df = pd.read_csv(path + file,
nrows=100000,
header=None,
skiprows=1 + start)
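        # `columns` is a module-level global, taken from the header of the first data file in __main__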
df.columns = columns
# ADHOC (comment it out if you want to run it on 5 values)
# df['Score - Binary'] = 0
# df.ix[df['Score'] == 1, 'Score - Binary'] = 'neg'
# df.ix[df['Score'] == 5, 'Score - Binary'] = 'pos'
# df = df[~(df['Score - Binary'] == 0)]
print(len(df))
block = str(str(start)[:-5])
if start == 0:
block = '0'
print(len(df))
if return_df:
df = turn_block_to_batches(df=df,
gen_words=gen_words,
gen_emoji=gen_emoji,
sequence_length=sequence_length,
size=size,
text_field=text_field,
score_field=score_field,
file_n=file_n,
block=block,
classes=classes,
return_df=return_df)
return df
else:
turn_block_to_batches(df=df,
gen_words=gen_words,
gen_emoji=gen_emoji,
sequence_length=sequence_length,
size=size,
text_field=text_field,
score_field=score_field,
file_n=file_n,
block=block,
classes=classes,
return_df=return_df)
df = None
gc.collect()
return True
def turn_block_to_batches(df,
gen_words,
gen_emoji,
sequence_length=250,
size=300,
text_field='Cleaned',
score_field='Score',
file_n='0',
block='0',
classes=[1, 2, 3, 4, 5],
return_df=False):
n_classes = len(classes)
df_parse = df.copy()
df = None
df = limit_sentence_length_and_balance_classes(df=df_parse,
sequence_length=sequence_length,
text_field=text_field,
score_field=score_field,
classes=classes)
df_parse = None
if return_df:
return df
else:
parse_balanced_df_to_numpy_batches(df=df,
gen_words=gen_words,
gen_emoji=gen_emoji,
sequence_length=sequence_length,
size=size,
text_field=text_field,
score_field=score_field,
n_classes=n_classes,
file_n=file_n,
block=block)
return True
def limit_sentence_length_and_balance_classes(df,
sequence_length=250,
text_field='Cleaned',
score_field='Score',
classes=[1, 2, 3, 4, 5]):
word_counts = []
for review in df[text_field]:
try:
word_counts.append(len(TweetTokenizer().tokenize(review)))
except TypeError:
print(review)
word_counts.append(sequence_length + 10)
    x = [20 <= count <= sequence_length for count in word_counts]
df = df.iloc[x].reset_index(drop=True)
print(len(df))
min_class = min(df[score_field].value_counts())
print(min_class)
df_pred = pd.DataFrame(columns=df.columns.values.tolist())
for cl in classes:
df_pred = df_pred.append(df[df[score_field] == cl].sample(n=min_class))
print(df_pred[score_field].value_counts())
df = None
df = df_pred
df_pred = None
df.reset_index(drop=True, inplace=True)
print(len(df))
return df
def parse_balanced_df_to_numpy_batches(df,
gen_words,
gen_emoji,
sequence_length=250,
size=300,
text_field='Cleaned',
score_field='Score',
n_classes=5,
file_n='0',
block='0'):
df.reset_index(drop=True, inplace=True)
data_Y = pd.get_dummies(df[score_field]).reset_index(drop=True).values
data_X = np.zeros((len(df), sequence_length, size), dtype=np.float32)
invalids = []
for ri, snippet in enumerate(df[text_field]):
invalid = 0
words = TweetTokenizer().tokenize(snippet)
for wi, word in enumerate(words):
try:
if word in emoji.UNICODE_EMOJI:
data_X[ri, wi] = gen_emoji.model[word]
else:
data_X[ri, wi] = gen_words.model[word]
except KeyError:
data_X[ri, wi] = np.zeros(size)
invalid += 1
invalids += [1 - (invalid / len(words))]
print(np.array(invalids).mean())
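    # Build class-balanced batches: the dataframe is ordered by class, so take an equal-sized
    # slice from each class block per batch, shuffle it, and save it to disk.
    # (`batch_size` is a module-level global set in __main__.)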
for i in range(int(len(data_Y) / (batch_size))):
size_per_class = int(len(data_Y) / n_classes)
batch_size_per_class = int(batch_size / n_classes)
start = i * batch_size_per_class
stop = (i + 1) * batch_size_per_class
ids = []
for j in range(n_classes):
ids += [a for a in range((start + (j * size_per_class)), (stop + (j * size_per_class)))]
train_X = data_X[ids, :, :]
train_Y = data_Y[ids, :]
permutation = np.random.permutation(train_Y.shape[0])
train_X = train_X[permutation, :, :]
train_Y = train_Y[permutation, :]
np.save(
'E:/data_sets/sentiments/train_sets/amazon_movies_we_balanced_chunks/X/train_Xf=' + file_n + 'b=' + block + 'i=' + str(
i), train_X)
np.save(
'E:/data_sets/sentiments/train_sets/amazon_movies_we_balanced_chunks/Y/train_Yf=' + file_n + 'b=' + block + 'i=' + str(
i), train_Y)
gc.collect()
return True
if __name__ == "__main__":
file_n = '3'
file = 'en_amazon_movies_1p5Mto2M.csv'
path = 'E:/data_sets/sentiments/train_sets/amazon_movies_full/'
    # Limit to reviews of at most 250 tokens
sequence_length = 250
batch_size = 20
    size = 300  # size of the word embeddings
classes = [1, 2, 3, 4, 5]
    score_field = 'Score'  # 'Score' runs on all 5 values; use 'Score - Binary' (see the ADHOC block) and adjust classes for the binary setup
text_field = 'Cleaned'
if batch_size % len(classes) != 0:
        raise Exception('the number of classes must be a factor of the batch size so that even chunks can be made')
gen_words = WordEmbedding()
gen_words.load_word2vec_model('E:/data_sets/word2vec_embeddings/GoogleNews-vectors-negative300.bin')
gen_emoji = WordEmbedding()
gen_emoji.load_word2vec_model('E:/data_sets/word2vec_embeddings/emoji2vec.bin')
df = pd.read_csv('E:/data_sets/sentiments/train_sets/amazon_movies_full/en_amazon_movies_0to500k.csv', nrows=10)
columns = df.columns
df = None
parse_big_file_to_batches(file=file,
gen_words=gen_words,
gen_emoji=gen_emoji,
sequence_length=sequence_length,
size=size,
path=path,
text_field='Cleaned',
score_field='Score',
file_n=file_n,
classes=classes)
|
|
# Webhooks for external integrations.
import logging
import re
from typing import Any, Dict, List, Optional, Tuple
import ujson
from django.conf import settings
from django.db.models import Q
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.webhooks.common import check_send_webhook_message, \
UnexpectedWebhookEventType
from zerver.models import Realm, UserProfile, get_user_by_delivery_email
IGNORED_EVENTS = [
'comment_created', # we handle issue_update event instead
'comment_deleted', # we handle issue_update event instead
]
def guess_zulip_user_from_jira(jira_username: str, realm: Realm) -> Optional[UserProfile]:
try:
# Try to find a matching user in Zulip
# We search a user's full name, short name,
# and beginning of email address
user = UserProfile.objects.filter(
Q(full_name__iexact=jira_username) |
Q(short_name__iexact=jira_username) |
Q(email__istartswith=jira_username),
is_active=True,
realm=realm).order_by("id")[0]
return user
except IndexError:
return None
def convert_jira_markup(content: str, realm: Realm) -> str:
# Attempt to do some simplistic conversion of JIRA
# formatting to Markdown, for consumption in Zulip
# Jira uses *word* for bold, we use **word**
content = re.sub(r'\*([^\*]+)\*', r'**\1**', content)
# Jira uses {{word}} for monospacing, we use `word`
content = re.sub(r'{{([^\*]+?)}}', r'`\1`', content)
# Starting a line with bq. block quotes that line
content = re.sub(r'bq\. (.*)', r'> \1', content)
# Wrapping a block of code in {quote}stuff{quote} also block-quotes it
quote_re = re.compile(r'{quote}(.*?){quote}', re.DOTALL)
content = re.sub(quote_re, r'~~~ quote\n\1\n~~~', content)
# {noformat}stuff{noformat} blocks are just code blocks with no
# syntax highlighting
noformat_re = re.compile(r'{noformat}(.*?){noformat}', re.DOTALL)
content = re.sub(noformat_re, r'~~~\n\1\n~~~', content)
# Code blocks are delineated by {code[: lang]} {code}
code_re = re.compile(r'{code[^\n]*}(.*?){code}', re.DOTALL)
content = re.sub(code_re, r'~~~\n\1\n~~~', content)
# Links are of form: [https://www.google.com] or [Link Title|https://www.google.com]
# In order to support both forms, we don't match a | in bare links
content = re.sub(r'\[([^\|~]+?)\]', r'[\1](\1)', content)
# Full links which have a | are converted into a better markdown link
full_link_re = re.compile(r'\[(?:(?P<title>[^|~]+)\|)(?P<url>.*)\]')
content = re.sub(full_link_re, r'[\g<title>](\g<url>)', content)
# Try to convert a JIRA user mention of format [~username] into a
# Zulip user mention. We don't know the email, just the JIRA username,
# so we naively guess at their Zulip account using this
if realm:
mention_re = re.compile(u'\\[~(.*?)\\]')
for username in mention_re.findall(content):
# Try to look up username
user_profile = guess_zulip_user_from_jira(username, realm)
if user_profile:
replacement = u"**{}**".format(user_profile.full_name)
else:
replacement = u"**{}**".format(username)
content = content.replace("[~{}]".format(username,), replacement)
return content
def get_in(payload: Dict[str, Any], keys: List[str], default: str='') -> Any:
try:
for key in keys:
payload = payload[key]
except (AttributeError, KeyError, TypeError):
return default
return payload
def get_issue_string(payload: Dict[str, Any], issue_id: Optional[str]=None) -> str:
# Guess the URL as it is not specified in the payload
# We assume that there is a /browse/BUG-### page
# from the REST url of the issue itself
if issue_id is None:
issue_id = get_issue_id(payload)
base_url = re.match(r"(.*)\/rest\/api/.*", get_in(payload, ['issue', 'self']))
if base_url and len(base_url.groups()):
return u"[{}]({}/browse/{})".format(issue_id, base_url.group(1), issue_id)
else:
return issue_id
def get_assignee_mention(assignee_email: str, realm: Realm) -> str:
if assignee_email != '':
try:
assignee_name = get_user_by_delivery_email(assignee_email, realm).full_name
except UserProfile.DoesNotExist:
assignee_name = assignee_email
return u"**{}**".format(assignee_name)
return ''
def get_issue_author(payload: Dict[str, Any]) -> str:
return get_in(payload, ['user', 'displayName'])
def get_issue_id(payload: Dict[str, Any]) -> str:
return get_in(payload, ['issue', 'key'])
def get_issue_title(payload: Dict[str, Any]) -> str:
return get_in(payload, ['issue', 'fields', 'summary'])
def get_issue_subject(payload: Dict[str, Any]) -> str:
return u"{}: {}".format(get_issue_id(payload), get_issue_title(payload))
def get_sub_event_for_update_issue(payload: Dict[str, Any]) -> str:
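    # Jira folds several actions into 'jira:issue_updated'; issue_event_type_name (or, failing
    # that, the presence of a comment/transition in the payload) identifies which one happened.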
sub_event = payload.get('issue_event_type_name', '')
if sub_event == '':
if payload.get('comment'):
return 'issue_commented'
elif payload.get('transition'):
return 'issue_transited'
return sub_event
def get_event_type(payload: Dict[str, Any]) -> Optional[str]:
event = payload.get('webhookEvent')
if event is None and payload.get('transition'):
event = 'jira:issue_updated'
return event
def add_change_info(content: str, field: str, from_field: str, to_field: str) -> str:
content += u"* Changed {}".format(field)
if from_field:
content += u" from **{}**".format(from_field)
if to_field:
content += u" to {}\n".format(to_field)
return content
def handle_updated_issue_event(payload: Dict[str, Any], user_profile: UserProfile) -> str:
# Reassigned, commented, reopened, and resolved events are all bundled
# into this one 'updated' event type, so we try to extract the meaningful
# event that happened
issue_id = get_in(payload, ['issue', 'key'])
issue = get_issue_string(payload, issue_id)
assignee_email = get_in(payload, ['issue', 'fields', 'assignee', 'emailAddress'], '')
assignee_mention = get_assignee_mention(assignee_email, user_profile.realm)
if assignee_mention != '':
assignee_blurb = u" (assigned to {})".format(assignee_mention)
else:
assignee_blurb = ''
sub_event = get_sub_event_for_update_issue(payload)
if 'comment' in sub_event:
if sub_event == 'issue_commented':
verb = 'added comment to'
elif sub_event == 'issue_comment_edited':
verb = 'edited comment on'
else:
verb = 'deleted comment from'
if payload.get('webhookEvent') == 'comment_created':
author = payload['comment']['author']['displayName']
else:
author = get_issue_author(payload)
content = u"{} **{}** {}{}".format(author, verb, issue, assignee_blurb)
comment = get_in(payload, ['comment', 'body'])
if comment:
comment = convert_jira_markup(comment, user_profile.realm)
content = u"{}:\n\n\n{}\n".format(content, comment)
else:
content = u"{} **updated** {}{}:\n\n".format(get_issue_author(payload), issue, assignee_blurb)
changelog = get_in(payload, ['changelog'])
if changelog != '':
# Use the changelog to display the changes, whitelist types we accept
items = changelog.get('items')
for item in items:
field = item.get('field')
if field == 'assignee' and assignee_mention != '':
target_field_string = assignee_mention
else:
# Convert a user's target to a @-mention if possible
target_field_string = u"**{}**".format(item.get('toString'))
from_field_string = item.get('fromString')
if target_field_string or from_field_string:
content = add_change_info(content, field, from_field_string, target_field_string)
elif sub_event == 'issue_transited':
from_field_string = get_in(payload, ['transition', 'from_status'])
target_field_string = u'**{}**'.format(get_in(payload, ['transition', 'to_status']))
if target_field_string or from_field_string:
content = add_change_info(content, 'status', from_field_string, target_field_string)
return content
def handle_created_issue_event(payload: Dict[str, Any]) -> str:
return u"{} **created** {} priority {}, assigned to **{}**:\n\n> {}".format(
get_issue_author(payload),
get_issue_string(payload),
get_in(payload, ['issue', 'fields', 'priority', 'name']),
get_in(payload, ['issue', 'fields', 'assignee', 'displayName'], 'no one'),
get_issue_title(payload)
)
def handle_deleted_issue_event(payload: Dict[str, Any]) -> str:
return u"{} **deleted** {}!".format(get_issue_author(payload), get_issue_string(payload))
@api_key_only_webhook_view("JIRA")
@has_request_variables
def api_jira_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
event = get_event_type(payload)
if event == 'jira:issue_created':
subject = get_issue_subject(payload)
content = handle_created_issue_event(payload)
elif event == 'jira:issue_deleted':
subject = get_issue_subject(payload)
content = handle_deleted_issue_event(payload)
elif event == 'jira:issue_updated':
subject = get_issue_subject(payload)
content = handle_updated_issue_event(payload, user_profile)
elif event == 'comment_created':
subject = get_issue_subject(payload)
content = handle_updated_issue_event(payload, user_profile)
elif event in IGNORED_EVENTS:
return json_success()
else:
raise UnexpectedWebhookEventType('Jira', event)
check_send_webhook_message(request, user_profile,
subject, content,
unquote_url_parameters=True)
return json_success()
|
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
# Default debug modes for GYP
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message):
if 'all' in gyp.debug.keys() or mode in gyp.debug.keys():
ctx = ('unknown', 0, 'unknown')
try:
f = traceback.extract_stack(limit=2)
if f:
ctx = f[0][:3]
except:
pass
print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
ctx[1], ctx[2], message)
def FindBuildFiles():
extension = '.gyp'
files = os.listdir(os.getcwd())
build_files = []
for file in files:
if file[-len(extension):] == extension:
build_files.append(file)
return build_files
def Load(build_files, format, default_variables={},
includes=[], depth='.', params=None, check=False, circular_check=True):
"""
Loads one or more specified build files.
default_variables and includes will be copied before use.
Returns the generator for the specified format and the
data returned by loading the specified build files.
"""
if params is None:
params = {}
flavor = None
if '-' in format:
format, params['flavor'] = format.split('-', 1)
default_variables = copy.copy(default_variables)
# Default variables provided by this program and its modules should be
# named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
# avoiding collisions with user and automatic variables.
default_variables['GENERATOR'] = format
generator_name = 'gyp.generator.' + format
# These parameters are passed in order (as opposed to by key)
# because ActivePython cannot handle key parameters to __import__.
generator = __import__(generator_name, globals(), locals(), generator_name)
for (key, val) in generator.generator_default_variables.items():
default_variables.setdefault(key, val)
# Give the generator the opportunity to set additional variables based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateVariables', None):
generator.CalculateVariables(default_variables, params)
# Give the generator the opportunity to set generator_input_info based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateGeneratorInputInfo', None):
generator.CalculateGeneratorInputInfo(params)
# Fetch the generator specific info that gets fed to input, we use getattr
# so we can default things and the generators only have to provide what
# they need.
generator_input_info = {
'generator_wants_absolute_build_file_paths':
getattr(generator, 'generator_wants_absolute_build_file_paths', False),
'generator_handles_variants':
getattr(generator, 'generator_handles_variants', False),
'non_configuration_keys':
getattr(generator, 'generator_additional_non_configuration_keys', []),
'path_sections':
getattr(generator, 'generator_additional_path_sections', []),
'extra_sources_for_rules':
getattr(generator, 'generator_extra_sources_for_rules', []),
'generator_supports_multiple_toolsets':
getattr(generator, 'generator_supports_multiple_toolsets', False),
'generator_wants_static_library_dependencies_adjusted':
getattr(generator,
'generator_wants_static_library_dependencies_adjusted', True),
'generator_wants_sorted_dependencies':
getattr(generator, 'generator_wants_sorted_dependencies', False),
}
# Process the input specific to this generator.
result = gyp.input.Load(build_files, default_variables, includes[:],
depth, generator_input_info, check, circular_check)
return [generator] + result
def NameValueListToDict(name_value_list):
"""
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
of the pairs. If a string is simply NAME, then the value in the dictionary
is set to True. If VALUE can be converted to an integer, it is.
"""
result = { }
for item in name_value_list:
tokens = item.split('=', 1)
if len(tokens) == 2:
# If we can make it an int, use that, otherwise, use the string.
try:
token_value = int(tokens[1])
except ValueError:
token_value = tokens[1]
# Set the variable to the supplied value.
result[tokens[0]] = token_value
else:
# No value supplied, treat it as a boolean and set it.
result[tokens[0]] = True
return result
def ShlexEnv(env_name):
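  # Split the named environment variable into a list of flags using shell-like rules; [] if unset.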
flags = os.environ.get(env_name, [])
if flags:
flags = shlex.split(flags)
return flags
def FormatOpt(opt, value):
if opt.startswith('--'):
return '%s=%s' % (opt, value)
return opt + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
"""Regenerate a list of command line flags, for an option of action='append'.
The |env_name|, if given, is checked in the environment and used to generate
an initial list of options, then the options that were specified on the
command line (given in |values|) are appended. This matches the handling of
environment variables and command line flags where command line flags override
the environment, while not requiring the environment to be set when the flags
are used again.
"""
flags = []
if options.use_environment and env_name:
for flag_value in ShlexEnv(env_name):
flags.append(FormatOpt(flag, predicate(flag_value)))
if values:
for flag_value in values:
flags.append(FormatOpt(flag, predicate(flag_value)))
return flags
def RegenerateFlags(options):
"""Given a parsed options object, and taking the environment variables into
account, returns a list of flags that should regenerate an equivalent options
object (even in the absence of the environment variables.)
Any path options will be normalized relative to depth.
The format flag is not included, as it is assumed the calling generator will
set that as appropriate.
"""
def FixPath(path):
path = gyp.common.FixIfRelativePath(path, options.depth)
if not path:
return os.path.curdir
return path
def Noop(value):
return value
# We always want to ignore the environment when regenerating, to avoid
# duplicate or changed flags in the environment at the time of regeneration.
flags = ['--ignore-environment']
for name, metadata in options._regeneration_metadata.iteritems():
opt = metadata['opt']
value = getattr(options, name)
value_predicate = metadata['type'] == 'path' and FixPath or Noop
action = metadata['action']
env_name = metadata['env_name']
if action == 'append':
flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
env_name, options))
elif action in ('store', None): # None is a synonym for 'store'.
if value:
flags.append(FormatOpt(opt, value_predicate(value)))
elif options.use_environment and env_name and os.environ.get(env_name):
flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
elif action in ('store_true', 'store_false'):
if ((action == 'store_true' and value) or
(action == 'store_false' and not value)):
flags.append(opt)
elif options.use_environment and env_name:
print >>sys.stderr, ('Warning: environment regeneration unimplemented '
'for %s flag %r env_name %r' % (action, opt,
env_name))
else:
print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
'flag %r' % (action, opt))
return flags
class RegeneratableOptionParser(optparse.OptionParser):
def __init__(self):
self.__regeneratable_options = {}
optparse.OptionParser.__init__(self)
def add_option(self, *args, **kw):
"""Add an option to the parser.
This accepts the same arguments as OptionParser.add_option, plus the
following:
regenerate: can be set to False to prevent this option from being included
in regeneration.
env_name: name of environment variable that additional values for this
option come from.
type: adds type='path', to tell the regenerator that the values of
this option need to be made relative to options.depth
"""
env_name = kw.pop('env_name', None)
if 'dest' in kw and kw.pop('regenerate', True):
dest = kw['dest']
# The path type is needed for regenerating; for optparse we can just treat
# it as a string.
type = kw.get('type')
if type == 'path':
kw['type'] = 'string'
self.__regeneratable_options[dest] = {
'action': kw.get('action'),
'type': type,
'env_name': env_name,
'opt': args[0],
}
optparse.OptionParser.add_option(self, *args, **kw)
def parse_args(self, *args):
values, args = optparse.OptionParser.parse_args(self, *args)
values._regeneration_metadata = self.__regeneratable_options
return values, args
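# Minimal usage sketch (assumed, mirroring main() below): options declared
# with env_name or type='path' are recorded so RegenerateFlags() can later
# rebuild an equivalent command line.
#   parser = RegeneratableOptionParser()
#   parser.add_option('-I', '--include', dest='includes', action='append',
#                     metavar='INCLUDE', type='path')
#   values, args = parser.parse_args(['-I', 'common.gypi', 'all.gyp'])
#   # values.includes == ['common.gypi'], and the regeneration metadata is
#   # attached to values as values._regeneration_metadata.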
def main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('--msvs-version', dest='msvs_version',
regenerate=False,
help='Deprecated; use -G msvs_version=MSVS_VERSION instead')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables", '
'"includes" and "general" or "all" for all of them.')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
default=None, metavar='DIR', type='path',
help='directory to use as the root of the source tree')
# --no-circular-check disables the check for circular relationships between
# .gyp files. These relationships should not exist, but they've only been
# observed to be harmful with the Xcode generator. Chromium's .gyp files
# currently have some circular relationships on non-Mac platforms, so this
# option allows the strict behavior to be used on Macs and the lenient
# behavior to be used elsewhere.
# TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
parser.add_option('--no-circular-check', dest='circular_check',
action='store_false', default=True, regenerate=False,
help="don't check for circular relationships between files")
# We read a few things from ~/.gyp, so set up a var for that.
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
home = None
home_dot_gyp = None
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
else:
break
# TODO(thomasvl): add support for ~/.gyp/defaults
options, build_files_arg = parser.parse_args(args)
build_files = build_files_arg
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split('[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
if sys.platform == 'darwin':
options.formats = ['xcode']
elif sys.platform in ('win32', 'cygwin'):
options.formats = ['msvs']
else:
options.formats = ['make']
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for option, value in sorted(options.__dict__.items()):
if option[0] == '_':
continue
if isinstance(value, basestring):
DebugOutput(DEBUG_GENERAL, " %s: '%s'" % (option, value))
else:
DebugOutput(DEBUG_GENERAL, " %s: %s" % (option, str(value)))
if not build_files:
build_files = FindBuildFiles()
if not build_files:
print >>sys.stderr, (usage + '\n\n%s: error: no build_file') % \
(my_name, my_name)
return 1
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
# absolute path to each build file's directory. The first "src" component
# found will be treated as though it were the path used for --depth.
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in xrange(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
raise Exception, \
'Could not automatically locate src directory. This is a ' + \
'temporary Chromium feature that will be removed. Use ' + \
'--depth as a workaround.'
# If toplevel-dir is not set, we assume that depth is the root of our source
# tree.
if not options.toplevel_dir:
options.toplevel_dir = options.depth
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines:
defines += options.defines
cmdline_default_variables = NameValueListToDict(defines)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL,
"cmdline_default_variables: %s" % cmdline_default_variables)
# Set up includes.
includes = []
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
# .gyp file that's loaded, before anything else is included.
if home_dot_gyp != None:
default_include = os.path.join(home_dot_gyp, 'include.gypi')
if os.path.exists(default_include):
print 'Using overrides found in ' + default_include
includes.append(default_include)
# Command-line --include files come after the default include.
if options.includes:
includes.extend(options.includes)
# Generator flags should be prefixed with the target generator since they
# are global across all generator runs.
gen_flags = []
if options.use_environment:
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
if options.generator_flags:
gen_flags += options.generator_flags
generator_flags = NameValueListToDict(gen_flags)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "generator_flags: %s" % generator_flags)
# TODO: Remove this and the option after we've gotten folks to move to the
# generator flag.
if options.msvs_version:
print >>sys.stderr, \
'DEPRECATED: Use generator flag (-G msvs_version=' + \
options.msvs_version + ') instead of --msvs-version=' + \
options.msvs_version
generator_flags['msvs_version'] = options.msvs_version
# Generate all requested formats (use a set in case we got one format request
# twice)
for format in set(options.formats):
params = {'options': options,
'build_files': build_files,
'generator_flags': generator_flags,
'cwd': os.getcwd(),
'build_files_arg': build_files_arg,
'gyp_binary': sys.argv[0],
'home_dot_gyp': home_dot_gyp}
# Start with the default variables from the command line.
[generator, flat_list, targets, data] = Load(build_files, format,
cmdline_default_variables,
includes, options.depth,
params, options.check,
options.circular_check)
# TODO(mark): Pass |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
# NOTE: flat_list is the flattened dependency graph specifying the order
# that targets may be built. Build systems that operate serially or that
# need to have dependencies defined before dependents reference them should
# generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params)
# Done
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
#!/usr/bin/python3 -u
import os
import numpy as np
import matplotlib as mpl; mpl.use('Agg'); print("plot WITHOUT Xserver"); # this makes it run without Xserver (e.g. on supercomputer) # see http://stackoverflow.com/questions/4931376/generating-matplotlib-graphs-without-a-running-x-server
import matplotlib.pyplot as plt
import sys
'''
import basUtils
import elements
import GridUtils as GU
import ProbeParticleUtils as PPU
import PPPlot
'''
import pyProbeParticle as PPU
import pyProbeParticle.GridUtils as GU
import pyProbeParticle.PPPlot as PPPlot
from pyProbeParticle import basUtils
from pyProbeParticle import elements
#import pyProbeParticle.core as PPC
import pyProbeParticle.HighLevel as PPH
import pyProbeParticle.cpp_utils as cpp_utils
# =============== arguments definition
from optparse import OptionParser
parser = OptionParser()
parser.add_option( "-k", action="store", type="float", help="tip stiffenss [N/m]" )
parser.add_option( "--krange", action="store", type="float", help="tip stiffenss range (min,max,n) [N/m]", nargs=3)
parser.add_option( "-q", action="store", type="float", help="tip charge [e]" )
parser.add_option( "--qrange", action="store", type="float", help="tip charge range (min,max,n) [e]", nargs=3)
parser.add_option( "-a", action="store", type="float", help="oscilation amplitude [A]" )
parser.add_option( "--arange", action="store", type="float", help="oscilation amplitude range (min,max,n) [A]", nargs=3)
parser.add_option( "--iets", action="store", type="float", help="mass [a.u.]; bias offset [eV]; peak width [eV] ", nargs=3 )
parser.add_option( "--tip_base_q", action="store", type="float", help="tip_base charge [e]" )
parser.add_option( "--tip_base_qrange", action="store", type="float", help="tip_base charge range (min,max,n) [e]", nargs=3)
parser.add_option( "--Fz", action="store_true", default=False, help="plot images for Fz " )
parser.add_option( "--df", action="store_true", default=False, help="plot images for dfz " )
parser.add_option( "--save_df" , action="store_true", default=False, help="save frequency shift as df.xsf " )
parser.add_option( "--pos", action="store_true", default=False, help="save probe particle positions" )
parser.add_option( "--atoms", action="store_true", default=False, help="plot atoms to images" )
parser.add_option( "--bonds", action="store_true", default=False, help="plot bonds to images" )
parser.add_option( "--cbar", action="store_true", default=False, help="plot legend to images" )
parser.add_option( "--WSxM", action="store_true", default=False, help="save frequency shift into WsXM *.dat files" )
parser.add_option( "--bI", action="store_true", default=False, help="plot images for Boltzmann current" )
parser.add_option( "--npy" , action="store_true", default=False, help="load and save fields in npy instead of xsf" )
parser.add_option( "--2Dnp" , action="store_true", default=False, help="save fields in 2D npy instead of array" )
parser.add_option( "--noPBC", action="store_false", default=True, help="pbc False" )
parser.add_option( "--no_int", action="store_true", default=False, help="plot without interpolation between the points")
(options, args) = parser.parse_args()
opt_dict = vars(options)
print("opt_dict: ")
print(opt_dict)
if options.npy:
data_format ="npy"
else:
data_format ="xsf"
# =============== Setup
print(" >> OVEWRITING SETTINGS by params.ini ")
PPU.loadParams( 'params.ini' )
#PPPlot.params = PPU.params
print(" >> OVEWRITING SETTINGS by command line arguments ")
# Ks
if opt_dict['krange'] is not None:
Ks = np.linspace( opt_dict['krange'][0], opt_dict['krange'][1], int( opt_dict['krange'][2] ) )
elif opt_dict['k'] is not None:
Ks = [ opt_dict['k'] ]
else:
Ks = [ PPU.params['stiffness'][0] ]
# Qs
if opt_dict['qrange'] is not None:
Qs = np.linspace( opt_dict['qrange'][0], opt_dict['qrange'][1], int( opt_dict['qrange'][2] ) )
elif opt_dict['q'] is not None:
Qs = [ opt_dict['q'] ]
else:
Qs = [ PPU.params['charge'] ]
# Amps
if opt_dict['arange'] is not None:
Amps = np.linspace( opt_dict['arange'][0], opt_dict['arange'][1], int( opt_dict['arange'][2] ) )
elif opt_dict['a'] is not None:
Amps = [ opt_dict['a'] ]
else:
Amps = [ PPU.params['Amplitude'] ]
# TbQs
if opt_dict['tip_base_qrange'] is not None:
TbQs = np.linspace( opt_dict['tip_base_qrange'][0], opt_dict['tip_base_qrange'][1], int( opt_dict['tip_base_qrange'][2] ) )
elif opt_dict['tip_base_q'] is not None:
TbQs = [ opt_dict['tip_base_q'] ]
else:
TbQs = [ float(PPU.params['tip_base'][1]) ]
print("Ks =", Ks)
print("Qs =", Qs)
print("Amps =", Amps)
print("TbQs =", TbQs)
#sys.exit(" STOPPED ")
print(" ============= RUN ")
dz = PPU.params['scanStep'][2]
xTips,yTips,zTips,lvecScan = PPU.prepareScanGrids( )
extent = ( xTips[0], xTips[-1], yTips[0], yTips[-1] )
interpolation=PPU.params['imageInterpolation'] if not opt_dict['no_int'] else None
atoms_str=""
atoms = None
bonds = None
if opt_dict['atoms'] or opt_dict['bonds']:
atoms_str="_atoms"
atoms, tmp1, tmp2 = basUtils.loadAtoms( 'input_plot.xyz' )
del tmp1, tmp2;
# print "atoms ", atoms
if os.path.isfile( 'atomtypes.ini' ):
print(">> LOADING LOCAL atomtypes.ini")
FFparams=PPU.loadSpecies( 'atomtypes.ini' )
else:
FFparams = PPU.loadSpecies( cpp_utils.PACKAGE_PATH+'/defaults/atomtypes.ini' )
iZs,Rs,Qstmp=PPH.parseAtoms(atoms, autogeom = False,PBC = False, FFparams=FFparams)
atom_colors = basUtils.getAtomColors(iZs,FFparams=FFparams)
Rs=Rs.transpose().copy()
atoms= [iZs,Rs[0],Rs[1],Rs[2],atom_colors]
#print "atom_colors: ", atom_colors
if opt_dict['bonds']:
bonds = basUtils.findBonds(atoms,iZs,1.0,FFparams=FFparams)
#print "bonds ", bonds
atomSize = 0.15
cbar_str =""
if opt_dict['cbar']:
cbar_str="_cbar"
for iq,Q in enumerate( Qs ):
for ik,K in enumerate( Ks ):
dirname = "Q%1.2fK%1.2f" %(Q,K)
if opt_dict['pos']:
try:
PPpos, lvec, nDim = GU.load_vec_field( dirname+'/PPpos' ,data_format=data_format)
print(" plotting PPpos : ")
PPPlot.plotDistortions( dirname+"/xy"+atoms_str+cbar_str, PPpos[:,:,:,0], PPpos[:,:,:,1], slices = list(range( 0, len(PPpos))), BG=PPpos[:,:,:,2], extent=extent, atoms=atoms, bonds=bonds, atomSize=atomSize, markersize=2.0, cbar=opt_dict['cbar'] )
del PPpos
except:
print("error: ", sys.exc_info())
print("cannot load : " + ( dirname+'/PPpos_?.' + data_format ))
if opt_dict['iets'] is not None:
#try:
eigvalK, lvec, nDim = GU.load_vec_field( dirname+'/eigvalKs' ,data_format=data_format)
M = opt_dict['iets'][0]
E0 = opt_dict['iets'][1]
w = opt_dict['iets'][2]
print(" plotting IETS M=%f V=%f w=%f " %(M,E0,w))
hbar = 6.58211951440e-16 # [eV.s]
aumass = 1.66053904020e-27 # [kg]
eVA2_to_Nm = 16.0217662 # [eV/A^2] / [N/m]
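# Harmonic-oscillator estimate: E_vib = hbar * sqrt(k / m), with the stiffness
# eigenvalues converted from eV/A^2 to N/m and the mass M converted from
# atomic mass units to kg, giving vibration energies in eV.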
Evib = hbar * np.sqrt( ( eVA2_to_Nm * eigvalK )/( M * aumass ) )
IETS = PPH.symGauss(Evib[:,:,:,0], E0, w) + PPH.symGauss(Evib[:,:,:,1], E0, w) + PPH.symGauss(Evib[:,:,:,2], E0, w)
PPPlot.plotImages( dirname+"/IETS"+atoms_str+cbar_str, IETS, slices = list(range(0,len(IETS))), zs=zTips, extent=extent, interpolation=interpolation, atoms=atoms, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
PPPlot.plotImages( dirname+"/Evib"+atoms_str+cbar_str, Evib[:,:,:,0], slices = list(range(0,len(IETS))), zs=zTips, extent=extent, interpolation=interpolation, atoms=atoms, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
PPPlot.plotImages( dirname+"/Kvib"+atoms_str+cbar_str, 16.0217662 * eigvalK[:,:,:,0], slices = list(range(0,len(IETS))), zs=zTips, extent=extent, atoms=atoms, interpolation=interpolation, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
print("Preparing data for plotting denominators: avoidning negative frequencies via nearest neighbours Uniform Filter")
from scipy.ndimage import uniform_filter
for i in range(len(eigvalK)):
for l in [0]: #range(len(eigvalK[0,0,0])):
eigvalK[i,:,:,l]=uniform_filter(eigvalK[i,:,:,l].copy(), size=3, mode='nearest')
tmp_bool=False
for i in range(len(eigvalK)):
for j in range(len(eigvalK[0])):
for k in range(len(eigvalK[0,0])):
for l in [0]: #range(len(eigvalK[0,0,0])):
if (eigvalK[i,j,k,l] < 0):
print("BEWARE: Negative value at: i,j,k,l:",i,j,k,l)
tmp_bool = True
if tmp_bool:
print("if many negative values appear change FF grid or scanning grid")
denomin = 1/(hbar * np.sqrt( ( eVA2_to_Nm * eigvalK )/( M * aumass ) ))
print("plotting denominators fpr frustrated translation: 1/w1 + 1/w2")
PPPlot.plotImages( dirname+"/denomin"+atoms_str+cbar_str, denomin[:,:,:,0]+denomin[:,:,:,1] , slices = list(range(0,len(denomin))), zs=zTips, extent=extent, interpolation=interpolation, atoms=atoms, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
if opt_dict['WSxM']:
GU.saveWSxM_3D(dirname+"/denomin" , denomin[:,:,:,0]+denomin[:,:,:,1] , extent, slices = list(range(0,len(denomin))) )
GU.saveWSxM_3D(dirname+"/IETS" , IETS , extent, slices = list(range(0,len(IETS))) )
del eigvalK; del Evib; del IETS; del denomin;
#except:
# print "error: ", sys.exc_info()
# print "cannot load : " + ( dirname+'/eigvalKs_?.' + data_format )
if ( ( opt_dict['df'] or opt_dict['save_df'] or opt_dict['WSxM'] or opt_dict['2Dnp']) ):
try :
fzs, lvec, nDim = GU.load_scal_field( dirname+'/OutFz' , data_format=data_format)
if not ( (len(TbQs) == 1 ) and ( TbQs[0] == 0.0 ) ):
print("loading tip_base forces")
try:
fzt, lvect, nDimt = GU.load_scal_field( './OutFzTip_base' , data_format=data_format)
except:
print("error: ", sys.exc_info())
print("cannot load : ", './OutFzTip_base.'+data_format)
for iA,Amp in enumerate( Amps ):
for iT, TbQ in enumerate( TbQs ):
if (TbQ == 0.0 ):
AmpStr = "/Amp%2.2f" %Amp
print("Amp= ",AmpStr)
dirNameAmp = dirname+AmpStr
if not os.path.exists( dirNameAmp ):
os.makedirs( dirNameAmp )
dfs = PPU.Fz2df( fzs, dz = dz, k0 = PPU.params['kCantilever'], f0=PPU.params['f0Cantilever'], n= int(Amp/dz) )
else:
AmpStr = "/Amp%2.2f_qTip%2.2f" %(Amp,TbQ)
print("Amp= ",AmpStr)
dirNameAmp = dirname+AmpStr
if not os.path.exists( dirNameAmp ):
os.makedirs( dirNameAmp )
dfs = PPU.Fz2df( fzs + TbQ*fzt, dz = dz, k0 = PPU.params['kCantilever'], f0=PPU.params['f0Cantilever'], n= int(Amp/dz) )
if opt_dict['save_df']:
GU.save_scal_field( dirNameAmp+'/df', dfs, lvec, data_format=data_format )
if opt_dict['df']:
print(" plotting df : ")
PPPlot.plotImages( dirNameAmp+"/df"+atoms_str+cbar_str,
dfs, slices = list(range( 0,
len(dfs))), zs=zTips, extent=extent, interpolation=interpolation, atoms=atoms, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
if opt_dict['WSxM']:
print(" printing df into WSxM files :")
GU.saveWSxM_3D( dirNameAmp+"/df" , dfs , extent , slices=None)
if opt_dict['2Dnp']:
print(" printing df into separate np files :")
for iz in range(len(dfs)):
np.save(dirNameAmp+"/df_%03d.npy" %iz ,dfs[iz])
del dfs
del fzs
except:
print("error: ", sys.exc_info())
print("cannot load : ", dirname+'/OutFz.'+data_format)
if opt_dict['Fz'] :
try :
fzs, lvec, nDim = GU.load_scal_field( dirname+'/OutFz' , data_format=data_format)
print(" plotting Fz : ")
PPPlot.plotImages(dirname+"/Fz"+atoms_str+cbar_str,
fzs, slices = list(range( 0,
len(fzs))), zs=zTips, extent=extent, interpolation=interpolation, atoms=atoms, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
if opt_dict['WSxM']:
print(" printing Fz into WSxM files :")
GU.saveWSxM_3D( dirname+"/Fz" , fzs , extent , slices=None)
del fzs
except:
print("error: ", sys.exc_info())
print("cannot load : ", dirname+'/OutFz.'+data_format)
if opt_dict['bI']:
try:
I, lvec, nDim = GU.load_scal_field( dirname+'/OutI_boltzmann', data_format=data_format )
print(" plotting Boltzmann current: ")
PPPlot.plotImages(
dirname+"/OutI"+atoms_str+cbar_str,
I, slices = list(range( 0,
len(I))), zs=zTips, extent=extent, interpolation=interpolation, atoms=atoms, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
del I
except:
print("error: ", sys.exc_info())
print("cannot load : " + ( dirname+'/OutI_boltzmann.'+data_format ))
print(" ***** ALL DONE ***** ")
#plt.show() # for interactive plotting, comment out the "import matplotlib as mpl; mpl.use('Agg')" line near the top of this script
|
|
from collections import namedtuple
from enum import IntEnum, Enum
import os
ROOT_DIRECTORY = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.pardir)
)
DATA_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'data')
# ------------------------------------------------------------------------------
# For general backtest information
# ------------------------------------------------------------------------------
ANNUAL_FACTOR = 252.0
# ------------------------------------------------------------------------------
# For futures contract
# ------------------------------------------------------------------------------
RETURN_KEY_PRIORITY = ("Settle", "Settlement Price", "Last Traded",
"Last", "Close", "Previous Settlement")
VOLUME_KEY_PRIORITY = ('Volume', 'Total Volume')
DEFAULT_ROLL_RULE = "-3bd"
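# Note on the rule strings used throughout this module (an interpretation,
# not documented here): they appear to chain date offsets, e.g. 'bd' for
# business days, 'BMonthEnd' for the business month end, and
# '-1Fri+1Fri+2Fri' for the third Friday of the contract month. Treat this
# reading as an assumption when interpreting the schedules below.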
QUANDL_GENERIC_TICKER_MATCH = r'^\w+/\w+$'
QUANDL_FULL_TICKER_MATCH = r'^\w+/\w+[FGHJKMNQUVXZ][0-9]+$'
QUANDL_TICKER_FORMAT = r'^{}[FGHJKMNQUVXZ][0-9]+$'
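# Examples (illustrative): 'CME/ES' matches QUANDL_GENERIC_TICKER_MATCH, while
# 'CME/ESZ2015' matches QUANDL_FULL_TICKER_MATCH (month code Z, year 2015);
# QUANDL_TICKER_FORMAT.format('CME/ES') builds that full pattern for one root.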
futures_info = namedtuple("futures_info",
["full_name", "asset_class", "start_from",
"denominator", "tick_size", "contract_ccy",
"roll_schedule",
"first_notice_date", "last_trade_date"])
class FutureContractMonth(IntEnum):
F = 1
G = 2
H = 3
J = 4
K = 5
M = 6
N = 7
Q = 8
U = 9
V = 10
X = 11
Z = 12
class AssetClass(Enum):
EQUITY_FUT = "equity_futures"
VOL_INDEX_FUT = "volatility_index_futures"
GOVT_FUT = "government_bond_futures"
MM_FUT = "money_market_futures"
FX_FUT = "fx_futures"
COMDTY_FUT = "commodity_futures"
class Denominator(Enum):
GOVT_FUT = 'government_bond_futures'
MM_FUT = 'money_market_futures'
class FuturesInfo(Enum):
CME_ES = futures_info("E-mini S&P 500 Index", AssetClass.EQUITY_FUT.value,
"Z1997", None, 0.25, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
CME_SP = futures_info("Full-size S&P 500 Index",
AssetClass.EQUITY_FUT.value,
"M1982", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
CME_NQ = futures_info("E-mini NASDAQ 100 Index",
AssetClass.EQUITY_FUT.value,
"U1999", None, 0.25, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
CME_ND = futures_info("Full-size NASDAQ 100 Index",
AssetClass.EQUITY_FUT.value,
"H1998", None, 0.25, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
CME_DJ = futures_info("Full-size Dow Jones", AssetClass.EQUITY_FUT.value,
"H1998", None, 1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
CME_YM = futures_info("E-mini Dow Jones Futures", AssetClass.EQUITY_FUT.value,
"H2012", None, 1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
CME_MD = futures_info("S&P 400 MidCap Index", AssetClass.EQUITY_FUT.value,
"H1992", None, 0.05, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
ICE_RF = futures_info("Russell 1000", AssetClass.EQUITY_FUT.value,
"U2008", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
ICE_TF = futures_info("Russell Small-Cap", AssetClass.EQUITY_FUT.value,
"H2007", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
ICE_RV = futures_info("Russell Value", AssetClass.EQUITY_FUT.value,
"M2010", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
ICE_RG = futures_info("Russell Growth", AssetClass.EQUITY_FUT.value,
"M2010", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
SGX_NK = futures_info("Nikkei 225 Index", AssetClass.EQUITY_FUT.value,
"Z2013", None, 5, "JPY",
["H", "M", "U", "Z"],
None, "-Thu+Thu+Thu-2bd") # sometimes prices are missing
CME_NK = futures_info("Nikkei 225 Index USD", AssetClass.EQUITY_FUT.value,
"Z1990", None, 5, "USD",
["H", "M", "U", "Z"],
None, "-Fri+Fri+Fri-1bd")
EUREX_FESX = futures_info("EURO STOXX 50", AssetClass.EQUITY_FUT.value,
"U1998", None, 1, "EUR",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FDAX = futures_info("DAX", AssetClass.EQUITY_FUT.value,
"H1997", None, 0.5, "EUR",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FSMI = futures_info("SMI", AssetClass.EQUITY_FUT.value,
"Z2013", None, 1, "CHF",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
LIFFE_FCE = futures_info("CAC40", AssetClass.EQUITY_FUT.value,
"H1999", None, 0.5, "EUR",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
LIFFE_Z = futures_info("FTSE 100", AssetClass.EQUITY_FUT.value,
"M1984", None, 0.5, "GBP",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
LIFFE_FTI = futures_info("AEX", AssetClass.EQUITY_FUT.value,
"H2014", None, 0.05, "EUR",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
# HKEX_HSI = futures_info("Hong Kong Hang Seng", AssetClass.EQUITY_FUT.value,
# "U1997", None, 0.5, "HKD",
# ["H", "M", "U", "Z"],
# None, "+BMonthEnd-1bd")
CME_IBV = futures_info("Ibovespa", AssetClass.EQUITY_FUT.value,
"Z2012", None, 5, "USD",
["G", "J", "M", "Q", "V", "Z"],
None, "+14d+Wed-Wed-1bd+1bd")
CFFEX_IF = futures_info("CSI 300", AssetClass.EQUITY_FUT.value,
"Z2010", None, 0.2, "CNY",
[m.name for m in FutureContractMonth],
None, "-1Fri+1Fri+2Fri")
MX_SXF = futures_info("S&P/TSX 60 Index", AssetClass.EQUITY_FUT.value,
"M2011", None, 0.1, "CAD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri-1bd")
SGX_IN = futures_info("Nifty Index", AssetClass.EQUITY_FUT.value,
"Z2013", None, 0.5, "USD",
["H", "M", "U", "Z"],
None, "+BMonthEnd+Thu-Thu")
LIFFE_BXF = futures_info("BEL 20 Index", AssetClass.EQUITY_FUT.value,
"V2013", None, 0.5, "EUR",
[m.name for m in FutureContractMonth],
None, "-1Fri+1Fri+2Fri")
LIFFE_PSI = futures_info("PSI 20 Index", AssetClass.EQUITY_FUT.value,
"Z2013", None, 1, "EUR",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
ASX_AP = futures_info("Australia SPI 200 Index",
AssetClass.EQUITY_FUT.value,
"Z2013", None, 1, "AUD",
["H", "M", "U", "Z"],
None, "-1Thu+1Thu+2Thu")
SGX_CN = futures_info("FTSE China A50 Index", AssetClass.EQUITY_FUT.value,
"V2013", None, 1, "USD",
[m.name for m in FutureContractMonth],
None, "+BMonthEnd-1bd")
SGX_ID = futures_info("MSCI Indonesia Index", AssetClass.EQUITY_FUT.value,
"V2013", None, 5, "USD",
[m.name for m in FutureContractMonth],
None, "+BMonthEnd-1bd")
SGX_SG = futures_info("MSCI Singapore Index", AssetClass.EQUITY_FUT.value,
"V2013", None, 0.05, "USD",
[m.name for m in FutureContractMonth],
None, "+BMonthEnd-1bd")
SGX_TW = futures_info("MSCI Taiwan Index", AssetClass.EQUITY_FUT.value,
"V2013", None, 0.1, "USD",
[m.name for m in FutureContractMonth],
None, "+BMonthEnd-1bd")
EUREX_FMWO = futures_info("MSCI World Index", AssetClass.EQUITY_FUT.value,
"U2013", None, 1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FMEU = futures_info("MSCI Europe Index", AssetClass.EQUITY_FUT.value,
"U2013", None, 0.05, "EUR",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FMMX = futures_info("MSCI Mexico Index", AssetClass.EQUITY_FUT.value,
"U2013", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FMCN = futures_info("MSCI China Free Index", AssetClass.EQUITY_FUT.value,
"U2013", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FMJP = futures_info("MSCI Japan Index", AssetClass.EQUITY_FUT.value,
"U2013", None, 1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FMRS = futures_info("MSCI Russia Index", AssetClass.EQUITY_FUT.value,
"U2013", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FMZA = futures_info("MSCI South Africa Index", AssetClass.EQUITY_FUT.value,
"U2013", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FMTH = futures_info("MSCI Thailand Index", AssetClass.EQUITY_FUT.value,
"U2013", None, 0.5, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FMMY = futures_info("MSCI Malaysia Index", AssetClass.EQUITY_FUT.value,
"U2013", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FMEA = futures_info("MSCI Emerging Markets Asia Index", AssetClass.EQUITY_FUT.value,
"U2013", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FMEM = futures_info("MSCI Emerging Markets Index", AssetClass.EQUITY_FUT.value,
"U2013", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FMEL = futures_info("MSCI Emerging Markets Latin America Index", AssetClass.EQUITY_FUT.value,
"U2013", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
EUREX_FMEE = futures_info("MSCI Emerging Markets EMEA Index", AssetClass.EQUITY_FUT.value,
"U2013", None, 0.1, "USD",
["H", "M", "U", "Z"],
None, "-1Fri+1Fri+2Fri")
CBOE_VX = futures_info("VIX Futures", AssetClass.VOL_INDEX_FUT.value,
"K2004", None, 0.05, "USD",
[m.name for m in FutureContractMonth],
None, "+MonthBegin-1Fri+1Fri+2Fri-30d+1bd-1bd")
EUREX_FVS = futures_info("VSTOXX Futures", AssetClass.VOL_INDEX_FUT.value,
"U2013", None, 0.05, "EUR",
[m.name for m in FutureContractMonth],
None, "+MonthBegin-1Fri+1Fri+2Fri-30d+1bd-1bd")
# Government bond futures - CME
# https://www.cmegroup.com/education/files/
# understanding-treasury-futures.pdf
CME_TU = futures_info("2-year Treasury Note", AssetClass.GOVT_FUT.value,
"U1990", Denominator.GOVT_FUT.value, 1.0 / 128, "USD",
["H", "M", "U", "Z"],
"-BMonthEnd", "+BMonthEnd")
CME_FV = futures_info("5-year Treasury Note", AssetClass.GOVT_FUT.value,
"U1988", Denominator.GOVT_FUT.value, 1.0 / 128, "USD",
["H", "M", "U", "Z"],
"-BMonthEnd", "+BMonthEnd")
CME_TY = futures_info("10-year Treasury Note", AssetClass.GOVT_FUT.value,
"M1990", Denominator.GOVT_FUT.value, 1.0 / 64, "USD",
["H", "M", "U", "Z"],
"-BMonthEnd", "+BMonthEnd-7bd")
CME_US = futures_info("30-year Treasury Bond", AssetClass.GOVT_FUT.value,
"Z1977", Denominator.GOVT_FUT.value, 1.0 / 32, "USD",
["H", "M", "U", "Z"],
"-BMonthEnd", "+BMonthEnd-7bd")
CME_UL = futures_info("Ultra Treasury Bond", AssetClass.GOVT_FUT.value,
"Z2012", Denominator.GOVT_FUT.value, 1.0 / 32, "USD",
["H", "M", "U", "Z"],
"-BMonthEnd", "+BMonthEnd-7bd")
EUREX_FGBS = futures_info("Euro-Schatz", AssetClass.GOVT_FUT.value,
"M1997", Denominator.GOVT_FUT.value, 0.005, "EUR",
["H", "M", "U", "Z"],
None, "+9d-1bd+1bd-2bd")
EUREX_FGBM = futures_info("Euro-Bobl", AssetClass.GOVT_FUT.value,
"U1998", Denominator.GOVT_FUT.value, 0.01, "EUR",
["H", "M", "U", "Z"],
None, "+9d-1bd+1bd-2bd")
EUREX_FGBL = futures_info("Euro-Bund", AssetClass.GOVT_FUT.value,
"H1991", Denominator.GOVT_FUT.value, 0.01, "EUR",
["H", "M", "U", "Z"],
None, "+9d-1bd+1bd-2bd")
EUREX_FGBX = futures_info("Euro-Buxl", AssetClass.GOVT_FUT.value,
"Z2013", Denominator.GOVT_FUT.value, 0.02, "EUR",
["H", "M", "U", "Z"],
None, "+9d-1bd+1bd-2bd")
EUREX_FBTS = futures_info("Short-term Euro-BTP", AssetClass.GOVT_FUT.value,
"U2013", Denominator.GOVT_FUT.value, 0.01, "EUR",
["H", "M", "U", "Z"],
None, "+9d-1bd+1bd-2bd")
EUREX_FBTP = futures_info("Long-term Euro-BTP", AssetClass.GOVT_FUT.value,
"Z2009", Denominator.GOVT_FUT.value, 0.01, "EUR",
["H", "M", "U", "Z"],
None, "+9d-1bd+1bd-2bd")
EUREX_FOAT = futures_info("Euro-OAT", AssetClass.GOVT_FUT.value,
"H2013", Denominator.GOVT_FUT.value, 0.01, "EUR",
["H", "M", "U", "Z"],
None, "+9d-1bd+1bd-2bd")
EUREX_CONF = futures_info("Swiss CONF", AssetClass.GOVT_FUT.value,
"U2013", Denominator.GOVT_FUT.value, 0.01, "CHF",
["H", "M", "U", "Z"],
None, "+9d-1bd+1bd-2bd")
MX_CGB = futures_info("10-year Canadian Bond", AssetClass.GOVT_FUT.value,
"H2009", Denominator.GOVT_FUT.value, 0.01, "CAD",
["H", "M", "U", "Z"],
"-3bd", "+BMonthEnd-7bd")
LIFFE_G = futures_info("Short Gilt", AssetClass.GOVT_FUT.value,
"Z2013", Denominator.GOVT_FUT.value, 0.01, "GBP",
["H", "M", "U", "Z"],
"-2bd", "+BMonthEnd-2bd")
LIFFE_H = futures_info("Medium Gilt", AssetClass.GOVT_FUT.value,
"Z2013", Denominator.GOVT_FUT.value, 0.01, "GBP",
["H", "M", "U", "Z"],
"-2bd", "+BMonthEnd-2bd")
LIFFE_R = futures_info("Long Gilt", AssetClass.GOVT_FUT.value,
"U1990", Denominator.GOVT_FUT.value, 0.01, "GBP",
["H", "M", "U", "Z"],
"-2bd", "+BMonthEnd-2bd")
# SGX JGB ceases trading one business day before the OSE contract;
# sometimes data on the last trading day are missing.
SGX_JB = futures_info("10-year Mini Japanese Government Bond",
AssetClass.GOVT_FUT.value,
"Z2013", Denominator.GOVT_FUT.value, 0.01, "JPY",
["H", "M", "U", "Z"],
None, "+19d-1bd+1bd-5bd-5bd")
# Money market futures
CME_ED = futures_info("3-month Eurodollar Futures", AssetClass.MM_FUT.value,
"H1982", Denominator.MM_FUT.value, 0.0025, "USD",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
LIFFE_L = futures_info("Short Sterling Futures", AssetClass.MM_FUT.value,
"H1990", Denominator.MM_FUT.value, 0.005, "GBP",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed")
LIFFE_I = futures_info("3-month EURIBOR Futures", AssetClass.MM_FUT.value,
"H1999", Denominator.MM_FUT.value, 0.005, "EUR",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
LIFFE_S = futures_info("EUROSWISS Interest Rate Futures",
AssetClass.MM_FUT.value,
"H1991", Denominator.MM_FUT.value, 0.01, "CHF",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
TFX_JBA = futures_info("Tokyo 3-month Euroyen Futures",
AssetClass.MM_FUT.value,
"U1992", Denominator.MM_FUT.value, 0.005, "JPY",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
# FX - CME
# https://www.cmegroup.com/education/files/understanding-fx-futures.pdf
CME_EC = futures_info("Euro FX", AssetClass.FX_FUT.value,
"H1999", None, 0.00005, "USD",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
CME_JY = futures_info("Japanese Yen", AssetClass.FX_FUT.value,
"H1977", None, 0.005 * 100, "USD",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
CME_BP = futures_info("British Pound", AssetClass.FX_FUT.value,
"U1975", None, 0.01 / 100, "USD",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
CME_SF = futures_info("Swiss Franc", AssetClass.FX_FUT.value,
"U1975", None, 0.01 / 100, "USD",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
CME_CD = futures_info("Canadian Dollar", AssetClass.FX_FUT.value,
"M1977", None, 0.005 / 100, "USD",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
CME_AD = futures_info("Australian Dollar", AssetClass.FX_FUT.value,
"H1987", None, 0.01 / 100, "USD",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
CME_NE = futures_info("New Zealand Dollar", AssetClass.FX_FUT.value,
"H2004", None, 0.01 / 100, "USD",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
CME_MP = futures_info("Mexican Peso", AssetClass.FX_FUT.value,
"M1995", None, 0.001 * 10000, "USD",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
CME_RU = futures_info("Russian Ruble", AssetClass.FX_FUT.value,
"Z2011", None, 0.0005 * 10000, "USD",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
CME_BR = futures_info("Brazilian Real", AssetClass.FX_FUT.value,
"H1996", None, 0.005 / 100, "USD",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
CME_RA = futures_info("South African Rand", AssetClass.FX_FUT.value,
"H2014", None, 0.0025 * 10000, "USD",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
CME_PZ = futures_info("Polish Zloty", AssetClass.FX_FUT.value,
"H2014", None, 0.00002, "USD",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
CME_TRY = futures_info("Turkish Lira", AssetClass.FX_FUT.value,
"H2014", None, 0.0001, "TRY",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
CME_CNH = futures_info("Standard-size USD/Offshore RMB (CNH)",
AssetClass.FX_FUT.value,
"H2014", None, 0.0001, "CNH",
["H", "M", "U", "Z"],
None, "-1Wed+1Wed+2Wed-2bd")
# CME - Commodity
# Grains https://www.cmegroup.com/trading/agricultural/files/
# AC-268_Grains_FC_FINAL_SR.pdf
CME_GI = futures_info("S&P GSCI", AssetClass.COMDTY_FUT.value,
"Q1992", None, 0.05, "USD",
[m.name for m in FutureContractMonth],
None, "+10bd")
CME_C = futures_info("Chicago Corn", AssetClass.COMDTY_FUT.value,
"H1960", None, 0.25, "USD",
["H", "K", "N", "U", "Z"],
None, "+14d-1bd")
CME_W = futures_info("Chicago Wheat", AssetClass.COMDTY_FUT.value,
"Z1959", None, 0.25, "USD",
["H", "K", "N", "U", "Z"],
None, "+14d-1bd")
CME_S = futures_info("Chicago Soybeans", AssetClass.COMDTY_FUT.value,
"F1970", None, 0.25, "USD",
["F", "H", "K", "N", "Q", "U", "X"],
None, "+14d-1bd")
CME_KW = futures_info("KC HRW Wheat", AssetClass.COMDTY_FUT.value,
"N1976", None, 0.25, "USD",
["H", "K", "N", "U", "Z"],
None, "+14d-1bd")
ICE_RS = futures_info('Canola',
AssetClass.COMDTY_FUT.value,
'F1981', None, 0.1, 'USD',
['F', 'H', 'K', 'N', 'X'],
None, '+14d+1bd-1bd')
LIFFE_EBM = futures_info('Milling Wheat',
AssetClass.COMDTY_FUT.value,
'H2013', None, 0.01, 'EUR',
['F', 'H', 'K', 'U', 'X', 'Z'],
None, '+9d-1bd+1bd')
LIFFE_ECO = futures_info('Rapeseed',
AssetClass.COMDTY_FUT.value,
'X2013', None, 0.25, 'EUR',
['G', 'K', 'Q', 'X'],
None, '-BMonthEnd')
MGEX_MW = futures_info('Hard Red Spring Wheat',
AssetClass.COMDTY_FUT.value,
'H1989', None, 0.25, 'USD',
['H', 'K', 'N', 'U', 'X'],
None, '+14d+1bd-1bd')
# Gold https://www.cmegroup.com/trading/metals/files/
# MT-055E_GoldFuturesOptions.pdf
CME_GC = futures_info("COMEX Gold", AssetClass.COMDTY_FUT.value,
"G1975", None, 0.1, "USD",
["G", "J", "M", "Q", "V", "Z"],
"+0bd", "+MonthEnd-3bd")
# Silver https://www.cmegroup.com/trading/metals/files/
# cme-micro-silver-article.pdf
CME_SI = futures_info("COMEX Silver", AssetClass.COMDTY_FUT.value,
"H1964", None, 0.005, "USD",
["F", "H", "K", "N", "U", "Z"],
"+0bd", "+MonthEnd-3bd")
# Platinum https://www.cmegroup.com/trading/metals/files/
# platinum-and-palladium-futures-and-options.pdf
CME_PL = futures_info("Platinum", AssetClass.COMDTY_FUT.value,
"F1970", None, 0.1, "USD",
["F", "J", "N", "V"],
"+0bd", "+MonthEnd-2bd")
CME_PA = futures_info("Palladium", AssetClass.COMDTY_FUT.value,
"H1977", None, 0.05, "USD",
["H", "M", "U", "Z"],
"+0bd", "+MonthEnd-2bd")
# Copper https://www.cmegroup.com/trading/metals/files/
# copper-futures-and-options.pdf
CME_HG = futures_info("Copper CME", AssetClass.COMDTY_FUT.value,
"Z1959", None, 0.05 / 100, "USD",
["H", "K", "N", "U", "Z"],
"+0bd", "+MonthEnd-3bd")
# Crude https://www.cmegroup.com/trading/energy/files/
# light-sweet-crude-oil-futures-options.pdf
CME_CL = futures_info("WTI Crude Oil", AssetClass.COMDTY_FUT.value,
"M1983", None, 0.01, "USD",
[m.name for m in FutureContractMonth],
None, "-1m+24d+1bd-4bd")
# https://www.cmegroup.com/trading/energy/files/
# EN-171_EnergyRetailBrochure_LowRes.pdf
CME_HO = futures_info("Heating Oil", AssetClass.COMDTY_FUT.value,
"F1980", None, 0.01 / 100, "USD",
[m.name for m in FutureContractMonth],
None, "-BMonthEnd")
CME_RB = futures_info("Gasoline", AssetClass.COMDTY_FUT.value,
"F2006", None, 0.01 / 100, "USD",
[m.name for m in FutureContractMonth],
None, "-BMonthEnd")
# Natural gas https://www.cmegroup.com/education/files/
# PM310_Natural_Gas_Futures.pdf
CME_NG = futures_info("Natural Gas", AssetClass.COMDTY_FUT.value,
"M1990", None, 0.001, "USD",
[m.name for m in FutureContractMonth],
None, "-3bd")
CME_N9 = futures_info('PJM Western Hub Real-Time Off-Peak',
AssetClass.COMDTY_FUT.value,
'G2014', None, 0.05, 'USD',
[m.name for m in FutureContractMonth],
None, '-BMonthEnd')
CME_B6 = futures_info('PJM Northern Illinois Hub Real-Time Off-Peak',
AssetClass.COMDTY_FUT.value,
'G2014', None, 0.05, 'USD',
[m.name for m in FutureContractMonth],
None, '-BMonthEnd')
CME_E4 = futures_info('PJM Western Hub Day-Ahead Off-Peak',
AssetClass.COMDTY_FUT.value,
'G2014', None, 0.05, 'USD',
[m.name for m in FutureContractMonth],
None, '-BMonthEnd-1bd')
CME_D2 = futures_info('NYISO Zone G Day-Ahead Off-Peak',
AssetClass.COMDTY_FUT.value,
'G2014', None, 0.05, 'USD',
[m.name for m in FutureContractMonth],
None, '-BMonthEnd-1bd')
CME_L3 = futures_info('PJM Northern Illinois Hub Day-Ahead Off-Peak',
AssetClass.COMDTY_FUT.value,
'G2014', None, 0.05, 'USD',
[m.name for m in FutureContractMonth],
None, '-BMonthEnd-1bd')
CME_CU = futures_info('Chicago Ethanol',
AssetClass.COMDTY_FUT.value,
'G2014', None, 0.0001, 'USD',
[m.name for m in FutureContractMonth],
None, '-1bd')
ICE_B = futures_info("Brent Crude Oil", AssetClass.COMDTY_FUT.value,
"F1993", None, 0.01, "USD",
[m.name for m in FutureContractMonth],
None, "-2BMonthEnd")
ICE_G = futures_info("Gasoil", AssetClass.COMDTY_FUT.value,
"F1990", None, 0.25, "USD",
[m.name for m in FutureContractMonth],
None, "+13d-2bd")
ICE_C = futures_info('EUA',
AssetClass.COMDTY_FUT.value,
'Z2005', None, 0.01, 'EUR',
['H', 'M', 'U', 'Z'],
None, '+MonthEnd+Mon-2Mon+1bd-1bd') # for convenience
ICE_M = futures_info('UK Natural Gas',
AssetClass.COMDTY_FUT.value,
'H1997', None, 0.01, 'GBP',
[m.name for m in FutureContractMonth],
None, '-2bd')
# Softs
ICE_SB = futures_info("Sugar No. 11", AssetClass.COMDTY_FUT.value,
"H1964", None, 0.01, "USD",
["H", "K", "N", "V"],
None, "-BMonthEnd")
ICE_KC = futures_info("Coffee C", AssetClass.COMDTY_FUT.value,
"Z1973", None, 0.05, "USD",
["H", "K", "N", "U", "Z"],
None, "+BMonthEnd-8bd")
ICE_CT = futures_info("Cotton", AssetClass.COMDTY_FUT.value,
"K1972", None, 0.01, "USD",
["H", "K", "N", "V", "Z"],
"-1bd+1bd-5bd", "+BMonthEnd-16bd")
ICE_CC = futures_info("Cocoa", AssetClass.COMDTY_FUT.value,
"H1970", None, 1, "USD",
["H", "K", "N", "U", "Z"],
"-1bd+1bd-5bd", "+BMonthEnd-10bd-1bd")
ICE_OJ = futures_info('Orange Juice',
AssetClass.COMDTY_FUT.value,
'K1967', None, 0.05, 'USD',
['F', 'H', 'K', 'N', 'U', 'X'],
None, '+BMonthEnd-14bd')
LIFFE_W = futures_info('White Sugar',
AssetClass.COMDTY_FUT.value,
'V1993', None, 0.1, 'USD',
['H', 'K', 'Q', 'V', 'Z'],
None, '-15d+1bd-1bd')
# https://www.cmegroup.com/trading/agricultural/files/
# fact-card-cattle-futures-options.pdf
CME_LC = futures_info("Live Cattle", AssetClass.COMDTY_FUT.value,
"J1965", None, 0.025, "USD",
["G", "J", "M", "Q", "V", "Z"],
"-Mon+Mon", "+BMonthEnd") # FIXME to check
CME_FC = futures_info("Feeder Cattle", AssetClass.COMDTY_FUT.value,
"H1974", None, 0.01, "USD",
["F", "H", "J", "K", "Q", "U", "V", "Z"],
None, "+BMonthEnd-Thu")
# https://www.cmegroup.com/trading/agricultural/files/
# Lean-Hog-Futures-Options.pdf
CME_LN = futures_info("Lean Hogs", AssetClass.COMDTY_FUT.value,
"G1970", None, 0.025, "USD",
["G", "J", "M", "N", "Q", "V", "Z"],
None, "-1bd+1bd+9bd")
CME_DA = futures_info("Milk", AssetClass.COMDTY_FUT.value,
"F2011", None, 0.01, "USD",
["F", "J", "M", "N", "Q", "V", "Z"],
None, "-1bd+1bd+9bd")
# http://www.hedgebroker.com/documents/education/
# CME_GrainAndOilseedFuturesAndOptions.pdf
CME_BO = futures_info("Soybean Oil", AssetClass.COMDTY_FUT.value,
"F1960", None, 0.01, "USD",
["F", "H", "K", "N", "Q", "U", "V", "Z"],
"-BMonthEnd", "+14d-1bd") # FIXME to check
CME_SM = futures_info("Soybean meat", AssetClass.COMDTY_FUT.value,
"F1964", None, 0.01, "USD",
["F", "H", "K", "N", "Q", "U", "V", "Z"],
"-BMonthEnd", "+14d-1bd") # FIXME to check
class PriceSkipDates(Enum):
ICE_RV = ["2014-04-15"]
ICE_RG = ["2014-04-15"]
LIFFE_BXF = ["2015-08-05"]
SGX_NK = ['2018-01-26', '2018-01-29', '2018-01-30']
SGX_IN = ['2018-01-26']
SGX_CN = ['2018-01-26']
EUREX_FBTP = ['2009-09-01']
SGX_ID = ['2018-01-26']
class ReturnSkipDates(Enum):
CME_RU = ["2014-07-14"]
CME_BR = ["1999-12-14", "2000-01-03", "2000-03-28", "2000-11-24",
"2000-12-01"]
|
|
import taichi.lang
from taichi._lib import core as _ti_core
from taichi.lang.util import python_scope, to_numpy_type, to_pytorch_type
class Field:
"""Taichi field with SNode implementation.
A field is constructed by a list of field members.
For example, a scalar field has 1 field member, while a 3x3 matrix field has 9 field members.
A field member is a Python Expr wrapping a C++ GlobalVariableExpression.
A C++ GlobalVariableExpression wraps the corresponding SNode.
Args:
vars (List[Expr]): Field members.
"""
def __init__(self, _vars):
self.vars = _vars
self.host_accessors = None
self.grad = None
@property
def snode(self):
"""Gets representative SNode for info purposes.
Returns:
SNode: Representative SNode (SNode of first field member).
"""
return self._snode
@property
def _snode(self):
"""Gets representative SNode for info purposes.
Returns:
SNode: Representative SNode (SNode of first field member).
"""
return taichi.lang.snode.SNode(self.vars[0].ptr.snode())
@property
def shape(self):
"""Gets field shape.
Returns:
Tuple[Int]: Field shape.
"""
return self._snode.shape
@property
def dtype(self):
"""Gets data type of each individual value.
Returns:
DataType: Data type of each individual value.
"""
return self._snode._dtype
@property
def _name(self):
"""Gets field name.
Returns:
str: Field name.
"""
return self._snode._name
def parent(self, n=1):
"""Gets an ancestor of the representative SNode in the SNode tree.
Args:
n (int): the number of levels going up from the representative SNode.
Returns:
SNode: The n-th parent of the representative SNode.
"""
return self.snode.parent(n)
def _get_field_members(self):
"""Gets field members.
Returns:
List[Expr]: Field members.
"""
return self.vars
def _loop_range(self):
"""Gets representative field member for loop range info.
Returns:
taichi_core.Expr: Representative (first) field member.
"""
return self.vars[0].ptr
def _set_grad(self, grad):
"""Sets corresponding gradient field.
Args:
grad (Field): Corresponding gradient field.
"""
self.grad = grad
@python_scope
def fill(self, val):
"""Fills `self` with a specific value.
Args:
val (Union[int, float]): Value to fill.
"""
raise NotImplementedError()
@python_scope
def to_numpy(self, dtype=None):
"""Converts `self` to a numpy array.
Args:
dtype (DataType, optional): The desired data type of returned numpy array.
Returns:
numpy.ndarray: The result numpy array.
"""
raise NotImplementedError()
@python_scope
def to_torch(self, device=None):
"""Converts `self` to a torch tensor.
Args:
device (torch.device, optional): The desired device of returned tensor.
Returns:
torch.tensor: The result torch tensor.
"""
raise NotImplementedError()
@python_scope
def from_numpy(self, arr):
"""Loads all elements from a numpy array.
The shape of the numpy array needs to be the same as `self`.
Args:
arr (numpy.ndarray): The source numpy array.
"""
raise NotImplementedError()
@python_scope
def from_torch(self, arr):
"""Loads all elements from a torch tensor.
The shape of the torch tensor needs to be the same as `self`.
Args:
arr (torch.tensor): The source torch tensor.
"""
self.from_numpy(arr.contiguous())
@python_scope
def copy_from(self, other):
"""Copies all elements from another field.
The shape of the other field needs to be the same as `self`.
Args:
other (Field): The source field.
"""
if not isinstance(other, Field):
raise TypeError('Cannot copy from a non-field object')
if self.shape != other.shape:
raise ValueError(f"ti.field shape {self.shape} does not match"
f" the source field shape {other.shape}")
from taichi._kernels import tensor_to_tensor # pylint: disable=C0415
tensor_to_tensor(self, other)
@python_scope
def __setitem__(self, key, value):
"""Sets field element in Python scope.
Args:
key (Union[List[int], int, None]): Coordinates of the field element.
value (element type): Value to set.
"""
raise NotImplementedError()
@python_scope
def __getitem__(self, key):
"""Gets field element in Python scope.
Args:
key (Union[List[int], int, None]): Coordinates of the field element.
Returns:
element type: Value retrieved.
"""
raise NotImplementedError()
def __str__(self):
if taichi.lang.impl.inside_kernel():
return self.__repr__() # make pybind11 happy, see Matrix.__str__
if self._snode.ptr is None:
return '<Field: Definition of this field is incomplete>'
return str(self.to_numpy())
def _pad_key(self, key):
if key is None:
key = ()
if not isinstance(key, (tuple, list)):
key = (key, )
assert len(key) == len(self.shape)
return key + ((0, ) * (_ti_core.get_max_num_indices() - len(key)))
def _initialize_host_accessors(self):
if self.host_accessors:
return
taichi.lang.impl.get_runtime().materialize()
self.host_accessors = [
SNodeHostAccessor(e.ptr.snode()) for e in self.vars
]
def _host_access(self, key):
return [SNodeHostAccess(e, key) for e in self.host_accessors]
class ScalarField(Field):
"""Taichi scalar field with SNode implementation.
Args:
var (Expr): Field member.
"""
def __init__(self, var):
super().__init__([var])
@python_scope
def fill(self, val):
from taichi._kernels import fill_tensor # pylint: disable=C0415
fill_tensor(self, val)
@python_scope
def to_numpy(self, dtype=None):
if dtype is None:
dtype = to_numpy_type(self.dtype)
import numpy as np # pylint: disable=C0415
arr = np.zeros(shape=self.shape, dtype=dtype)
from taichi._kernels import tensor_to_ext_arr # pylint: disable=C0415
tensor_to_ext_arr(self, arr)
taichi.lang.runtime_ops.sync()
return arr
@python_scope
def to_torch(self, device=None):
import torch # pylint: disable=C0415
# pylint: disable=E1101
arr = torch.zeros(size=self.shape,
dtype=to_pytorch_type(self.dtype),
device=device)
from taichi._kernels import tensor_to_ext_arr # pylint: disable=C0415
tensor_to_ext_arr(self, arr)
taichi.lang.runtime_ops.sync()
return arr
@python_scope
def from_numpy(self, arr):
if len(self.shape) != len(arr.shape):
raise ValueError(f"ti.field shape {self.shape} does not match"
f" the numpy array shape {arr.shape}")
for i, _ in enumerate(self.shape):
if self.shape[i] != arr.shape[i]:
raise ValueError(f"ti.field shape {self.shape} does not match"
f" the numpy array shape {arr.shape}")
if hasattr(arr, 'contiguous'):
arr = arr.contiguous()
from taichi._kernels import ext_arr_to_tensor # pylint: disable=C0415
ext_arr_to_tensor(arr, self)
taichi.lang.runtime_ops.sync()
@python_scope
def __setitem__(self, key, value):
self._initialize_host_accessors()
self.host_accessors[0].setter(value, *self._pad_key(key))
@python_scope
def __getitem__(self, key):
self._initialize_host_accessors()
return self.host_accessors[0].getter(*self._pad_key(key))
def __repr__(self):
# make interactive shell happy, prevent materialization
return '<ti.field>'
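# Minimal usage sketch, assuming the public taichi API (ti.field):
#   import taichi as ti
#   ti.init()
#   x = ti.field(dtype=ti.f32, shape=(4, 4))  # backed by a ScalarField
#   x.fill(1.0)
#   arr = x.to_numpy()   # a (4, 4) numpy array of ones
#   x[0, 0] = 2.0        # goes through SNodeHostAccessor below
# SNodeHostAccessor wraps the typed read/write entry points (read_float/
# write_float vs read_int/read_uint/write_int) selected from the SNode's
# data type.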
class SNodeHostAccessor:
def __init__(self, snode):
if _ti_core.is_real(snode.data_type()):
def getter(*key):
assert len(key) == _ti_core.get_max_num_indices()
return snode.read_float(key)
def setter(value, *key):
assert len(key) == _ti_core.get_max_num_indices()
snode.write_float(key, value)
else:
if _ti_core.is_signed(snode.data_type()):
def getter(*key):
assert len(key) == _ti_core.get_max_num_indices()
return snode.read_int(key)
else:
def getter(*key):
assert len(key) == _ti_core.get_max_num_indices()
return snode.read_uint(key)
def setter(value, *key):
assert len(key) == _ti_core.get_max_num_indices()
snode.write_int(key, value)
self.getter = getter
self.setter = setter
class SNodeHostAccess:
def __init__(self, accessor, key):
self.accessor = accessor
self.key = key
__all__ = ["Field", "ScalarField"]
|
|
# Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
import copy
import datetime
from typing import Dict, List, Optional, TYPE_CHECKING, Union
from google.protobuf.timestamp_pb2 import Timestamp
from cirq_google.engine import calibration
from cirq_google.engine.client.quantum import types as qtypes
from cirq_google.engine.client.quantum import enums as qenums
from cirq_google.engine.abstract_processor import AbstractProcessor
from cirq_google.engine.abstract_program import AbstractProgram
if TYPE_CHECKING:
from cirq_google.engine.abstract_engine import AbstractEngine
from cirq_google.engine.abstract_local_program import AbstractLocalProgram
def _to_timestamp(union_time: Union[None, datetime.datetime, datetime.timedelta]):
"""Translate a datetime or timedelta into a number of seconds since epoch."""
if isinstance(union_time, datetime.timedelta):
return int((datetime.datetime.now() + union_time).timestamp())
elif isinstance(union_time, datetime.datetime):
return int(union_time.timestamp())
return None
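# Illustrative behaviour (hypothetical values):
#   _to_timestamp(datetime.datetime(2021, 1, 1))  -> epoch seconds for that
#                                                    (timezone-dependent) moment
#   _to_timestamp(datetime.timedelta(hours=2))    -> now + 2 hours, as epoch seconds
#   _to_timestamp(None)                           -> None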
class AbstractLocalProcessor(AbstractProcessor):
"""Partial implementation of AbstractProcessor using in-memory objects.
This implements reservation creation and scheduling using an in-memory
list for time slots and reservations. Any time slot not specified by
initialization is assumed to be UNALLOCATED (available for reservation).
Attributes:
processor_id: Unique string id of the processor.
engine: The parent `AbstractEngine` object, if available.
expected_down_time: Optional datetime of the next expected downtime.
For informational purposes only.
expected_recovery_time: Optional datetime when the processor is
expected to be available again. For informational purposes only.
schedule: List of time slots that the scheduling/reservation should
use. All time slots must be non-overlapping.
project_name: A project_name for resource naming.
"""
def __init__(
self,
*,
processor_id: str,
engine: Optional['AbstractEngine'] = None,
expected_down_time: Optional[datetime.datetime] = None,
expected_recovery_time: Optional[datetime.datetime] = None,
schedule: Optional[List[qtypes.QuantumTimeSlot]] = None,
project_name: str = 'fake_project',
):
self._engine = engine
self._expected_recovery_time = expected_recovery_time
self._expected_down_time = expected_down_time
self._reservations: Dict[str, qtypes.QuantumReservation] = {}
self._resource_id_counter = 0
self._processor_id = processor_id
self._project_name = project_name
if schedule is None:
self._schedule = [
qtypes.QuantumTimeSlot(
processor_name=self._processor_id,
slot_type=qenums.QuantumTimeSlot.TimeSlotType.UNALLOCATED,
)
]
else:
self._schedule = copy.copy(schedule)
self._schedule.sort(key=lambda t: t.start_time.seconds or -1)
for idx in range(len(self._schedule) - 1):
if self._schedule[idx].end_time.seconds > self._schedule[idx + 1].start_time.seconds:
raise ValueError('Time slots cannot overlap!')
@property
def processor_id(self) -> str:
"""Unique string id of the processor."""
return self._processor_id
def engine(self) -> Optional['AbstractEngine']:
"""Returns the parent Engine object.
Returns:
The processor's parent Engine, or None if no engine has been
defined for this processor.
"""
return self._engine
def set_engine(self, engine):
"""Sets the parent processor."""
self._engine = engine
def expected_down_time(self) -> 'Optional[datetime.datetime]':
"""Returns the start of the next expected down time of the processor, if
set."""
return self._expected_down_time
def expected_recovery_time(self) -> 'Optional[datetime.datetime]':
"""Returns the expected the processor should be available, if set."""
return self._expected_recovery_time
def _create_id(self, id_type: str = 'reservation') -> str:
"""Creates a unique resource id for child objects."""
self._resource_id_counter += 1
return (
f'projects/{self._project_name}/'
f'processors/{self._processor_id}/'
f'{id_type}/{self._resource_id_counter}'
)
def _reservation_to_time_slot(
self, reservation: qtypes.QuantumReservation
) -> qtypes.QuantumTimeSlot:
"""Changes a reservation object into a time slot object."""
return qtypes.QuantumTimeSlot(
processor_name=self._processor_id,
start_time=Timestamp(seconds=reservation.start_time.seconds),
end_time=Timestamp(seconds=reservation.end_time.seconds),
slot_type=qenums.QuantumTimeSlot.TimeSlotType.RESERVATION,
)
def _insert_reservation_into(self, time_slot: qtypes.QuantumTimeSlot) -> None:
"""Inserts a new reservation time slot into the ordered schedule.
If this reservation overlaps with existing time slots, these slots will be
shortened, removed, or split to insert the new reservation.
"""
new_schedule = []
time_slot_inserted = False
for t in self._schedule:
if t.end_time.seconds and t.end_time.seconds <= time_slot.start_time.seconds:
# [--time_slot--]
# [--t--]
new_schedule.append(t)
continue
if t.start_time.seconds and t.start_time.seconds >= time_slot.end_time.seconds:
# [--time_slot--]
# [--t--]
new_schedule.append(t)
continue
if t.start_time.seconds and time_slot.start_time.seconds <= t.start_time.seconds:
if not time_slot_inserted:
new_schedule.append(time_slot)
time_slot_inserted = True
if not t.end_time.seconds or t.end_time.seconds > time_slot.end_time.seconds:
# [--time_slot---]
# [----t-----]
t.start_time.seconds = time_slot.end_time.seconds
new_schedule.append(t)
# if t.end_time < time_slot.end_time
# [------time_slot-----]
# [-----t-----]
# t should be removed
else:
if not t.end_time.seconds or t.end_time.seconds > time_slot.end_time.seconds:
# [---time_slot---]
# [-------------t---------]
# t should be split
start = qtypes.QuantumTimeSlot(
processor_name=self._processor_id,
end_time=Timestamp(seconds=time_slot.start_time.seconds),
slot_type=t.slot_type,
)
if t.start_time.seconds:
start.start_time.seconds = t.start_time.seconds
end = qtypes.QuantumTimeSlot(
processor_name=self._processor_id,
start_time=Timestamp(seconds=time_slot.end_time.seconds),
slot_type=t.slot_type,
)
if t.end_time.seconds:
end.end_time.seconds = t.end_time.seconds
new_schedule.append(start)
new_schedule.append(time_slot)
new_schedule.append(end)
else:
# [---time_slot---]
# [----t-----]
t.end_time.seconds = time_slot.start_time.seconds
new_schedule.append(t)
new_schedule.append(time_slot)
time_slot_inserted = True
if not time_slot_inserted:
new_schedule.append(time_slot)
self._schedule = new_schedule
def _is_available(self, time_slot: qtypes.QuantumTimeSlot) -> bool:
"""Returns True if the slot is available for reservation."""
for t in self._schedule:
if t.slot_type == qenums.QuantumTimeSlot.TimeSlotType.UNALLOCATED:
continue
if t.end_time.seconds and t.end_time.seconds <= time_slot.start_time.seconds:
continue
if t.start_time.seconds and t.start_time.seconds >= time_slot.end_time.seconds:
continue
return False
return True
def create_reservation(
self,
start_time: datetime.datetime,
end_time: datetime.datetime,
whitelisted_users: Optional[List[str]] = None,
) -> qtypes.QuantumReservation:
"""Creates a reservation on this processor.
Args:
start_time: the starting date/time of the reservation.
end_time: the ending date/time of the reservation.
whitelisted_users: a list of emails that are allowed
to send programs during this reservation (in addition to users
with permission "quantum.reservations.use" on the project).
        Raises:
            ValueError: if start_time is after end_time, or if the requested
                time slot is not available for reservation.
        """
if end_time < start_time:
raise ValueError('End time of reservation must be after the start time')
reservation_id = self._create_id()
new_reservation = qtypes.QuantumReservation(
name=reservation_id,
start_time=Timestamp(seconds=int(start_time.timestamp())),
end_time=Timestamp(seconds=int(end_time.timestamp())),
whitelisted_users=whitelisted_users,
)
time_slot = self._reservation_to_time_slot(new_reservation)
if not self._is_available(time_slot):
raise ValueError('Time slot is not available for reservations')
self._reservations[reservation_id] = new_reservation
self._insert_reservation_into(time_slot)
return new_reservation
def remove_reservation(self, reservation_id: str) -> None:
"""Removes a reservation on this processor."""
if reservation_id in self._reservations:
del self._reservations[reservation_id]
    def get_reservation(
        self, reservation_id: str
    ) -> Optional[qtypes.QuantumReservation]:
        """Retrieves a reservation given its id, or None if it does not exist."""
        return self._reservations.get(reservation_id)
def update_reservation(
self,
reservation_id: str,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
whitelisted_users: Optional[List[str]] = None,
) -> None:
"""Updates a reservation with new information.
Updates a reservation with a new start date, end date, or
        list of additional users. For each field, if the argument is left as
        None, it will not be updated.
Args:
reservation_id: The string identifier of the reservation to change.
start_time: New starting time of the reservation. If unspecified,
starting time is left unchanged.
end_time: New ending time of the reservation. If unspecified,
ending time is left unchanged.
whitelisted_users: The new list of whitelisted users to allow on
the reservation. If unspecified, the users are left unchanged.
Raises:
ValueError: if reservation_id does not exist.
"""
if reservation_id not in self._reservations:
raise ValueError(f'Reservation id {reservation_id} does not exist.')
if start_time:
self._reservations[reservation_id].start_time.seconds = _to_timestamp(start_time)
if end_time:
self._reservations[reservation_id].end_time.seconds = _to_timestamp(end_time)
if whitelisted_users:
del self._reservations[reservation_id].whitelisted_users[:]
self._reservations[reservation_id].whitelisted_users.extend(whitelisted_users)
def list_reservations(
self,
from_time: Union[None, datetime.datetime, datetime.timedelta] = datetime.timedelta(),
to_time: Union[None, datetime.datetime, datetime.timedelta] = datetime.timedelta(weeks=2),
) -> List[qtypes.QuantumReservation]:
"""Retrieves the reservations from a processor.
Only reservations from this processor and project will be
returned. The schedule may be filtered by starting and ending time.
Args:
from_time: Filters the returned reservations to only include entries
that end no earlier than the given value. Specified either as an
absolute time (datetime.datetime) or as a time relative to now
(datetime.timedelta). Defaults to now (a relative time of 0).
Set to None to omit this filter.
to_time: Filters the returned reservations to only include entries
that start no later than the given value. Specified either as an
absolute time (datetime.datetime) or as a time relative to now
(datetime.timedelta). Defaults to two weeks from now (a relative
time of two weeks). Set to None to omit this filter.
Returns:
A list of reservations.
"""
start_timestamp = _to_timestamp(from_time)
end_timestamp = _to_timestamp(to_time)
reservation_list = []
for reservation in self._reservations.values():
if end_timestamp and reservation.start_time.seconds > end_timestamp:
continue
if start_timestamp and reservation.end_time.seconds < start_timestamp:
continue
reservation_list.append(reservation)
return reservation_list
def get_schedule(
self,
from_time: Union[None, datetime.datetime, datetime.timedelta] = datetime.timedelta(),
to_time: Union[None, datetime.datetime, datetime.timedelta] = datetime.timedelta(weeks=2),
time_slot_type: Optional[qenums.QuantumTimeSlot.TimeSlotType] = None,
) -> List[qtypes.QuantumTimeSlot]:
"""Retrieves the schedule for a processor.
The schedule may be filtered by time.
Args:
from_time: Filters the returned schedule to only include entries
that end no earlier than the given value. Specified either as an
absolute time (datetime.datetime) or as a time relative to now
(datetime.timedelta). Defaults to now (a relative time of 0).
Set to None to omit this filter.
to_time: Filters the returned schedule to only include entries
that start no later than the given value. Specified either as an
absolute time (datetime.datetime) or as a time relative to now
(datetime.timedelta). Defaults to two weeks from now (a relative
time of two weeks). Set to None to omit this filter.
time_slot_type: Filters the returned schedule to only include
entries with a given type (e.g. maintenance, open swim).
Defaults to None. Set to None to omit this filter.
Returns:
Time slots that fit the criteria.
"""
time_slots: List[qtypes.QuantumTimeSlot] = []
start_timestamp = _to_timestamp(from_time)
end_timestamp = _to_timestamp(to_time)
for slot in self._schedule:
if (
start_timestamp
and slot.end_time.seconds
and slot.end_time.seconds < start_timestamp
):
continue
if (
end_timestamp
and slot.start_time.seconds
and slot.start_time.seconds > end_timestamp
):
continue
time_slots.append(slot)
return time_slots
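    # Example (hypothetical arguments): get_schedule(
    #     from_time=datetime.timedelta(hours=1),
    #     to_time=datetime.timedelta(days=1))
    # keeps only slots that end no earlier than one hour from now and start
    # no later than one day from now, per the filter semantics above.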
@abstractmethod
def get_latest_calibration(self, timestamp: int) -> Optional[calibration.Calibration]:
"""Returns the latest calibration with the provided timestamp or earlier."""
@abstractmethod
def get_program(self, program_id: str) -> AbstractProgram:
"""Returns an AbstractProgram for an existing Quantum Engine program.
Args:
program_id: Unique ID of the program within the parent project.
Returns:
An AbstractProgram for the program.
"""
@abstractmethod
def list_programs(
self,
created_before: Optional[Union[datetime.datetime, datetime.date]] = None,
created_after: Optional[Union[datetime.datetime, datetime.date]] = None,
has_labels: Optional[Dict[str, str]] = None,
) -> List['AbstractLocalProgram']:
"""Returns a list of previously executed quantum programs.
Args:
created_after: retrieve programs that were created after this date
or time.
created_before: retrieve programs that were created before this date
or time.
            has_labels: retrieve programs that have labels on them specified by
                this dict. If the value is set to `*`, any program that has the
                label key, regardless of the label value, will match. For
                example, programs that have the `shape` label (with any value)
                and the `color` label with value `red` can be queried using
                `{'color': 'red', 'shape': '*'}`.
        """
|
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Backup manager manages volume backups.
Volume Backups are full copies of persistent volumes stored in a backup
store e.g. an object store or any other backup store if and when support is
added. They are usable without the original object being available. A
volume backup can be restored to the original volume it was created from or
any other available volume with a minimum size of the original volume.
Volume backups can be created, restored, deleted and listed.
**Related Flags**
:backup_topic: What :mod:`rpc` topic to listen to (default:
`storage-backup`).
:backup_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`storage.backup.manager.Manager`).
"""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import importutils
import six
from jacket.storage.backup import driver
from jacket.storage.backup import rpcapi as backup_rpcapi
from jacket import context
from jacket.storage import exception
from jacket.storage.i18n import _, _LE, _LI, _LW
from jacket.storage import manager
from jacket.objects import storage
from jacket.objects.storage import fields
from jacket.storage import quota
from jacket import rpc
from jacket.storage import utils
from jacket.worker import rpcapi as jacket_rpcapi
from jacket.storage.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
backup_manager_opts = [
cfg.StrOpt('backup_driver',
#default='jacket.storage.backup.drivers.swift',
default='jacket.drivers.openstack.volume_driver',
help='Driver to use for backups.',),
cfg.BoolOpt('backup_service_inithost_offload',
default=False,
help='Offload pending backup delete during '
'backup service startup.',),
]
# This map doesn't need to be extended in the future since it's only
# for old backup services
mapper = {'jacket.storage.backup.services.swift': 'jacket.storage.backup.drivers.swift',
'jacket.storage.backup.services.ceph': 'jacket.storage.backup.drivers.ceph'}
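# e.g. _map_service_to_driver('jacket.storage.backup.services.ceph') returns
# 'jacket.storage.backup.drivers.ceph'; values not in `mapper` pass through.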
CONF = cfg.CONF
CONF.register_opts(backup_manager_opts)
CONF.import_opt('use_multipath_for_image_xfer', 'jacket.storage.volume.driver')
CONF.import_opt('num_volume_device_scan_tries', 'jacket.storage.volume.driver')
QUOTAS = quota.QUOTAS
class BackupManager(manager.SchedulerDependentManager):
"""Manages backup of block storage devices."""
RPC_API_VERSION = '2.0'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, service_name=None, *args, **kwargs):
self.service = importutils.import_module(self.driver_name)
self.az = CONF.storage_availability_zone
self.volume_managers = {}
# TODO(xyang): If backup_use_same_host is True, we'll find
# the volume backend on the backup node. This allows us
# to use a temp snapshot to backup an in-use volume if the
# driver supports it. This code should go away when we add
# support for backing up in-use volume using a temp snapshot
# on a remote node.
if CONF.backup_use_same_host:
self._setup_volume_drivers()
self.backup_rpcapi = backup_rpcapi.BackupAPI()
self.jacket_rpcapi = jacket_rpcapi.JacketAPI()
super(BackupManager, self).__init__(service_name='backup',
*args, **kwargs)
self.additional_endpoints.append(_BackupV1Proxy(self))
def _init_volume_driver(self, ctxt, driver):
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)."),
{'driver_name': driver.__class__.__name__,
'version': driver.get_version()})
try:
driver.do_setup(ctxt)
driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Error encountered during initialization of "
"driver: %(name)s."),
{'name': driver.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
driver.set_initialized()
def _get_volume_backend(self, host=None, allow_null_host=False):
if host is None:
if not allow_null_host:
msg = _("NULL host not allowed for volume backend lookup.")
raise exception.BackupFailedToGetVolumeBackend(msg)
else:
LOG.debug("Checking hostname '%s' for backend info.", host)
# NOTE(xyang): If host='myhost@lvmdriver', backend='lvmdriver'
# by the logic below. This is different from extract_host.
# vol_utils.extract_host(host, 'backend')='myhost@lvmdriver'.
part = host.partition('@')
if (part[1] == '@') and (part[2] != ''):
backend = part[2]
LOG.debug("Got backend '%s'.", backend)
return backend
LOG.info(_LI("Backend not found in hostname (%s) so using default."),
host)
if 'default' not in self.volume_managers:
# For multi-backend we just pick the top of the list.
                return list(self.volume_managers.keys())[0]
return 'default'
def _get_manager(self, backend):
LOG.debug("Manager requested for volume_backend '%s'.",
backend)
if backend is None:
LOG.debug("Fetching default backend.")
backend = self._get_volume_backend(allow_null_host=True)
if backend not in self.volume_managers:
msg = (_("Volume manager for backend '%s' does not exist.") %
(backend))
raise exception.BackupFailedToGetVolumeBackend(msg)
return self.volume_managers[backend]
def _get_driver(self, backend=None):
LOG.debug("Driver requested for volume_backend '%s'.",
backend)
if backend is None:
LOG.debug("Fetching default backend.")
backend = self._get_volume_backend(allow_null_host=True)
mgr = self._get_manager(backend)
mgr.driver.db = self.db
return mgr.driver
def _setup_volume_drivers(self):
if CONF.enabled_backends:
for backend in CONF.enabled_backends:
host = "%s@%s" % (CONF.host, backend)
mgr = importutils.import_object(CONF.volume_manager,
host=host,
service_name=backend)
config = mgr.configuration
backend_name = config.safe_get('volume_backend_name')
LOG.debug("Registering backend %(backend)s (host=%(host)s "
"backend_name=%(backend_name)s).",
{'backend': backend, 'host': host,
'backend_name': backend_name})
self.volume_managers[backend] = mgr
else:
default = importutils.import_object(CONF.volume_manager)
LOG.debug("Registering default backend %s.", default)
self.volume_managers['default'] = default
@property
def driver_name(self):
"""This function maps old backup services to backup drivers."""
return self._map_service_to_driver(CONF.backup_driver)
def _map_service_to_driver(self, service):
"""Maps services to drivers."""
if service in mapper:
return mapper[service]
return service
def _update_backup_error(self, backup, context, err):
backup.status = fields.BackupStatus.ERROR
backup.fail_reason = err
backup.save()
def init_host(self):
"""Run initialization needed for a standalone service."""
ctxt = context.get_admin_context()
for mgr in self.volume_managers.values():
self._init_volume_driver(ctxt, mgr.driver)
try:
self._cleanup_incomplete_backup_operations(ctxt)
except Exception:
# Don't block startup of the backup service.
LOG.exception(_LE("Problem cleaning incomplete backup "
"operations."))
def reset(self):
super(BackupManager, self).reset()
self.backup_rpcapi = backup_rpcapi.BackupAPI()
self.jacket_rpcapi = jacket_rpcapi.JacketAPI()
def _cleanup_incomplete_backup_operations(self, ctxt):
LOG.info(_LI("Cleaning up incomplete backup operations."))
# TODO(smulcahy) implement full resume of backup and restore
# operations on restart (rather than simply resetting)
backups = storage.BackupList.get_all_by_host(ctxt, self.host)
for backup in backups:
try:
self._cleanup_one_backup(ctxt, backup)
except Exception:
LOG.exception(_LE("Problem cleaning up backup %(bkup)s."),
{'bkup': backup['id']})
try:
self._cleanup_temp_volumes_snapshots_for_one_backup(ctxt,
backup)
except Exception:
LOG.exception(_LE("Problem cleaning temp volumes and "
"snapshots for backup %(bkup)s."),
{'bkup': backup['id']})
def _cleanup_one_volume(self, ctxt, volume):
if volume['status'] == 'backing-up':
self._detach_all_attachments(ctxt, volume)
LOG.info(_LI('Resetting volume %(vol_id)s to previous '
'status %(status)s (was backing-up).'),
{'vol_id': volume['id'],
'status': volume['previous_status']})
self.db.volume_update(ctxt, volume['id'],
{'status': volume['previous_status']})
elif volume['status'] == 'restoring-backup':
self._detach_all_attachments(ctxt, volume)
LOG.info(_LI('setting volume %s to error_restoring '
'(was restoring-backup).'), volume['id'])
self.db.volume_update(ctxt, volume['id'],
{'status': 'error_restoring'})
def _cleanup_one_backup(self, ctxt, backup):
if backup['status'] == fields.BackupStatus.CREATING:
LOG.info(_LI('Resetting backup %s to error (was creating).'),
backup['id'])
volume = storage.Volume.get_by_id(ctxt, backup.volume_id)
self._cleanup_one_volume(ctxt, volume)
err = 'incomplete backup reset on manager restart'
self._update_backup_error(backup, ctxt, err)
elif backup['status'] == fields.BackupStatus.RESTORING:
LOG.info(_LI('Resetting backup %s to '
'available (was restoring).'),
backup['id'])
volume = storage.Volume.get_by_id(ctxt, backup.restore_volume_id)
self._cleanup_one_volume(ctxt, volume)
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
elif backup['status'] == fields.BackupStatus.DELETING:
LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
if CONF.backup_service_inithost_offload:
# Offload all the pending backup delete operations to the
# threadpool to prevent the main backup service thread
# from being blocked.
self._add_to_threadpool(self.delete_backup, ctxt, backup)
else:
# By default, delete backups sequentially
self.delete_backup(ctxt, backup)
def _detach_all_attachments(self, ctxt, volume):
attachments = volume['volume_attachment'] or []
for attachment in attachments:
if (attachment['attached_host'] == self.host and
attachment['instance_uuid'] is None):
try:
rpcapi = self.jacket_rpcapi
rpcapi.detach_volume(ctxt, volume, attachment['id'])
except Exception:
LOG.exception(_LE("Detach attachment %(attach_id)s"
" failed."),
{'attach_id': attachment['id']},
resource=volume)
def _delete_temp_volume(self, ctxt, backup):
try:
temp_volume = storage.Volume.get_by_id(
ctxt, backup.temp_volume_id)
self.jacket_rpcapi.delete_volume(ctxt, temp_volume)
except exception.VolumeNotFound:
LOG.debug("Could not find temp volume %(vol)s to clean up "
"for backup %(backup)s.",
{'vol': backup.temp_volume_id,
'backup': backup.id})
backup.temp_volume_id = None
backup.save()
def _delete_temp_snapshot(self, ctxt, backup):
try:
temp_snapshot = storage.Snapshot.get_by_id(
ctxt, backup.temp_snapshot_id)
volume = storage.Volume.get_by_id(
ctxt, backup.volume_id)
# The temp snapshot should be deleted directly thru the
# volume driver, not thru the volume manager.
self.jacket_rpcapi.delete_snapshot(ctxt, temp_snapshot,
volume.host)
except exception.SnapshotNotFound:
LOG.debug("Could not find temp snapshot %(snap)s to clean "
"up for backup %(backup)s.",
{'snap': backup.temp_snapshot_id,
'backup': backup.id})
backup.temp_snapshot_id = None
backup.save()
def _cleanup_temp_volumes_snapshots_for_one_backup(self, ctxt, backup):
# NOTE(xyang): If the service crashes or gets restarted during the
# backup operation, there could be temporary volumes or snapshots
# that are not deleted. Make sure any temporary volumes or snapshots
        # created by the backup job are deleted when the service is started.
if (backup.temp_volume_id
and backup.status == fields.BackupStatus.ERROR):
self._delete_temp_volume(ctxt, backup)
if (backup.temp_snapshot_id
and backup.status == fields.BackupStatus.ERROR):
self._delete_temp_snapshot(ctxt, backup)
def _cleanup_temp_volumes_snapshots_when_backup_created(
self, ctxt, backup):
# Delete temp volumes or snapshots when backup creation is completed.
if backup.temp_volume_id:
self._delete_temp_volume(ctxt, backup)
if backup.temp_snapshot_id:
self._delete_temp_snapshot(ctxt, backup)
def create_backup(self, context, backup):
"""Create volume backups using configured backup service."""
volume_id = backup.volume_id
volume = storage.Volume.get_by_id(context, volume_id)
previous_status = volume.get('previous_status', None)
LOG.info(_LI('Create backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'),
{'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "create.start")
backup.host = self.host
backup.service = self.driver_name
backup.availability_zone = self.az
backup.save()
expected_status = 'backing-up'
actual_status = volume['status']
if actual_status != expected_status:
err = _('Create backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self._update_backup_error(backup, context, err)
raise exception.InvalidVolume(reason=err)
expected_status = fields.BackupStatus.CREATING
actual_status = backup.status
if actual_status != expected_status:
err = _('Create backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self._update_backup_error(backup, context, err)
backup.save()
raise exception.InvalidBackup(reason=err)
try:
self._run_backup(context, backup, volume)
except Exception as err:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': previous_status,
'previous_status': 'error_backing-up'})
self._update_backup_error(backup, context, six.text_type(err))
# Restore the original status.
self.db.volume_update(context, volume_id,
{'status': previous_status,
'previous_status': 'backing-up'})
backup.status = fields.BackupStatus.AVAILABLE
backup.size = volume['size']
backup.save()
# Handle the num_dependent_backups of parent backup when child backup
# has created successfully.
if backup.parent_id:
parent_backup = storage.Backup.get_by_id(context,
backup.parent_id)
parent_backup.num_dependent_backups += 1
parent_backup.save()
LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
self._notify_about_backup_usage(context, backup, "create.end")
def _run_backup(self, context, backup, volume):
backup_service = self.service.get_backup_driver(context)
try:
backup_service.create_backup(context, backup, volume)
except Exception:
raise
properties = utils.brick_get_connector_properties()
backup_dic = self.jacket_rpcapi.get_backup_device(context,
backup, volume)
try:
backup_device = backup_dic.get('backup_device')
is_snapshot = backup_dic.get('is_snapshot')
attach_info = self._attach_device(context, backup_device,
properties, is_snapshot)
try:
device_path = attach_info['device']['path']
if isinstance(device_path, six.string_types):
if backup_dic.get('secure_enabled', False):
with open(device_path) as device_file:
backup_service.backup(backup, device_file)
else:
with utils.temporary_chown(device_path):
with open(device_path) as device_file:
backup_service.backup(backup, device_file)
else:
backup_service.backup(backup, device_path)
finally:
self._detach_device(context, attach_info,
backup_device, properties,
is_snapshot)
finally:
backup = storage.Backup.get_by_id(context, backup.id)
self._cleanup_temp_volumes_snapshots_when_backup_created(
context, backup)
def restore_backup(self, context, backup, volume_id):
"""Restore volume backups from configured backup service."""
LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'),
{'backup_id': backup.id, 'volume_id': volume_id})
volume = storage.Volume.get_by_id(context, volume_id)
self._notify_about_backup_usage(context, backup, "restore.start")
backup.host = self.host
backup.save()
expected_status = 'restoring-backup'
actual_status = volume['status']
if actual_status != expected_status:
err = (_('Restore backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
raise exception.InvalidVolume(reason=err)
expected_status = fields.BackupStatus.RESTORING
actual_status = backup['status']
if actual_status != expected_status:
err = (_('Restore backup aborted: expected backup status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
self._update_backup_error(backup, context, err)
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)
if volume['size'] > backup['size']:
LOG.info(_LI('Volume: %(vol_id)s, size: %(vol_size)d is '
'larger than backup: %(backup_id)s, '
'size: %(backup_size)d, continuing with restore.'),
{'vol_id': volume['id'],
'vol_size': volume['size'],
'backup_id': backup['id'],
'backup_size': backup['size']})
backup_service = self._map_service_to_driver(backup['service'])
configured_service = self.driver_name
if backup_service != configured_service:
err = _('Restore backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].') % {
'configured_service': configured_service,
'backup_service': backup_service,
}
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)
try:
self._run_restore(context, backup, volume)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_restoring'})
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
self.db.volume_update(context, volume_id, {'status': 'available'})
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
' to volume %(volume_id)s.'),
{'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "restore.end")
def _run_restore(self, context, backup, volume):
backup_service = self.service.get_backup_driver(context)
properties = utils.brick_get_connector_properties()
secure_enabled = (
self.jacket_rpcapi.secure_file_operations_enabled(context,
volume))
attach_info = self._attach_device(context, volume, properties)
try:
device_path = attach_info['device']['path']
if isinstance(device_path, six.string_types):
if secure_enabled:
with open(device_path, 'wb') as device_file:
backup_service.restore(backup, volume.id, device_file)
else:
with utils.temporary_chown(device_path):
with open(device_path, 'wb') as device_file:
backup_service.restore(backup, volume.id,
device_file)
else:
backup_service.restore(backup, volume.id, device_path)
finally:
self._detach_device(context, attach_info, volume, properties)
def delete_backup(self, context, backup):
"""Delete volume backup from configured backup service."""
LOG.info(_LI('Delete backup started, backup: %s.'), backup.id)
self._notify_about_backup_usage(context, backup, "delete.start")
backup.host = self.host
backup.save()
expected_status = fields.BackupStatus.DELETING
actual_status = backup.status
if actual_status != expected_status:
err = _('Delete_backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') \
% {'expected_status': expected_status,
'actual_status': actual_status}
self._update_backup_error(backup, context, err)
raise exception.InvalidBackup(reason=err)
backup_service = self._map_service_to_driver(backup['service'])
if backup_service is not None:
configured_service = self.driver_name
if backup_service != configured_service:
err = _('Delete backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].')\
% {'configured_service': configured_service,
'backup_service': backup_service}
self._update_backup_error(backup, context, err)
raise exception.InvalidBackup(reason=err)
try:
backup_service = self.service.get_backup_driver(context)
backup_service.delete(backup)
except Exception as err:
with excutils.save_and_reraise_exception():
self._update_backup_error(backup, context,
six.text_type(err))
# Get reservations
try:
reserve_opts = {
'backups': -1,
'backup_gigabytes': -backup.size,
}
reservations = QUOTAS.reserve(context,
project_id=backup.project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting backup"))
backup.destroy()
# If this backup is incremental backup, handle the
# num_dependent_backups of parent backup
if backup.parent_id:
parent_backup = storage.Backup.get_by_id(context,
backup.parent_id)
if parent_backup.has_dependent_backups:
parent_backup.num_dependent_backups -= 1
parent_backup.save()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations,
project_id=backup.project_id)
LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup.id)
self._notify_about_backup_usage(context, backup, "delete.end")
def _notify_about_backup_usage(self,
context,
backup,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_backup_usage(
context, backup, event_suffix,
extra_usage_info=extra_usage_info,
host=self.host)
def export_record(self, context, backup):
"""Export all volume backup metadata details to allow clean import.
Export backup metadata so it could be re-imported into the database
without any prerequisite in the backup database.
:param context: running context
:param backup: backup object to export
:returns: backup_record - a description of how to import the backup
:returns: contains 'backup_url' - how to import the backup, and
:returns: 'backup_service' describing the needed driver.
:raises: InvalidBackup
"""
LOG.info(_LI('Export record started, backup: %s.'), backup.id)
expected_status = fields.BackupStatus.AVAILABLE
actual_status = backup.status
if actual_status != expected_status:
err = (_('Export backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
raise exception.InvalidBackup(reason=err)
backup_record = {}
backup_record['backup_service'] = backup.service
backup_service = self._map_service_to_driver(backup.service)
configured_service = self.driver_name
if backup_service != configured_service:
err = (_('Export record aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].') %
{'configured_service': configured_service,
'backup_service': backup_service})
raise exception.InvalidBackup(reason=err)
# Call driver to create backup description string
try:
backup_service = self.service.get_backup_driver(context)
driver_info = backup_service.export_record(backup)
backup_url = backup.encode_record(driver_info=driver_info)
backup_record['backup_url'] = backup_url
except Exception as err:
msg = six.text_type(err)
raise exception.InvalidBackup(reason=msg)
LOG.info(_LI('Export record finished, backup %s exported.'), backup.id)
return backup_record
def import_record(self,
context,
backup,
backup_service,
backup_url,
backup_hosts):
"""Import all volume backup metadata details to the backup db.
:param context: running context
:param backup: The new backup object for the import
:param backup_service: The needed backup driver for import
:param backup_url: An identifier string to locate the backup
:param backup_hosts: Potential hosts to execute the import
:raises: InvalidBackup
:raises: ServiceNotFound
"""
LOG.info(_LI('Import record started, backup_url: %s.'), backup_url)
# Can we import this backup?
if (backup_service != self.driver_name):
# No, are there additional potential backup hosts in the list?
if len(backup_hosts) > 0:
# try the next host on the list, maybe he can import
first_host = backup_hosts.pop()
self.backup_rpcapi.import_record(context,
first_host,
backup,
backup_service,
backup_url,
backup_hosts)
else:
# empty list - we are the last host on the list, fail
err = _('Import record failed, cannot find backup '
'service to perform the import. Request service '
'%(service)s') % {'service': backup_service}
self._update_backup_error(backup, context, err)
raise exception.ServiceNotFound(service_id=backup_service)
else:
# Yes...
try:
# Deserialize backup record information
backup_options = backup.decode_record(backup_url)
# Extract driver specific info and pass it to the driver
driver_options = backup_options.pop('driver_info', {})
backup_service = self.service.get_backup_driver(context)
backup_service.import_record(backup, driver_options)
except Exception as err:
msg = six.text_type(err)
self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
required_import_options = {
'display_name',
'display_description',
'container',
'size',
'service_metadata',
'service',
'object_count',
'id'
}
# Check for missing fields in imported data
missing_opts = required_import_options - set(backup_options)
if missing_opts:
msg = (_('Driver successfully decoded imported backup data, '
'but there are missing fields (%s).') %
', '.join(missing_opts))
self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
# Confirm the ID from the record in the DB is the right one
backup_id = backup_options['id']
if backup_id != backup.id:
msg = (_('Trying to import backup metadata from id %(meta_id)s'
' into backup %(id)s.') %
{'meta_id': backup_id, 'id': backup.id})
self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
# Overwrite some fields
backup_options['status'] = fields.BackupStatus.AVAILABLE
backup_options['service'] = self.driver_name
backup_options['availability_zone'] = self.az
backup_options['host'] = self.host
# Remove some values which are not actual fields and some that
# were set by the API node
for key in ('name', 'user_id', 'project_id'):
backup_options.pop(key, None)
# Update the database
backup.update(backup_options)
backup.save()
# Verify backup
try:
if isinstance(backup_service, driver.BackupDriverWithVerify):
backup_service.verify(backup.id)
else:
LOG.warning(_LW('Backup service %(service)s does not '
'support verify. Backup id %(id)s is '
'not verified. Skipping verify.'),
{'service': self.driver_name,
'id': backup.id})
except exception.InvalidBackup as err:
with excutils.save_and_reraise_exception():
self._update_backup_error(backup, context,
six.text_type(err))
LOG.info(_LI('Import record id %s metadata from driver '
'finished.'), backup.id)
def reset_status(self, context, backup, status):
"""Reset volume backup status.
:param context: running context
:param backup: The backup object for reset status operation
:param status: The status to be set
:raises: InvalidBackup
:raises: BackupVerifyUnsupportedDriver
:raises: AttributeError
"""
LOG.info(_LI('Reset backup status started, backup_id: '
'%(backup_id)s, status: %(status)s.'),
{'backup_id': backup.id,
'status': status})
backup_service = self._map_service_to_driver(backup.service)
LOG.info(_LI('Backup service: %s.'), backup_service)
if backup_service is not None:
configured_service = self.driver_name
if backup_service != configured_service:
err = _('Reset backup status aborted, the backup service'
' currently configured [%(configured_service)s] '
'is not the backup service that was used to create'
' this backup [%(backup_service)s].') % \
{'configured_service': configured_service,
'backup_service': backup_service}
raise exception.InvalidBackup(reason=err)
# Verify backup
try:
# check whether the backup is ok or not
if (status == fields.BackupStatus.AVAILABLE
and backup['status'] != fields.BackupStatus.RESTORING):
# check whether we could verify the backup is ok or not
if isinstance(backup_service,
driver.BackupDriverWithVerify):
backup_service.verify(backup.id)
backup.status = status
backup.save()
# driver does not support verify function
else:
msg = (_('Backup service %(configured_service)s '
'does not support verify. Backup id'
' %(id)s is not verified. '
'Skipping verify.') %
{'configured_service': self.driver_name,
'id': backup.id})
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# reset status to error or from restoring to available
else:
if (status == fields.BackupStatus.ERROR or
(status == fields.BackupStatus.AVAILABLE and
backup.status == fields.BackupStatus.RESTORING)):
backup.status = status
backup.save()
except exception.InvalidBackup:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Backup id %s is not invalid. "
"Skipping reset."), backup.id)
except exception.BackupVerifyUnsupportedDriver:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Backup service %(configured_service)s '
'does not support verify. Backup id '
'%(id)s is not verified. '
'Skipping verify.'),
{'configured_service': self.driver_name,
'id': backup.id})
except AttributeError:
msg = (_('Backup service %(service)s does not support '
'verify. Backup id %(id)s is not verified. '
'Skipping reset.') %
{'service': self.driver_name,
'id': backup.id})
LOG.error(msg)
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# Needs to clean temporary volumes and snapshots.
try:
self._cleanup_temp_volumes_snapshots_for_one_backup(
context, backup)
except Exception:
LOG.exception(_LE("Problem cleaning temp volumes and "
"snapshots for backup %(bkup)s."),
{'bkup': backup.id})
# send notification to ceilometer
notifier_info = {'id': backup.id, 'update': {'status': status}}
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, "backups.reset_status.end",
notifier_info)
def check_support_to_force_delete(self, context):
"""Check if the backup driver supports force delete operation.
:param context: running context
"""
backup_service = self.service.get_backup_driver(context)
return backup_service.support_force_delete
def _attach_device(self, context, backup_device,
properties, is_snapshot=False):
"""Attach backup device."""
if not is_snapshot:
return self._attach_volume(context, backup_device, properties)
else:
volume = self.db.volume_get(context, backup_device.volume_id)
host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=host)
rc = self._get_driver(backend)._attach_snapshot(
context, backup_device, properties)
return rc
def _attach_volume(self, context, volume, properties):
"""Attach a volume."""
try:
conn = self.jacket_rpcapi.initialize_connection(context,
volume,
properties)
return self._connect_device(conn)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.jacket_rpcapi.terminate_connection(context, volume,
properties,
force=True)
except Exception:
LOG.warning(_LW("Failed to terminate the connection "
"of volume %(volume_id)s, but it is "
"acceptable."),
                                {'volume_id': volume.id})
def _connect_device(self, conn):
"""Establish connection to device."""
use_multipath = CONF.use_multipath_for_image_xfer
device_scan_attempts = CONF.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _detach_device(self, context, attach_info, device,
properties, is_snapshot=False, force=False):
"""Disconnect the volume or snapshot from the host. """
connector = attach_info['connector']
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
rpcapi = self.jacket_rpcapi
if not is_snapshot:
rpcapi.terminate_connection(context, device, properties,
force=force)
rpcapi.remove_export(context, device)
else:
volume = self.db.volume_get(context, device.volume_id)
host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=host)
self._get_driver(backend)._detach_snapshot(
context, attach_info, device, properties, force)
# TODO(dulek): This goes away immediately in Newton and is just present in
# Mitaka so that we can receive v1.x and v2.0 messages.
class _BackupV1Proxy(object):
target = messaging.Target(version='1.3')
def __init__(self, manager):
self.manager = manager
def create_backup(self, context, backup):
return self.manager.create_backup(context, backup)
def restore_backup(self, context, backup, volume_id):
return self.manager.restore_backup(context, backup, volume_id)
def delete_backup(self, context, backup):
return self.manager.delete_backup(context, backup)
def export_record(self, context, backup):
return self.manager.export_record(context, backup)
def import_record(self, context, backup, backup_service, backup_url,
backup_hosts):
return self.manager.import_record(context, backup, backup_service,
backup_url, backup_hosts)
def reset_status(self, context, backup, status):
return self.manager.reset_status(context, backup, status)
def check_support_to_force_delete(self, context):
return self.manager.check_support_to_force_delete(context)
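# NOTE: Two illustrative examples (not executed by the manager above) of small
# pieces of logic in this module that are easy to misread:
#
# 1. `_get_volume_backend` pulls the backend name out of a 'host@backend'
#    string with str.partition:
#        part = 'myhost@lvmdriver'.partition('@')
#        # part == ('myhost', '@', 'lvmdriver'); part[2] is the backend name.
#
# 2. `import_record` detects incomplete imported metadata with a plain set
#    difference between the required field names and the decoded options:
#        required = {'id', 'size', 'service'}   # abbreviated for the example
#        backup_options = {'id': 'abc', 'size': 1}
#        missing = required - set(backup_options)   # -> {'service'}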
|
|
import sys
import json
import time
"""
Requirements:
users want to filter log messages by
severity (e.g. warn, info, debug, trace)
and / or
topic (e.g. p2p, vm)
topical logs may cross module boundaries
lazy logging (i.e. lazy evaluation of expensive log messages)
logs should be consumable by software
"""
#HOWTO: log levels defined by groups
log_level_names = ('critical', 'error', 'warn', 'info', 'debug', 'trace')
log_levels = dict((n, i) for i, n in enumerate(log_level_names))
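# e.g. log_levels == {'critical': 0, 'error': 1, 'warn': 2, 'info': 3,
# 'debug': 4, 'trace': 5}; a logger is included when listing at level L
# if log_levels[logger.level] <= log_levels[L].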
class LogGroup(object):
"""
combines multiple logging (to e.g. a topic)
LogGroups can be nested
"""
is_active = 0
def __init__(self, name, *logger_or_groups):
assert isinstance(name, (str, unicode))
self.name = name
self.loggers = []
self.listeners = []
for l in logger_or_groups:
self.add_logger(l)
def activate(self):
for l in self.list():
l.is_active += 1 # loggers can be in multiple groups
def deactivate(self):
for l in self.list():
l.is_active = max(0, l.is_active - 1)
def add_logger(self, logger_or_group):
assert isinstance(logger_or_group, (LogGroup, Logger))
logger_or_group.add_listener(self.log_cb)
self.loggers.append(logger_or_group)
def remove_logger(self, logger_or_group):
assert isinstance(logger_or_group, (LogGroup, Logger))
logger_or_group.remove_listener(self.log_cb)
self.loggers.remove(logger_or_group)
def add_listener(self, cb):
self.listeners.append(cb)
def remove_listener(self, cb):
self.listeners.remove(cb)
def log_cb(self, logger, name, data):
for cb in self.listeners:
cb(logger, name, data)
def list(self, level=log_level_names[-1]):
"list all included loggers"
loggers = []
for l in self.loggers:
if isinstance(l, LogGroup):
loggers.extend(l.list())
else:
loggers.append(l)
return [l for l in loggers if log_levels[l.level] <= log_levels[level]]
class LogManager(object):
writer = None
def __init__(self):
self.loggers = []
self.groups = []
def items(self):
return dict([(l.name, l) for l in self.loggers] + [(g.name, g) for g in self.groups])
def create(self, name, level='warn'):
assert name not in self.items().keys()
l = Logger(name, level)
self.loggers.append(l)
return l
def list(self, level=log_level_names[-1]):
return [l for l in self.loggers if log_levels[l.level] <= log_levels[level]]
def group(self, name, *loggers):
assert name not in self.items().keys()
g = LogGroup(name, *loggers)
self.groups.append(g)
return g
def list_groups(self):
return self.groups[:]
def get(self, name):
print name
try:
return [i for n, i in self.items().items() if n == name][0]
except IndexError:
raise KeyError(name)
class LazyLog(object):
"use for expensive log formattings, func is only called when receiving logger is_active"
def __init__(self, name, func):
self.name = name
self.func = func
class BaseFormatter(object):
def format(self, logger, event_name, data):
if isinstance(data, dict):
items = data.items()
if logger.kargs_sorting:
order = dict((k, i) for i, k in enumerate(logger.kargs_sorting))
items = sorted(items, key=lambda x: order.get(x[0], 1000))
msg = ", ".join("%s=%s" % (k, v) for k, v in items)
elif isinstance(data, list):
msg = ", ".join(map(str, data))
else:
msg = str(data)
return "[%s]\t%s: %s" % (logger.name, event_name.ljust(15), msg)
class tJSONEncoder(json.JSONEncoder):
def default(self, obj):
from pyethereum.peer import Peer # FIXME
if isinstance(obj, Peer):
return (obj.ip, obj.port)
# return repr(obj)
# Let the base class default method raise the TypeError
#return json.JSONEncoder.default(self, obj)
return repr(obj)
class JSONFormatter(object):
def format(self, logger, event_name, data):
return tJSONEncoder().encode({logger.name: {event_name: data}, 'ts':time.time()})
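# e.g. log_net_info('connected', ip='1.2.3.4') is rendered by JSONFormatter
# (roughly) as {"net_info": {"connected": {"ip": "1.2.3.4"}}, "ts": 1400000000.0}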
class LogWriter(object):
last = -1
def __init__(self, formatter, out_fh=sys.stdout):
self.loggers = []
self.formatter = formatter
self.out_fh = out_fh
def clear(self):
"removes all loggers"
for l in self.loggers[:]:
self.remove_logger(l)
__del__ = clear
def add_logger(self, logger_or_group):
assert isinstance(logger_or_group, (LogGroup, Logger))
logger_or_group.add_listener(self.log_cb)
self.loggers.append(logger_or_group)
def remove_logger(self, logger_or_group):
assert isinstance(logger_or_group, (LogGroup, Logger))
logger_or_group.remove_listener(self.log_cb)
self.loggers.remove(logger_or_group)
def log_cb(self, logger, event_name, data):
_ = repr((event_name, data))
if _ == self.last:
return # skip same msg if we get it from multiple groups
self.last = _
self.out_fh.write(self.formatter.format(logger, event_name, data) + '\n')
class Logger(object):
listeners = [] # register callbacks here
    kargs_sorting = []  # sort order for formatters
is_active = 0
def __init__(self, name, level='warn'):
self.name = name
assert level in log_levels.keys(), (level, log_levels.keys())
self.level = level
self.listeners = []
self.kargs_sorting = []
def __repr__(self):
return '<Logger(%s, level=%s)' % (self.name, self.level)
def add_listener(self, cb):
self.listeners.append(cb)
def remove_listener(self, cb):
self.listeners.remove(cb)
def log(self, name_or_lazylog, *args, **kargs):
if not self.is_active:
return
if isinstance(name_or_lazylog, LazyLog):
kargs = name_or_lazylog.func()
event_name = name_or_lazylog.name
else:
event_name = name_or_lazylog
for l in self.listeners:
l(self, event_name, args or kargs)
__call__ = log
def configure_logging(logger_names):
assert isinstance(logger_names, list)
g = LogGroup('user')
for name in logger_names:
g.add_logger(logging.get(name.lower().strip()))
g.activate() # can only activate known loggers
logging.writer.clear()
logging.writer.add_logger(g)
# default config
logging = LogManager()
logging.writer = LogWriter(BaseFormatter())
logging.writer = LogWriter(JSONFormatter())
log_error = logging.create('error', level='error')
log_info = logging.create('info', level='info')
log_debug = logging.create('debug', level='debug')
# specific logger
log_net_info = logging.create('net_info', level='info')
log_net_debug = logging.create('net_debug', level='debug')
log_packet = logging.create('packet', level='debug')
log_eth = logging.create('wireeth', level='debug')
log_p2p = logging.create('wirep2p', level='debug')
log_packeter = logging.create('packeter', level='debug')
log_synchronizer = logging.create('sync', level='debug')
log_db = logging.create('db', level='debug')
log_miner = logging.create('miner', level='debug')
log_chain_warn = logging.create('chain_warn', level='warn')
log_chain_info = logging.create('chain', level='info')
log_chain_debug = logging.create('chain_debug', level='debug')
log_vm_exit = logging.create('vm_exit', level='debug')
log_vm_op = logging.create('vm_op', level='debug')
log_log = logging.create('log', level='info')
log_tx = logging.create('tx', level='debug')
log_msg = logging.create('msg', level='debug')
log_state = logging.create('state', level='debug')
log_block = logging.create('block', level='debug')
log_state_delta = logging.create('state_delta', level='debug')
log_pb = logging.group('pb', log_tx, log_msg, log_state, log_block)
log_vm = logging.group('vm', log_vm_op, log_vm_exit, log_log)
# default logger
log_basics = logging.group('default', *logging.list(level='info'))
# all logger
log_all = logging.group('all', *logging.list())
# configure log groups here
def all_loggers():
return logging.items().keys()
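# e.g. configure_logging(['vm', 'db']) activates the 'vm' group and the 'db'
# logger and routes their events to the writer configured above.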
if __name__ == '__main__':
# LogManager keeps track of the logging
logging = LogManager()
# create logging for topics (many is good!)
log_a = logging.create('log_a', level='critical')
log_b = logging.create('log_b', level='info')
log_c = logging.create('log_c', level='debug')
log_d = logging.create('log_d', level='trace')
# log manager should know about them
assert log_a in logging.list()
# logs can be filtered by maximum level
print 'logging included in level "info"'
print logging.list(level="info")
# combine multiple logging in a group
#
log_ab = logging.group('log_ab', log_a, log_b)
# groups can be nested
#
log_abc = logging.group('log_abc', log_ab, log_c)
# loggers need to be activated
#
log_abc.activate()
# groups can list their logging
#
assert len(log_abc.list()) == 3
assert log_abc.list(level='critical')[0] == log_a
# log manager can list all groups
assert len(logging.list_groups()) == 2
# decide on a formatter
#
log_formatter = BaseFormatter()
# create a writer
#
log_writer = LogWriter(log_formatter)
# and add logging
#
log_writer.add_logger(log_abc)
# basic logging
#
log_a.log('event', a=1, b=2, c={'aa': 11})
# object __call__ provides a shortcut
#
log_a('event', call_used=True)
log_c('event c', c=1)
# lazy evaluation possible, only triggered if logger is_active
#
def lazy_cb():
log_a.log('lazy evaluated', lazy=True)
return dict(b=2)
log_b(LazyLog('late evaluated', lazy_cb))
    # log_d was not activated
#
log_d('not logged', d=1)
def not_called():
raise Exception('not called if there is no listener')
log_d(LazyLog('not evaluated', not_called))
if log_d.is_active:
raise Exception('never called')
# we can also log strings and lists
#
log_a('list', range(10))
log_a('string', 'test string')
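    # formatters can honour a per-logger key order (kargs_sorting)
    #
    log_a.kargs_sorting = ['b', 'a']
    log_a('sorted event', a=1, b=2)  # BaseFormatter prints b=2 before a=1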
# logs can be added to multiple writers
# here using the JSON formatter and writing to StringIO
import StringIO
fh = StringIO.StringIO()
sio_writer = LogWriter(JSONFormatter(), out_fh=fh)
log_all = logging.group('all', *logging.list())
log_all.activate()
sio_writer.add_logger(log_all)
log_a('json list', range(10))
log_a('json event', a=1, b=2, c={'aa': 11})
fh.seek(0)
print fh.read()
|
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import math
import unittest
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddle.tensor as tensor
import paddle.utils as utils
from paddle.fluid import layers
from paddle.fluid.framework import in_dygraph_mode
from paddle.nn.layer.transformer import _convert_param_attr_to_list
from paddle.fluid.initializer import Normal, Constant, NumpyArrayInitializer
from paddle.distributed import fleet
import paddle.static as static
import paddle.distributed.auto_parallel as auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.utils import check_distributed_attr_for_program
from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
from paddle.distributed.auto_parallel.utils import _get_comm_group
from paddle.distributed.auto_parallel.process_group import new_process_group
paddle.enable_static()
_global_parallel_strategy = None
_global_process_mesh = None
def check_tensor_split(prog1, varnames1, prog2, varnames2, axis, nsplit):
for i in range(len(varnames1)):
var1 = prog1.global_block().var(varnames1[i] + '@GRAD')
var2 = prog2.global_block().var(varnames2[i])
if var1.shape[axis] != (var2.shape[axis] // nsplit):
return False
return True
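# e.g. (hypothetical sizes) if the vars in prog1 hold the sharded gradients and
# those in prog2 the serial parameters, a [768, 3072] weight split over
# nsplit=2 ranks along axis=1 passes the check because 1536 == 3072 // 2.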
def is_valid_completed_program(dist_context, program):
# TODO (ZJ-LIANG) should check all block
ops = program.global_block().ops
vars_ = program.list_vars()
    for op in ops:
        op_dist_attrs = dist_context.get_op_dist_attr_for_program(op)
        if op_dist_attrs is None:
            return False
        if op_dist_attrs.process_mesh is None:
            return False
        for tensor_dist_attr in op_dist_attrs.inputs_dist_attrs.values():
            if tensor_dist_attr.dims_mapping is None:
                return False
        for tensor_dist_attr in op_dist_attrs.outputs_dist_attrs.values():
            if tensor_dist_attr.dims_mapping is None:
                return False
    for var in vars_:
        var_dist_attrs = dist_context.get_tensor_dist_attr_for_program(var)
        if var_dist_attrs is None:
            return False
        elif var_dist_attrs.process_mesh is None:
            return False
        elif var_dist_attrs.dims_mapping is None:
            return False
return True
class MultiHeadAttention(nn.Layer):
"""
    Attention maps queries and a set of key-value pairs to outputs, and
    Multi-Head Attention performs multiple parallel attention computations to
    jointly attend to information from different representation subspaces.
"""
Cache = collections.namedtuple("Cache", ["k", "v"])
StaticCache = collections.namedtuple("StaticCache", ["k", "v"])
def __init__(self,
embed_dim,
num_heads,
dropout=0.,
kdim=None,
vdim=None,
need_weights=False,
weight_attr=None,
bias_attr=None,
topo=None,
fuse=False):
super(MultiHeadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.need_weights = need_weights
self.fuse = fuse
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if topo is None or topo.mp_info.size == 1:
if self.fuse:
assert self.kdim == embed_dim
assert self.vdim == embed_dim
self.qkv_proj = nn.Linear(
embed_dim, 3 * embed_dim, weight_attr, bias_attr=bias_attr)
else:
self.q_proj = nn.Linear(
embed_dim, embed_dim, weight_attr, bias_attr=bias_attr)
self.k_proj = nn.Linear(
self.kdim, embed_dim, weight_attr, bias_attr=bias_attr)
self.v_proj = nn.Linear(
self.vdim, embed_dim, weight_attr, bias_attr=bias_attr)
self.out_proj = nn.Linear(
embed_dim, embed_dim, weight_attr, bias_attr=bias_attr)
def _fuse_prepare_qkv(self, query):
mix_layer = self.qkv_proj(query)
mix_layer = paddle.reshape_(mix_layer,
[0, 0, self.num_heads, 3 * self.head_dim])
mix_layer = paddle.transpose(mix_layer, [0, 2, 1, 3])
q, k, v = paddle.split(mix_layer, num_or_sections=3, axis=-1)
return q, k, v
def _prepare_qkv(self, query, key, value, use_cache=False, cache=None):
r"""
        Prepares linearly projected queries, keys and values for use in
        subsequent multiple parallel attention. If `cache` is not None, cached
        results are used to reduce redundant calculations.
"""
q = self.q_proj(query)
if _global_parallel_strategy == "mp":
auto.shard_tensor(
self.q_proj.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [-1, 0]
})
elif _global_parallel_strategy == "dp_mp":
auto.shard_tensor(
self.q_proj.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [-1, 1]
})
q = tensor.reshape(x=q, shape=[0, 0, self.num_heads, self.head_dim])
q = tensor.transpose(x=q, perm=[0, 2, 1, 3])
if isinstance(cache, self.StaticCache):
            # for encoder-decoder attention in inference, with cached static k/v
k, v = cache.k, cache.v
else:
k, v = self.compute_kv(key, value)
if isinstance(cache, self.Cache):
# for decoder self-attention in inference
k = tensor.concat([cache.k, k], axis=2)
v = tensor.concat([cache.v, v], axis=2)
if use_cache is True:
cache = self.Cache(k, v)
return (q, k, v) if use_cache is False else (q, k, v, cache)
def compute_kv(self, key, value):
r"""
Applies linear projection on input keys and values, then splits heads
(reshape and transpose) to get keys and values from different representation
        subspaces. The results are used as key-value pairs for subsequent multiple
        parallel attention.
        It is part of the calculation in multi-head attention, and is provided as
a method to pre-compute and prefetch these results, thus we can use them
to construct cache for inference.
"""
k = self.k_proj(key)
if _global_parallel_strategy == "mp":
auto.shard_tensor(
self.k_proj.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [-1, 0]
})
elif _global_parallel_strategy == "dp_mp":
auto.shard_tensor(
self.k_proj.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [-1, 1]
})
v = self.v_proj(value)
if _global_parallel_strategy == "mp":
auto.shard_tensor(
self.v_proj.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [-1, 0]
})
elif _global_parallel_strategy == "dp_mp":
auto.shard_tensor(
self.v_proj.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [-1, 1]
})
k = tensor.reshape(x=k, shape=[0, 0, self.num_heads, self.head_dim])
k = tensor.transpose(x=k, perm=[0, 2, 1, 3])
v = tensor.reshape(x=v, shape=[0, 0, self.num_heads, self.head_dim])
v = tensor.transpose(x=v, perm=[0, 2, 1, 3])
return k, v
def gen_cache(self, key, value=None, type=Cache):
"""
        Generates cache for `forward` usage in inference according to arguments.
The generated cache is an instance of `MultiHeadAttention.Cache` or an
instance of `MultiHeadAttention.StaticCache`.
"""
if type == MultiHeadAttention.StaticCache: # static_kv
k, v = self.compute_kv(key, value)
return self.StaticCache(k, v)
elif value is None: # incremental_state
k = layers.fill_constant_batch_size_like(
input=key,
shape=[-1, self.num_heads, 0, self.head_dim],
dtype=key.dtype,
value=0)
v = layers.fill_constant_batch_size_like(
input=key,
shape=[-1, self.num_heads, 0, self.head_dim],
dtype=key.dtype,
value=0)
return self.Cache(k, v)
else:
# incremental_state with initial value, mainly for usage like UniLM
return self.Cache(key, value)
def forward(self,
query,
key,
value,
attn_mask=None,
use_cache=False,
cache=None):
r"""
Applies multi-head attention to map queries and a set of key-value pairs
to outputs.
"""
key = query if key is None else key
value = query if value is None else value
# compute q ,k ,v
if use_cache is False:
if self.fuse:
q, k, v = self._fuse_prepare_qkv(query)
else:
q, k, v = self._prepare_qkv(query, key, value, use_cache, cache)
else:
q, k, v, cache = self._prepare_qkv(query, key, value, use_cache,
cache)
# scale dot product attention
product = layers.matmul(
x=q, y=k, transpose_y=True, alpha=self.head_dim**-0.5)
if attn_mask is not None:
product = product + attn_mask
weights = F.softmax(product)
if self.dropout:
weights = F.dropout(
weights,
self.dropout,
training=self.training,
mode="upscale_in_train")
out = tensor.matmul(weights, v)
# combine heads
out = tensor.transpose(out, perm=[0, 2, 1, 3])
out = tensor.reshape(x=out, shape=[0, 0, out.shape[2] * out.shape[3]])
# project to output
out = self.out_proj(out)
if _global_parallel_strategy == "mp":
auto.shard_tensor(
self.out_proj.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [0, -1]
})
elif _global_parallel_strategy == "dp_mp":
auto.shard_tensor(
self.out_proj.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [1, -1]
})
outs = [out]
if self.need_weights:
outs.append(weights)
if use_cache:
outs.append(cache)
return out if len(outs) == 1 else tuple(outs)
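# A minimal usage sketch of MultiHeadAttention (never called by this test; the layer
# sizes and data name below are hypothetical): inputs of shape
# [batch, seq_len, embed_dim] map to an output of the same shape, with
# head_dim = embed_dim // num_heads used internally per attention head.
def _multi_head_attention_sketch():
    attn = MultiHeadAttention(embed_dim=768, num_heads=12)  # head_dim == 64
    x = static.data(name="mha_sketch_x", shape=[16, 512, 768], dtype="float32")
    # Self-attention: query, key and value are the same tensor.
    return attn(x, x, x)  # output shape: [16, 512, 768]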
class TransformerDecoder(nn.Layer):
"""
TransformerDecoder is a stack of N decoder layers.
"""
def __init__(self,
decoder_layers,
num_layers,
norm=None,
hidden_size=None,
topo=None):
super(TransformerDecoder, self).__init__()
self.topo = topo
self.num_layers = num_layers
self.layers = decoder_layers
self.norm = norm
        if norm == "LayerNorm":
self.norm = nn.LayerNorm(hidden_size)
elif norm is not None:
raise ValueError("Only support LayerNorm")
self.checkpoints = []
def forward(self,
tgt,
memory,
tgt_mask=None,
memory_mask=None,
use_cache=False,
cache=None):
r"""
Applies a stack of N Transformer decoder layers on inputs. If `norm` is
        provided, also applies layer normalization on the output of the last decoder
layer.
"""
output = tgt
new_caches = []
self.checkpoints = []
for i, mod in enumerate(self.layers):
if cache is None:
if use_cache:
output, new_cache = mod(output,
memory,
tgt_mask=tgt_mask,
use_cache=use_cache,
cache=cache)
new_caches.append(new_cache)
else:
output = mod(output,
memory,
tgt_mask=tgt_mask,
use_cache=use_cache,
cache=cache)
else:
output, new_cache = mod(output,
memory,
tgt_mask=tgt_mask,
use_cache=use_cache,
cache=cache[i])
new_caches.append(new_cache)
self.checkpoints.append(output.name)
if self.norm is not None:
output = self.norm(output)
return output if use_cache is False else (output, new_caches)
def gen_cache(self, memory, do_zip=False):
r"""
Generates cache for `forward` usage. The generated cache is a list, and
each element in it is a tuple( :code:`(incremental_cache, static_cache)` )
produced by `TransformerDecoderLayer.gen_cache`. See `TransformerDecoderLayer.gen_cache`
for more details. If `do_zip` is True, apply `zip` on these tuples to get
a list with two elements.
"""
cache = [layer.gen_cache(memory) for layer in self.layers]
if do_zip:
cache = list(zip(*cache))
return cache
class TransformerDecoderLayer(nn.Layer):
"""
The transformer decoder layer.
It contains multiheadattention and some linear layers.
"""
def __init__(self,
d_model,
nhead,
dim_feedforward,
dropout=0.1,
activation="gelu",
attn_dropout=None,
act_dropout=None,
normalize_before=True,
weight_attr=None,
bias_attr=None,
topo=None):
self._config = locals()
self._config.pop("self")
self._config.pop("__class__", None) # py3
super(TransformerDecoderLayer, self).__init__()
attn_dropout = dropout if attn_dropout is None else attn_dropout
act_dropout = dropout if act_dropout is None else act_dropout
self.normalize_before = normalize_before
weight_attrs = _convert_param_attr_to_list(weight_attr, 3)
bias_attrs = _convert_param_attr_to_list(bias_attr, 3)
self.self_attn = MultiHeadAttention(
d_model,
nhead,
dropout=attn_dropout,
weight_attr=weight_attrs[0],
bias_attr=bias_attrs[0],
topo=topo)
if topo is None or topo.mp_info.size == 1:
self.linear1 = nn.Linear(
d_model,
dim_feedforward,
weight_attrs[2],
bias_attr=bias_attrs[2])
self.linear2 = nn.Linear(
dim_feedforward,
d_model,
weight_attrs[2],
bias_attr=bias_attrs[2])
self.norm1 = nn.LayerNorm(d_model, epsilon=1e-5)
self.norm2 = nn.LayerNorm(d_model, epsilon=1e-5)
self.dropout1 = nn.Dropout(dropout, mode="upscale_in_train")
self.dropout2 = nn.Dropout(act_dropout, mode="upscale_in_train")
self.activation = getattr(F, activation)
def forward(self, tgt, memory, tgt_mask=None, use_cache=False, cache=None):
residual = tgt
if self.normalize_before:
tgt = self.norm1(tgt)
if use_cache is False:
tgt = self.self_attn(tgt, tgt, tgt, tgt_mask, use_cache, cache)
else:
tgt, incremental_cache = self.self_attn(tgt, tgt, tgt, tgt_mask,
use_cache, cache)
tgt = residual + self.dropout1(tgt)
if not self.normalize_before:
tgt = self.norm1(tgt)
residual = tgt
if self.normalize_before:
tgt = self.norm2(tgt)
if _global_parallel_strategy == "mp":
auto.shard_tensor(
self.linear1.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [-1, 0]
})
elif _global_parallel_strategy == "dp_mp":
auto.shard_tensor(
self.linear1.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [-1, 1]
})
if _global_parallel_strategy == "mp":
auto.shard_tensor(
self.linear2.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [0, -1]
})
elif _global_parallel_strategy == "dp_mp":
auto.shard_tensor(
self.linear2.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [1, -1]
})
# tgt = self.dropout2(
# self.linear2(F.gelu(
# self.linear1(tgt), approximate=True)))
tgt = self.linear1(tgt)
tgt = F.gelu(tgt, approximate=True)
tgt = self.dropout2(self.linear2(tgt))
tgt = residual + tgt
if not self.normalize_before:
tgt = self.norm2(tgt)
return tgt if use_cache is False else (tgt, incremental_cache)
def gen_cache(self, memory):
incremental_cache = self.self_attn.gen_cache(
memory, type=self.self_attn.Cache)
return incremental_cache
class GPTEmbeddings(nn.Layer):
"""
    Includes word embeddings and position embeddings.
"""
def __init__(self,
vocab_size,
hidden_size=768,
hidden_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02,
topo=None):
super(GPTEmbeddings, self).__init__()
if topo is None or topo.mp_info.size == 1:
self.word_embeddings = nn.Embedding(
vocab_size,
hidden_size,
weight_attr=paddle.ParamAttr(
name="word_embeddings",
initializer=nn.initializer.Normal(
mean=0.0, std=initializer_range)))
self.position_embeddings = nn.Embedding(
max_position_embeddings,
hidden_size,
weight_attr=paddle.ParamAttr(
name="pos_embeddings",
initializer=nn.initializer.Normal(
mean=0.0, std=initializer_range)))
self.dropout = nn.Dropout(hidden_dropout_prob)
def forward(self, input_ids, position_ids=None):
if position_ids is None:
ones = paddle.ones_like(input_ids, dtype="int64")
seq_length = paddle.cumsum(ones, axis=-1)
position_ids = seq_length - ones
input_embedings = self.word_embeddings(input_ids)
if _global_parallel_strategy == "mp":
auto.shard_tensor(
self.word_embeddings.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [0, -1]
})
elif _global_parallel_strategy == "dp_mp":
auto.shard_tensor(
self.word_embeddings.weight,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [1, -1]
})
position_embeddings = self.position_embeddings(position_ids)
embeddings = input_embedings + position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
class GPTModel(nn.Layer):
"""
    The base model of GPT.
"""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=4,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02,
pad_token_id=0,
topo=None):
super(GPTModel, self).__init__()
self.pad_token_id = pad_token_id
self.initializer_range = initializer_range
self.topo = topo
self.hidden_size = hidden_size
self.vocab_size = vocab_size
        self.pipeline_mode = topo is not None and topo.pp_info.size > 1
        if self.pipeline_mode:
self.layer_per_stage = num_hidden_layers // self.topo.pp_info.size
self.embeddings = GPTEmbeddings(
vocab_size, hidden_size, hidden_dropout_prob,
max_position_embeddings, type_vocab_size, self.initializer_range,
topo)
decoder_layers = nn.LayerList()
for i in range(num_hidden_layers):
DecoderLayer = TransformerDecoderLayer
decoder_layers.append(
DecoderLayer(
d_model=hidden_size,
nhead=num_attention_heads,
dim_feedforward=intermediate_size,
dropout=hidden_dropout_prob,
activation=hidden_act,
attn_dropout=attention_probs_dropout_prob,
act_dropout=hidden_dropout_prob,
weight_attr=paddle.ParamAttr(
initializer=nn.initializer.Normal(
mean=0.0, std=self.initializer_range)),
bias_attr=None,
topo=topo))
Decoder = TransformerDecoder
self.decoder = Decoder(
decoder_layers,
num_hidden_layers,
norm="LayerNorm",
hidden_size=hidden_size,
topo=topo)
self.checkpoints = []
def forward(self,
input_ids,
position_ids=None,
attention_mask=None,
use_cache=False,
cache=None):
self.checkpoints = []
if attention_mask is None:
length = paddle.shape(input_ids)[1]
# Use bool mask
attention_mask = paddle.tensor.tril(
paddle.ones(
(length, length),
dtype=self.embeddings.word_embeddings.weight.dtype))
if position_ids is None:
past_length = 0
if cache is not None:
past_length = paddle.shape(cache[0].k)[-2]
position_ids = paddle.arange(
past_length,
paddle.shape(input_ids)[-1] + past_length,
dtype='int64')
position_ids = position_ids.unsqueeze(0)
# .expand_as(input_ids)
position_ids = paddle.fluid.layers.expand_as(position_ids,
input_ids)
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids)
# TODO, use registered buffer
causal_mask = paddle.tensor.triu(
paddle.ones((paddle.shape(input_ids)[-1],
paddle.shape(input_ids)[-1])) * -1e9,
diagonal=1)
if attention_mask is not None:
attention_mask = attention_mask + causal_mask
else:
attention_mask = causal_mask
        # The tensor returned by triu is not in the static graph.
attention_mask.stop_gradient = True
encoder_outputs = self.decoder(
embedding_output,
memory=None,
tgt_mask=attention_mask,
use_cache=use_cache,
cache=cache)
self.checkpoints.extend(self.decoder.checkpoints)
return encoder_outputs
class GPTForPretraining(nn.Layer):
"""
The pretraining model of GPT.
It returns some logits and cached_kvs.
"""
def __init__(self, gpt):
super(GPTForPretraining, self).__init__()
self.gpt = gpt
self.share_param = False
self.weight = self.gpt.embeddings.word_embeddings.weight
if not self.share_param:
self.weight = self.create_parameter(shape=self.weight.shape)
def parallel_matmul(self, lm_output, logit_weights, parallel_output, topo):
if topo is not None and topo.mp_info.size > 1:
input_parallel = paddle.distributed.collective._c_identity(
lm_output, group=None)
logits = paddle.matmul(
input_parallel, logit_weights, transpose_y=True)
if parallel_output:
return logits
return paddle.distributed.collective._c_concat(logits, group=None)
else:
logits = paddle.matmul(lm_output, logit_weights, transpose_y=True)
return logits
def forward(self,
input_ids,
position_ids=None,
attention_mask=None,
masked_positions=None,
use_cache=False,
cache=None):
outputs = self.gpt(input_ids,
position_ids=position_ids,
attention_mask=attention_mask,
use_cache=use_cache,
cache=cache)
if use_cache:
encoder_outputs, cached_kvs = outputs[:2]
else:
encoder_outputs = outputs
logits = self.parallel_matmul(encoder_outputs, self.weight, True,
self.gpt.topo)
if use_cache:
return logits, cached_kvs
else:
return logits
class GPTPretrainingCriterion(nn.Layer):
"""
Criterion for GPT.
It calculates the final loss.
"""
def __init__(self, topo=None):
super(GPTPretrainingCriterion, self).__init__()
if topo is None or topo.mp_info.size == 1:
self.loss_func = paddle.nn.CrossEntropyLoss(reduction="none")
else:
self.loss_func = paddle.distributed.collective._c_softmax_with_cross_entropy
def forward(self, prediction_scores, masked_lm_labels, loss_mask):
masked_lm_loss = self.loss_func(prediction_scores,
masked_lm_labels.unsqueeze(2))
loss_mask = loss_mask.reshape([-1])
masked_lm_loss = paddle.sum(masked_lm_loss.reshape([-1]) * loss_mask)
loss = masked_lm_loss / loss_mask.sum()
return loss
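# Worked example (hypothetical numbers) of the masking above: with per-token losses
# [2.0, 4.0, 6.0] and loss_mask [1, 1, 0], masked_lm_loss = 2.0 * 1 + 4.0 * 1 + 6.0 * 0
# = 6.0 and loss = 6.0 / 2 = 3.0, so padded positions do not contribute to the average.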
def gpt_pretrain_forward(train_program, startup_program):
with static.program_guard(train_program,
startup_program), utils.unique_name.guard():
batch_size = 16
sequence_len = 512
input_ids = static.data(
name="input_ids", shape=[batch_size, sequence_len], dtype='int64')
position_ids = static.data(
name="position_ids",
shape=[batch_size, sequence_len],
dtype='int64')
attention_mask = static.data(
name="attention_mask",
shape=[batch_size, 1, sequence_len, sequence_len],
dtype='float64')
labels = static.data(
name="labels", shape=[batch_size, sequence_len], dtype='int64')
loss_mask = static.data(
name="loss_mask", shape=[batch_size, sequence_len], dtype='float64')
if _global_parallel_strategy == "dp":
auto.shard_tensor(
input_ids,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [0, -1]
})
elif _global_parallel_strategy == "dp_mp":
auto.shard_tensor(
input_ids,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [0, -1]
})
gpt = GPTModel(
vocab_size=32768,
hidden_size=768,
num_hidden_layers=2,
num_attention_heads=12,
intermediate_size=4096,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=1024,
type_vocab_size=16,
initializer_range=0.02,
pad_token_id=0,
topo=None)
model = GPTForPretraining(gpt)
preds = model(input_ids, position_ids, attention_mask)
criterion = GPTPretrainingCriterion()
loss = criterion(preds, labels, loss_mask)
return train_program, startup_program, loss
class FakeStrategy(object):
def __init__(self):
self.amp = False
self.recompute = False
class FakeFleet(object):
def __init__(self):
self.user_defined_optimizer = None
self._user_defined_strategy = FakeStrategy()
class TestGPTPartitioner(unittest.TestCase):
def test_gpt_dp_mp(self):
global _global_parallel_strategy
_global_parallel_strategy = "dp_mp"
global _global_process_mesh
_global_process_mesh = auto.ProcessMesh(
mesh=[[0, 1, 2, 3], [4, 5, 6, 7]])
train_program = static.Program()
startup_program = static.Program()
parallelizer = AutoParallelizer(FakeFleet())
dist_context = parallelizer._dist_context
dist_context.process_mesh = _global_process_mesh
train_program, startup_program, loss = gpt_pretrain_forward(
train_program, startup_program)
completer = Completer(dist_context)
complete_train_program = completer.complete_forward_annotation(
train_program)
# serial backward pass
params_grads = parallelizer._generate_backward(
complete_train_program,
startup_program,
loss,
parameter_list=None,
no_grad_set=None,
callbacks=None)
rank_id = 3
partitioner = Partitioner(dist_context, rank_id)
auto_parallel_main_prog, auto_parallel_startup_prog, params_grads = partitioner.partition(
complete_train_program, startup_program, params_grads)
with open("./test_auto_parallel_partitioner_serial_main_new.txt",
"w") as fw:
fw.write(str(train_program))
with open("./test_auto_parallel_partitioner_serial_startup_new.txt",
"w") as fw:
fw.write(str(startup_program))
from paddle.distributed.auto_parallel.dist_context import set_default_distributed_context
set_default_distributed_context(dist_context)
with open("./test_auto_parallel_partitioner_main_new.txt1", "w") as fw:
fw.write(str(auto_parallel_main_prog))
with open("./test_auto_parallel_partitioner_startup_new.txt1",
"w") as fw:
fw.write(str(auto_parallel_startup_prog))
# with open("./test_auto_parallel_partitioner_main_completed.txt", "w") as fw:
# from paddle.distributed.auto_parallel.completion import Completer
# completer = Completer()
# completer.complete_forward_annotation(auto_parallel_main_prog)
# fw.write(str(auto_parallel_main_prog))
nrank = 4
# col parallel
weights = [
'linear_0.w_0',
'linear_6.w_0',
'linear_10.w_0',
]
self.assertTrue(
check_tensor_split(auto_parallel_main_prog, weights,
complete_train_program, weights, 1, nrank))
# row parallel
weights = ['word_embeddings', 'linear_9.w_0', 'linear_11.w_0']
self.assertTrue(
check_tensor_split(auto_parallel_main_prog, weights,
complete_train_program, weights, 0, nrank))
weights = ['pos_embeddings', 'layer_norm_0.b_0', 'layer_norm_4.w_0']
self.assertTrue(
check_tensor_split(auto_parallel_main_prog, weights,
complete_train_program, weights, 0, 1))
all_params = sorted(
[param.name for param in startup_program.all_parameters()])
allreduce_grads = [
'layer_norm_5.tmp_2', 'layer_norm_5.tmp_2', 'layer_norm_5.tmp_2',
'layer_norm_6.tmp_2', 'layer_norm_7.tmp_2', 'layer_norm_7.tmp_2',
'layer_norm_7.tmp_2', 'layer_norm_8.tmp_2'
]
process_mesh = _global_process_mesh
mp_parallel_axis = 1
dp_parallel_axis = 0
group_ranks = _get_comm_group(
process_mesh.processes, process_mesh.topology, mp_parallel_axis, 3)
mp_ring_id = new_process_group(group_ranks).id
group_ranks = _get_comm_group(
process_mesh.processes, process_mesh.topology, dp_parallel_axis, 3)
dp_ring_id = new_process_group(group_ranks).id
tensor_parallel_allreduce_vars = sorted([
op.desc.output_arg_names()[0].split("@")[0]
for op in auto_parallel_main_prog.global_block().ops
if (op.type == "c_allreduce_sum" and op.attr('op_role') == 1 and
op.desc.attr("ring_id") == mp_ring_id)
])
data_parallel_allreduce_vars = sorted([
op.desc.output_arg_names()[0].split("@")[0]
for op in auto_parallel_main_prog.global_block().ops
if (op.type == "c_allreduce_sum" and op.desc.attr("ring_id") ==
dp_ring_id)
])
self.assertTrue(all_params == data_parallel_allreduce_vars)
self.assertTrue(allreduce_grads == tensor_parallel_allreduce_vars)
self.assertTrue(
is_valid_completed_program(dist_context, auto_parallel_main_prog))
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.keystone import basic
from tests.unit import test
BASE = "rally.plugins.openstack.scenarios.keystone."
BASIC = BASE + "basic.KeystoneBasic."
class KeystoneBasicTestCase(test.ScenarioTestCase):
@staticmethod
def _get_context():
context = test.get_test_context()
context.update({
"user": {
"id": "fake_user_id",
"credential": mock.MagicMock()
},
"tenant": {"id": "fake_tenant_id"}
})
return context
def test_create_user(self):
scenario = basic.KeystoneBasic(self.context)
scenario._user_create = mock.MagicMock()
scenario.create_user(password="tttt", tenant_id="id")
scenario._user_create.assert_called_once_with(password="tttt",
tenant_id="id")
def test_create_delete_user(self):
create_result = mock.MagicMock()
scenario = basic.KeystoneBasic(self.context)
scenario._user_create = mock.MagicMock(return_value=create_result)
scenario._resource_delete = mock.MagicMock()
scenario.create_delete_user(email="abcd", enabled=True)
scenario._user_create.assert_called_once_with(email="abcd",
enabled=True)
scenario._resource_delete.assert_called_once_with(create_result)
def test_create_user_set_enabled_and_delete(self):
scenario = basic.KeystoneBasic(self.context)
scenario._user_create = mock.Mock()
scenario._update_user_enabled = mock.Mock()
scenario._resource_delete = mock.Mock()
scenario.create_user_set_enabled_and_delete(enabled=True,
email="abcd")
scenario._user_create.assert_called_once_with(email="abcd",
enabled=True)
scenario._update_user_enabled.assert_called_once_with(
scenario._user_create.return_value, False)
scenario._resource_delete.assert_called_once_with(
scenario._user_create.return_value)
def test_create_tenant(self):
scenario = basic.KeystoneBasic(self.context)
scenario._tenant_create = mock.MagicMock()
scenario.create_tenant(enabled=True)
scenario._tenant_create.assert_called_once_with(enabled=True)
def test_create_tenant_with_users(self):
scenario = basic.KeystoneBasic(self.context)
fake_tenant = mock.MagicMock()
scenario._tenant_create = mock.MagicMock(return_value=fake_tenant)
scenario._users_create = mock.MagicMock()
scenario.create_tenant_with_users(users_per_tenant=1, enabled=True)
scenario._tenant_create.assert_called_once_with(enabled=True)
scenario._users_create.assert_called_once_with(fake_tenant,
users_per_tenant=1)
def test_create_and_list_users(self):
scenario = basic.KeystoneBasic(self.context)
scenario._user_create = mock.MagicMock()
scenario._list_users = mock.MagicMock()
scenario.create_and_list_users(password="tttt", tenant_id="id")
scenario._user_create.assert_called_once_with(password="tttt",
tenant_id="id")
scenario._list_users.assert_called_once_with()
def test_create_and_list_tenants(self):
scenario = basic.KeystoneBasic(self.context)
scenario._tenant_create = mock.MagicMock()
scenario._list_tenants = mock.MagicMock()
scenario.create_and_list_tenants(enabled=True)
scenario._tenant_create.assert_called_once_with(enabled=True)
scenario._list_tenants.assert_called_with()
def test_assign_and_remove_user_role(self):
context = self._get_context()
scenario = basic.KeystoneBasic(context)
fake_tenant = context["tenant"]["id"]
fake_user = context["user"]["id"]
fake_role = mock.MagicMock()
scenario._tenant_create = mock.MagicMock(return_value=fake_tenant)
scenario._user_create = mock.MagicMock(return_value=fake_user)
scenario._role_create = mock.MagicMock(return_value=fake_role)
scenario._role_add = mock.MagicMock()
scenario._role_remove = mock.MagicMock()
scenario.add_and_remove_user_role()
scenario._role_create.assert_called_once_with()
scenario._role_add.assert_called_once_with(fake_user,
fake_role,
fake_tenant)
scenario._role_remove.assert_called_once_with(fake_user,
fake_role,
fake_tenant)
def test_create_and_delete_role(self):
scenario = basic.KeystoneBasic(self.context)
fake_role = mock.MagicMock()
scenario._role_create = mock.MagicMock(return_value=fake_role)
scenario._resource_delete = mock.MagicMock()
scenario.create_and_delete_role()
scenario._role_create.assert_called_once_with()
scenario._resource_delete.assert_called_once_with(fake_role)
def test_create_and_list_user_roles(self):
context = self._get_context()
scenario = basic.KeystoneBasic(context)
fake_tenant = context["tenant"]["id"]
fake_user = context["user"]["id"]
fake_role = mock.MagicMock()
scenario._tenant_create = mock.MagicMock(return_value=fake_tenant)
scenario._user_create = mock.MagicMock(return_value=fake_user)
scenario._role_create = mock.MagicMock(return_value=fake_role)
scenario._role_add = mock.MagicMock()
scenario._list_roles_for_user = mock.MagicMock()
scenario.create_add_and_list_user_roles()
scenario._role_create.assert_called_once_with()
scenario._role_add.assert_called_once_with(fake_user,
fake_role, fake_tenant)
scenario._list_roles_for_user.assert_called_once_with(fake_user,
fake_tenant)
def _test_get_entities(self, service_name="keystone"):
scenario = basic.KeystoneBasic(self.context)
fake_tenant = mock.MagicMock()
fake_user = mock.MagicMock()
fake_role = mock.MagicMock()
fake_service = mock.MagicMock()
scenario._tenant_create = mock.MagicMock(return_value=fake_tenant)
scenario._user_create = mock.MagicMock(return_value=fake_user)
scenario._role_create = mock.MagicMock(return_value=fake_role)
scenario._service_create = mock.MagicMock(return_value=fake_service)
scenario._get_tenant = mock.MagicMock(return_value=fake_tenant)
scenario._get_user = mock.MagicMock(return_value=fake_user)
scenario._get_role = mock.MagicMock(return_value=fake_role)
scenario._get_service_by_name = mock.MagicMock(
return_value=fake_service)
scenario._get_service = mock.MagicMock(return_value=fake_service)
scenario.get_entities(service_name)
scenario._tenant_create.assert_called_once_with()
scenario._user_create.assert_called_once_with()
scenario._role_create.assert_called_once_with()
scenario._get_tenant.assert_called_once_with(fake_tenant.id)
scenario._get_user.assert_called_once_with(fake_user.id)
scenario._get_role.assert_called_once_with(fake_role.id)
if service_name is None:
scenario._service_create.assert_called_once_with()
self.assertFalse(scenario._get_service_by_name.called)
else:
scenario._get_service_by_name.assert_called_once_with(service_name)
self.assertFalse(scenario._service_create.called)
scenario._get_service.assert_called_once_with(fake_service.id)
def test_get_entities(self):
self._test_get_entities()
def test_get_entities_with_service_name(self):
self._test_get_entities(service_name="fooservice")
def test_get_entities_create_service(self):
self._test_get_entities(service_name=None)
def test_create_and_delete_service(self):
scenario = basic.KeystoneBasic(self.context)
service_type = "test_service_type"
description = "test_description"
fake_service = mock.MagicMock()
scenario._service_create = mock.MagicMock(return_value=fake_service)
scenario._delete_service = mock.MagicMock()
scenario.create_and_delete_service(service_type=service_type,
description=description)
scenario._service_create.assert_called_once_with(service_type,
description)
scenario._delete_service.assert_called_once_with(fake_service.id)
def test_create_update_and_delete_tenant(self):
scenario = basic.KeystoneBasic(self.context)
fake_tenant = mock.MagicMock()
scenario._tenant_create = mock.MagicMock(return_value=fake_tenant)
scenario._update_tenant = mock.MagicMock()
scenario._resource_delete = mock.MagicMock()
scenario.create_update_and_delete_tenant()
scenario._update_tenant.assert_called_once_with(fake_tenant)
scenario._resource_delete.assert_called_once_with(fake_tenant)
def test_create_user_update_password(self):
scenario = basic.KeystoneBasic(self.context)
fake_password = "pswd"
fake_user = mock.MagicMock()
scenario._user_create = mock.MagicMock(return_value=fake_user)
scenario.generate_random_name = mock.MagicMock(
return_value=fake_password)
scenario._update_user_password = mock.MagicMock()
scenario.create_user_update_password()
scenario.generate_random_name.assert_called_once_with()
scenario._user_create.assert_called_once_with()
scenario._update_user_password.assert_called_once_with(fake_user.id,
fake_password)
def test_create_and_list_services(self):
scenario = basic.KeystoneBasic(self.context)
service_type = "test_service_type"
description = "test_description"
fake_service = mock.MagicMock()
scenario._service_create = mock.MagicMock(return_value=fake_service)
scenario._list_services = mock.MagicMock()
scenario.create_and_list_services(service_type=service_type,
description=description)
scenario._service_create.assert_called_once_with(service_type,
description)
scenario._list_services.assert_called_once_with()
def test_create_and_list_ec2credentials(self):
context = self._get_context()
scenario = basic.KeystoneBasic(context)
scenario._create_ec2credentials = mock.MagicMock()
scenario._list_ec2credentials = mock.MagicMock()
scenario.create_and_list_ec2credentials()
scenario._create_ec2credentials.assert_called_once_with(
"fake_user_id", "fake_tenant_id")
scenario._list_ec2credentials.assert_called_with("fake_user_id")
def test_create_and_delete_ec2credential(self):
fake_creds = mock.MagicMock()
context = self._get_context()
scenario = basic.KeystoneBasic(context)
scenario._create_ec2credentials = mock.MagicMock(
return_value=fake_creds)
scenario._delete_ec2credential = mock.MagicMock()
scenario.create_and_delete_ec2credential()
scenario._create_ec2credentials.assert_called_once_with(
"fake_user_id", "fake_tenant_id")
scenario._delete_ec2credential.assert_called_once_with(
"fake_user_id", fake_creds.access)
|
|
#!/usr/local/bin/python
import copy, datetime, glob, os, re, stat, sys, time
import cmdline
import crcfile
import deanlib
import filelib
SHORT_HELP = '''dispatch <controls> <filters> <src> <dest> <files>'''
LONG_HELP = '''dispatch <controls> <filters> <src> <dest> <files>
Controls | Filters | File patterns
------------------------+-----------------------+-----------------------
-q quiet | -E is equal | file [file ...]
-c compare or -s size | -L is less than | /l letters
-l larger | -G is greater than | /d digits then letters
-f force | -A is animated | /h hex
-p perms | -X x size | /x aaadd.jpg
-k keep | -Y y size | /n numeric
-x .# renaming | -P area | /r:regex
-a append | -F frames | /p picture
-n numeric append | -S size | /s sound
-d deldir | -M modify time | /m movie
-m mkdir | -N newer | /a all
-o order <n|i|s|t> | -O older | /f:file
-r rename <l|m|u|d> | -C count | uppercase inverts
-g glob
-t trash
-w recurse (experimental)
-y sync (experimental)
'''
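# Example invocations (illustrative only; directories are hypothetical), built from the
# switches documented above:
#   dispatch -m -k ~/incoming ~/pictures /p   copy (-k) picture files (/p), creating the
#                                             destination directory if needed (-m)
#   dispatch -t ~/downloads /n                send purely numeric file names (/n) to the
#                                             trash (-t)
#   dispatch -q -g ~/src ~/dst photo          quietly (-q) glob-move (-g) files whose
#                                             names start with "photo"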
SWITCHES = "acdefgklmnpqstvwxyzAEGL"
OPTIONS = "orCFMNOPSXY"
sz_lup = {
'E': 'eq', 'L': 'lt', 'G': 'gt',
'X': 'x', 'Y': 'y',
'P': 'a', 'A': 'an', 'F': 'fr',
'S': 'fs', 'M': 'mt', 'C': 'ct',
'N': 'nw', 'O': 'ol',
}
# uppercase to invert
slashes = { # performs a match against these
'/a': '.*\.',
'/l': '\w.*\.',
'/d': '\d\d*\w?\.',
'/h': '[0-9a-f]{8}\.',
'/x': '[a-z]{0,3}\d{2}\.jpg$',
'/m': '\w.*\.(asf|avi|rm|qt|mov|swf|flv|mpg|mp4)$',
'/s': '\w.*\.(mp3|wav)$',
'/p': '\w.*\.(jpg|jpeg|gif|png|bmp|pdf)$',
'/n': '\d\d*\.',
'/r': '\y',
'/b': '^\y',
'/e': '\y$',
}
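# Illustrative matches for the patterns above (hypothetical file names):
#   '/d' matches '123a.txt'   (digits, an optional trailing letter, then the extension)
#   '/x' matches 'abc12.jpg'  (up to three letters followed by exactly two digits)
#   '/r:^IMG_' substitutes the user-supplied regex for the '\y' placeholder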
sort_bys = {
'n': lambda x: x,
'i': lambda x: x.lower(),
's': lambda x: os.stat(x).st_size,
't': lambda x: os.stat(x).st_mtime,
}
digits = '0123456789'
letters = 'abcdefghijklmnopqrstuvwxyz'
mults = {'k': 1024, 'm': 1024*1024, 'g': 1024*1024*1024}
rename_types = {'u': 'upper', 'l': 'lower', 'm': 'mixed'}
def GenNumeric():
n = 0
while 1:
yield n
n += 1
def GenAlpha():
    a = 'a'
    while 1:
        yield a
        i = len(a) - 1
        while i >= 0 and a[i] == 'z':  # carry: a trailing 'z' wraps around to 'a'
            a = a[:i] + 'a' + a[i + 1:]
            i -= 1
        if i < 0:
            a = 'a' + a  # every position carried, so grow: 'zz' -> 'aaa'
        else:
            a = a[:i] + chr(ord(a[i]) + 1) + a[i + 1:]
# TODO: finish this
def RenameAppend(src, dst, srcdir='.', dstdir='.', switch={}):
# switch c e f k l n p q
root, ext = os.path.splitext(dst)
if switch.get('e'):
ext = '.' + switch['e'][-1]
if switch.get('n'):
num = 1
while True:
d = root + '_' + str(num) + ext
if RenameOrCopy(os.path.join(srcdir, src), os.path.join(dstdir, d), switch=switch):
return True
num = num + 1
if RenameOrCopy(os.path.join(srcdir, src), os.path.join(dstdir, dst), switch=switch):
return True
if root[-1] in digits:
addons = letters
else:
addons = digits
for a in addons:
try: # in case the previous iteration deleted the source
sts = os.stat(os.path.join(srcdir, src))
except:
found = os.path.exists(os.path.join(srcdir, src))
break
found = False
if ext:
d = root + a + ext
else:
d = root + a
if RenameOrCopy(os.path.join(srcdir, src), os.path.join(dstdir, d), switch=switch):
return True
return False
def RenameOrCopy(src, dst, switch):
# switch c f k l p q
if switch.get('k'):
ret = filelib.SafeCopy(src, dst, sw=switch,
comp=switch.get('c'), force=switch.get('f'), larger=switch.get('l'), quiet=switch.get('q'))
else:
ret = filelib.SafeRename(src, dst,
comp=switch.get('c'), force=switch.get('f'), larger=switch.get('l'), quiet=switch.get('q'))
if ret and switch.get('p'):
os.chmod(dst, stat.S_IREAD | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
return ret
def GetFiles(srcdir, filespecs, doglob):
if type(filespecs) == str:
filespecs = [filespecs]
cwd = os.getcwd()
curdrv = cwd2 = ''
if len(srcdir) > 1 and srcdir[1] == ':' and cwd[:2] != srcdir[:2]:
curdrv = cwd[:2]
os.chdir(srcdir[:2])
cwd2 = os.getcwd()
os.chdir(srcdir)
files = []
for filespec in filespecs:
if filespec.startswith('/f:'):
if os.path.exists(filespec[3:]):
files.extend(open(filespec[3:]).read().split('\n'))
elif filespec[:2].lower() in slashes:
fs_pat = slashes[filespec[:2]]
if len(filespec) > 2:
                fs_pat = fs_pat.replace('\y', filespec[3:])
fs_re = re.compile(fs_pat)
            if filespec[1].lower() == filespec[1]:
                files.extend(filter(lambda x: fs_re.match(x) is not None, os.listdir('.')))
            else:
                files.extend(filter(lambda x: fs_re.match(x) is None, os.listdir('.')))
elif doglob:
files.extend(glob.glob(filespec + '*'))
elif '*' in filespec or '?' in filespec:
files.extend(glob.glob(filespec))
else:
files.extend([filespec])
if cwd2:
os.chdir(cwd2)
if curdrv:
os.chdir(curdrv)
os.chdir(cwd)
return files
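# Illustrative calls (hypothetical arguments): GetFiles('.', ['/p'], False) lists the
# picture files in the current directory, while GetFiles('.', ['photo'], True) globs
# 'photo*'; explicit wildcards such as 'IMG_*.jpg' are globbed even without -g.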
# main
def Dispatch(srcdirspec, dstdir, filespec, switch=None, sz=None):
if srcdirspec is None or dstdir is None:
if not switch.get('q'):
print "Source or destination wasn't specified."
sys.exit(1)
if not switch:
switch = cmdline.FlagClass()
if not sz:
sz = cmdline.FlagClass()
if switch['v']:
sz['v'] = switch['v']
dstCRC = None
if dstdir.startswith(':'):
pass
elif not os.path.exists(dstdir):
if switch.get('m'):
os.mkdir(dstdir)
else:
if not switch.get('q'):
print "Destination directory doesn't seem to exist:", dstdir
sys.exit(1)
elif dstdir != '/dev/null':
dstCRC = crcfile.crc_file(dstdir)
count = 0
sort_by = sort_bys['n']
reverse = False
if switch.get('o'):
sw_s = switch['o'][-1]
if sw_s.startswith('-'):
reverse = not reverse
sw_s = sw_s[1:]
sort_by = sort_bys.get(sw_s, sort_by)
limit = sz.getnum('ct', 0)
if limit < 0:
limit = -limit
reverse = not reverse
if switch.get('w'): # wecursion (experimental)
# cannot glob
global dirlist
dirlist = []
deanlib.WalkDirs(srcdirspec, DirAccum, topdown=True, quiet=switch.get('q'))
for dirspec in dirlist:
dirspec = dirspec[len(srcdirspec) + 1:]
rdstdir = os.path.join(dstdir, dirspec)
if not os.path.exists(rdstdir):
os.mkdir(rdstdir)
dstCRC = crcfile.crc_file(rdstdir)
count += DispatchDir(os.path.join(srcdirspec, dirspec), rdstdir, dstCRC, filespec, switch, sz, sort_by, reverse, limit)
# crcfile.WriteCRCFile(rdstdir, dstCRC)
else:
for srcdir in glob.glob(srcdirspec):
count += DispatchDir(srcdir, dstdir, dstCRC, filespec, switch, sz, sort_by, reverse, limit)
del dstCRC
# if dstdir != '/dev/null':
# crcfile.WriteCRCFile(dstdir, dstCRC)
if switch.get('k'):
print count, "files copied"
else:
print count, "files moved"
return count
def DirAccum(dir, fl):
global dirlist
dirlist.append(dir)
def DispatchDir(srcdir, dstdir, dstCRC, filespec, switch, sz, sort_by, reverse, limit):
# switch a c d e f g k l n p q r v x
if not os.path.isdir(srcdir):
return 0
if switch.get('v'):
print "DispatchDir", srcdir, dstdir, filespec, switch, sz
if not os.path.exists(srcdir):
if not switch.get('q'):
print "Source directory doesn't seem to exist:", srcdir
sys.exit(1)
try:
if not dstdir.startswith(':') and os.path.samefile(srcdir, dstdir):
if not switch.get('q'):
print "Directories appear to be the same:", srcdir, dstdir
return 0
except:
pass
files = GetFiles(srcdir, filespec, switch.get('g'))
files.sort(key=sort_by)
if reverse:
files.reverse()
if switch.get('v'):
print files
srcCRC = crcfile.crc_file(srcdir)
count = 0
thisdstdir = dstdir
for f in files:
fsz = cmdline.FlagClass(sz)
if os.path.isdir(os.path.join(srcdir, f)):
continue
if switch.get('v'):
print f
d = f
if switch.get('r'):
d = deanlib.CleanName(d, rename=rename_types.get(switch['r'][0].lower()))
if fsz:
if switch.get('v'):
print 'fsz =', fsz
import imaglib
if not imaglib.ImgSizeCheck(os.path.join(srcdir, f), fsz):
if switch.get('v'):
print "! failed filter"
continue
found = False
root, ext = os.path.splitext(d)
if switch.get('x') and ext and ext[1:].isdigit():
d = root
root, ext = os.path.splitext(d)
if dstdir == '/dev/null':
if not filelib.SafeRemove(os.path.join(srcdir, f), sw=switch):
srcCRC.RemoveFile(f)
continue
elif dstdir.startswith(':'):
if not dstdir[-1] in f:
if switch.get('v'):
print "! no prefix directory found"
continue
thisdstdir = f[:f.find(dstdir[-1])].lower()
if os.path.exists(thisdstdir):
pass
elif switch.get('m'):
os.mkdir(thisdstdir)
else:
if not switch.get('q'):
print "Destination directory doesn't seem to exist:", thisdstdir
continue
if switch.get('a'): # Go through append-style rename process
if not RenameAppend(f, d, srcdir, thisdstdir, switch):
if switch.get('v'):
print "! failed rename"
continue
elif not RenameOrCopy(os.path.join(srcdir, f), os.path.join(thisdstdir, d), switch):
if switch.get('v'):
print "! failed destination"
continue
count += 1
if dstdir.startswith(':'):
pass
else:
crc = srcCRC.GetFile(f)
if crc and dstCRC:
dstCRC.AddFile(d, crc)
if not switch.get('k'):
srcCRC.RemoveFile(f)
if limit and count >= limit:
break
srcCRC.CleanFileList()
del srcCRC
if switch.get('d'):
try:
os.rmdir(srcdir)
except:
pass
return count
# TODO: I want very much to replace the old getopt stuff with argparse.
# The argparse-based rewrite below is unfinished; it is kept commented out so it does
# not shadow the working CommandLine that follows.
# def CommandLine():
#     import argparse
#     parser = argparse.ArgumentParser()
def CommandLine(switches=SWITCHES, options=OPTIONS):
switch, files = cmdline.CommandLine(switches, options, short_help=SHORT_HELP, long_help=LONG_HELP)
sz = cmdline.MakeFlags(switch, sz_lup, options)
srcdir = dstdir = None
if files:
srcdir = files.pop(0)
if switch.get('t'):
dstdir = '/dev/null'
elif files:
dstdir = files.pop(0)
return srcdir, dstdir, files, switch, sz
if __name__ == '__main__':
pass
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Token provider interface."""
import abc
import base64
import datetime
import sys
import uuid
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
from keystone.common import cache
from keystone.common import dependency
from keystone.common import manager
from keystone import exception
from keystone.i18n import _, _LE
from keystone.models import token_model
from keystone import notifications
from keystone.token import persistence
from keystone.token import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
MEMOIZE = cache.get_memoization_decorator(section='token')
# NOTE(morganfainberg): This is for compatibility in case someone was relying
# on the old location of the UnsupportedTokenVersionException for their code.
UnsupportedTokenVersionException = exception.UnsupportedTokenVersionException
# supported token versions
V2 = token_model.V2
V3 = token_model.V3
VERSIONS = token_model.VERSIONS
def base64_encode(s):
"""Encode a URL-safe string."""
return base64.urlsafe_b64encode(s).rstrip('=')
def random_urlsafe_str():
"""Generate a random URL-safe string."""
# chop the padding (==) off the end of the encoding to save space
return base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2]
def random_urlsafe_str_to_bytes(s):
"""Convert a string generated by ``random_urlsafe_str()`` to bytes."""
# restore the padding (==) at the end of the string
return base64.urlsafe_b64decode(s + '==')
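# Round-trip sketch: uuid.uuid4().bytes is 16 bytes, so its URL-safe base64 encoding is
# 24 characters ending in '=='; random_urlsafe_str() drops those two padding characters
# (22 remain) and random_urlsafe_str_to_bytes() re-appends '==' before decoding, which
# recovers the original 16 bytes.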
def default_expire_time():
"""Determine when a fresh token should expire.
Expiration time varies based on configuration (see ``[token] expiration``).
:returns: a naive UTC datetime.datetime object
"""
expire_delta = datetime.timedelta(seconds=CONF.token.expiration)
return timeutils.utcnow() + expire_delta
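# For example, with an ``[token] expiration`` of 3600 seconds (an illustrative value),
# a token issued at 12:00:00 UTC would expire at 13:00:00 UTC.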
def audit_info(parent_audit_id):
"""Build the audit data for a token.
If ``parent_audit_id`` is None, the list will be one element in length
containing a newly generated audit_id.
If ``parent_audit_id`` is supplied, the list will be two elements in length
containing a newly generated audit_id and the ``parent_audit_id``. The
``parent_audit_id`` will always be element index 1 in the resulting
list.
:param parent_audit_id: the audit of the original token in the chain
:type parent_audit_id: str
:returns: Keystone token audit data
"""
audit_id = random_urlsafe_str()
if parent_audit_id is not None:
return [audit_id, parent_audit_id]
return [audit_id]
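# Illustrative values: audit_info(None) returns a one-element list such as
# ['EJ92t9hJRxWlQwfsWiNHUA'], while audit_info('VcxU2uYjT6eYBdxYWDHQJQ') returns
# ['EJ92t9hJRxWlQwfsWiNHUA', 'VcxU2uYjT6eYBdxYWDHQJQ'], keeping the parent id at index 1.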
@dependency.provider('token_provider_api')
@dependency.requires('assignment_api', 'revoke_api')
class Manager(manager.Manager):
"""Default pivot point for the token provider backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
driver_namespace = 'keystone.token.provider'
V2 = V2
V3 = V3
VERSIONS = VERSIONS
INVALIDATE_PROJECT_TOKEN_PERSISTENCE = 'invalidate_project_tokens'
INVALIDATE_USER_TOKEN_PERSISTENCE = 'invalidate_user_tokens'
_persistence_manager = None
def __init__(self):
super(Manager, self).__init__(CONF.token.provider)
self._register_callback_listeners()
def _register_callback_listeners(self):
# This is used by the @dependency.provider decorator to register the
# provider (token_provider_api) manager to listen for trust deletions.
callbacks = {
notifications.ACTIONS.deleted: [
['OS-TRUST:trust', self._trust_deleted_event_callback],
['user', self._delete_user_tokens_callback],
['domain', self._delete_domain_tokens_callback],
],
notifications.ACTIONS.disabled: [
['user', self._delete_user_tokens_callback],
['domain', self._delete_domain_tokens_callback],
['project', self._delete_project_tokens_callback],
],
notifications.ACTIONS.internal: [
[notifications.INVALIDATE_USER_TOKEN_PERSISTENCE,
self._delete_user_tokens_callback],
[notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE,
self._delete_user_project_tokens_callback],
[notifications.INVALIDATE_USER_OAUTH_CONSUMER_TOKENS,
self._delete_user_oauth_consumer_tokens_callback],
]
}
for event, cb_info in callbacks.items():
for resource_type, callback_fns in cb_info:
notifications.register_event_callback(event, resource_type,
callback_fns)
@property
def _needs_persistence(self):
return self.driver.needs_persistence()
@property
def _persistence(self):
# NOTE(morganfainberg): This should not be handled via __init__ to
        # avoid dependency injection oddities and circular dependencies (where
# the provider manager requires the token persistence manager, which
# requires the token provider manager).
if self._persistence_manager is None:
self._persistence_manager = persistence.PersistenceManager()
return self._persistence_manager
def _create_token(self, token_id, token_data):
try:
if isinstance(token_data['expires'], six.string_types):
token_data['expires'] = timeutils.normalize_time(
timeutils.parse_isotime(token_data['expires']))
self._persistence.create_token(token_id, token_data)
except Exception:
exc_info = sys.exc_info()
# an identical token may have been created already.
# if so, return the token_data as it is also identical
try:
self._persistence.get_token(token_id)
except exception.TokenNotFound:
six.reraise(*exc_info)
def validate_token(self, token_id, belongs_to=None):
unique_id = utils.generate_unique_id(token_id)
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
token = self._validate_token(unique_id)
self._token_belongs_to(token, belongs_to)
self._is_valid_token(token)
return token
def check_revocation_v2(self, token):
try:
token_data = token['access']
except KeyError:
raise exception.TokenNotFound(_('Failed to validate token'))
token_values = self.revoke_api.model.build_token_values_v2(
token_data, CONF.identity.default_domain_id)
self.revoke_api.check_token(token_values)
def validate_v2_token(self, token_id, belongs_to=None):
unique_id = utils.generate_unique_id(token_id)
if self._needs_persistence:
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
token_ref = self._persistence.get_token(unique_id)
else:
token_ref = token_id
token = self._validate_v2_token(token_ref)
token['access']['token']['id'] = token_id
self._token_belongs_to(token, belongs_to)
self._is_valid_token(token)
return token
def check_revocation_v3(self, token):
try:
token_data = token['token']
except KeyError:
raise exception.TokenNotFound(_('Failed to validate token'))
token_values = self.revoke_api.model.build_token_values(token_data)
self.revoke_api.check_token(token_values)
def check_revocation(self, token):
version = self.driver.get_token_version(token)
if version == V2:
return self.check_revocation_v2(token)
else:
return self.check_revocation_v3(token)
def validate_v3_token(self, token_id):
unique_id = utils.generate_unique_id(token_id)
# NOTE(lbragstad): Only go to persistent storage if we have a token to
# fetch from the backend (the driver persists the token). Otherwise
# the information about the token must be in the token id.
if not self._needs_persistence:
token_ref = token_id
else:
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
token_ref = self._persistence.get_token(unique_id)
token = self._validate_v3_token(token_ref)
self._is_valid_token(token)
return token
@MEMOIZE
def _validate_token(self, token_id):
if not self._needs_persistence:
return self.driver.validate_v3_token(token_id)
token_ref = self._persistence.get_token(token_id)
version = self.driver.get_token_version(token_ref)
if version == self.V3:
return self.driver.validate_v3_token(token_ref)
elif version == self.V2:
return self.driver.validate_v2_token(token_ref)
raise exception.UnsupportedTokenVersionException()
@MEMOIZE
def _validate_v2_token(self, token_id):
return self.driver.validate_v2_token(token_id)
@MEMOIZE
def _validate_v3_token(self, token_id):
return self.driver.validate_v3_token(token_id)
def _is_valid_token(self, token):
"""Verify the token is valid format and has not expired."""
current_time = timeutils.normalize_time(timeutils.utcnow())
try:
# Get the data we need from the correct location (V2 and V3 tokens
# differ in structure, Try V3 first, fall back to V2 second)
token_data = token.get('token', token.get('access'))
expires_at = token_data.get('expires_at',
token_data.get('expires'))
if not expires_at:
expires_at = token_data['token']['expires']
expiry = timeutils.normalize_time(
timeutils.parse_isotime(expires_at))
except Exception:
LOG.exception(_LE('Unexpected error or malformed token '
'determining token expiry: %s'), token)
raise exception.TokenNotFound(_('Failed to validate token'))
if current_time < expiry:
self.check_revocation(token)
# Token has not expired and has not been revoked.
return None
else:
raise exception.TokenNotFound(_('Failed to validate token'))
def _token_belongs_to(self, token, belongs_to):
"""Check if the token belongs to the right tenant.
This is only used on v2 tokens. The structural validity of the token
will have already been checked before this method is called.
"""
if belongs_to:
token_data = token['access']['token']
if ('tenant' not in token_data or
token_data['tenant']['id'] != belongs_to):
raise exception.Unauthorized()
def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
token_id, token_data = self.driver.issue_v2_token(
token_ref, roles_ref, catalog_ref)
if self._needs_persistence:
data = dict(key=token_id,
id=token_id,
expires=token_data['access']['token']['expires'],
user=token_ref['user'],
tenant=token_ref['tenant'],
metadata=token_ref['metadata'],
token_data=token_data,
bind=token_ref.get('bind'),
trust_id=token_ref['metadata'].get('trust_id'),
token_version=self.V2)
self._create_token(token_id, data)
return token_id, token_data
def issue_v3_token(self, user_id, method_names, expires_at=None,
project_id=None, domain_id=None, auth_context=None,
trust=None, metadata_ref=None, include_catalog=True,
parent_audit_id=None):
token_id, token_data = self.driver.issue_v3_token(
user_id, method_names, expires_at, project_id, domain_id,
auth_context, trust, metadata_ref, include_catalog,
parent_audit_id)
if metadata_ref is None:
metadata_ref = {}
if 'project' in token_data['token']:
# project-scoped token, fill in the v2 token data
            # all we care about are the role IDs
# FIXME(gyee): is there really a need to store roles in metadata?
role_ids = [r['id'] for r in token_data['token']['roles']]
metadata_ref = {'roles': role_ids}
if trust:
metadata_ref.setdefault('trust_id', trust['id'])
metadata_ref.setdefault('trustee_user_id',
trust['trustee_user_id'])
data = dict(key=token_id,
id=token_id,
expires=token_data['token']['expires_at'],
user=token_data['token']['user'],
tenant=token_data['token'].get('project'),
metadata=metadata_ref,
token_data=token_data,
trust_id=trust['id'] if trust else None,
token_version=self.V3)
if self._needs_persistence:
self._create_token(token_id, data)
return token_id, token_data
def invalidate_individual_token_cache(self, token_id):
# NOTE(morganfainberg): invalidate takes the exact same arguments as
# the normal method, this means we need to pass "self" in (which gets
# stripped off).
# FIXME(morganfainberg): Does this cache actually need to be
# invalidated? We maintain a cached revocation list, which should be
# consulted before accepting a token as valid. For now we will
# do the explicit individual token invalidation.
self._validate_token.invalidate(self, token_id)
self._validate_v2_token.invalidate(self, token_id)
self._validate_v3_token.invalidate(self, token_id)
def revoke_token(self, token_id, revoke_chain=False):
revoke_by_expires = False
project_id = None
domain_id = None
token_ref = token_model.KeystoneToken(
token_id=token_id,
token_data=self.validate_token(token_id))
user_id = token_ref.user_id
expires_at = token_ref.expires
audit_id = token_ref.audit_id
audit_chain_id = token_ref.audit_chain_id
if token_ref.project_scoped:
project_id = token_ref.project_id
if token_ref.domain_scoped:
domain_id = token_ref.domain_id
if audit_id is None and not revoke_chain:
LOG.debug('Received token with no audit_id.')
revoke_by_expires = True
if audit_chain_id is None and revoke_chain:
LOG.debug('Received token with no audit_chain_id.')
revoke_by_expires = True
if revoke_by_expires:
self.revoke_api.revoke_by_expiration(user_id, expires_at,
project_id=project_id,
domain_id=domain_id)
elif revoke_chain:
self.revoke_api.revoke_by_audit_chain_id(audit_chain_id,
project_id=project_id,
domain_id=domain_id)
else:
self.revoke_api.revoke_by_audit_id(audit_id)
if CONF.token.revoke_by_id and self._needs_persistence:
self._persistence.delete_token(token_id=token_id)
def list_revoked_tokens(self):
return self._persistence.list_revoked_tokens()
def _trust_deleted_event_callback(self, service, resource_type, operation,
payload):
if CONF.token.revoke_by_id:
trust_id = payload['resource_info']
trust = self.trust_api.get_trust(trust_id, deleted=True)
self._persistence.delete_tokens(user_id=trust['trustor_user_id'],
trust_id=trust_id)
def _delete_user_tokens_callback(self, service, resource_type, operation,
payload):
if CONF.token.revoke_by_id:
user_id = payload['resource_info']
self._persistence.delete_tokens_for_user(user_id)
def _delete_domain_tokens_callback(self, service, resource_type,
operation, payload):
if CONF.token.revoke_by_id:
domain_id = payload['resource_info']
self._persistence.delete_tokens_for_domain(domain_id=domain_id)
def _delete_user_project_tokens_callback(self, service, resource_type,
operation, payload):
if CONF.token.revoke_by_id:
user_id = payload['resource_info']['user_id']
project_id = payload['resource_info']['project_id']
self._persistence.delete_tokens_for_user(user_id=user_id,
project_id=project_id)
def _delete_project_tokens_callback(self, service, resource_type,
operation, payload):
if CONF.token.revoke_by_id:
project_id = payload['resource_info']
self._persistence.delete_tokens_for_users(
self.assignment_api.list_user_ids_for_project(project_id),
project_id=project_id)
def _delete_user_oauth_consumer_tokens_callback(self, service,
resource_type, operation,
payload):
if CONF.token.revoke_by_id:
user_id = payload['resource_info']['user_id']
consumer_id = payload['resource_info']['consumer_id']
self._persistence.delete_tokens(user_id=user_id,
consumer_id=consumer_id)
@six.add_metaclass(abc.ABCMeta)
class Provider(object):
"""Interface description for a Token provider."""
@abc.abstractmethod
def needs_persistence(self):
"""Determine if the token should be persisted.
If the token provider requires that the token be persisted to a
backend this should return True, otherwise return False.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_token_version(self, token_data):
"""Return the version of the given token data.
If the given token data is unrecognizable,
UnsupportedTokenVersionException is raised.
:param token_data: token_data
:type token_data: dict
:returns: token version string
:raises keystone.exception.UnsupportedTokenVersionException:
If the token version is not expected.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
"""Issue a V2 token.
:param token_ref: token data to generate token from
:type token_ref: dict
:param roles_ref: optional roles list
:type roles_ref: dict
:param catalog_ref: optional catalog information
:type catalog_ref: dict
:returns: (token_id, token_data)
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def issue_v3_token(self, user_id, method_names, expires_at=None,
project_id=None, domain_id=None, auth_context=None,
trust=None, metadata_ref=None, include_catalog=True,
parent_audit_id=None):
"""Issue a V3 Token.
:param user_id: identity of the user
:type user_id: string
:param method_names: names of authentication methods
:type method_names: list
:param expires_at: optional time the token will expire
:type expires_at: string
:param project_id: optional project identity
:type project_id: string
:param domain_id: optional domain identity
:type domain_id: string
:param auth_context: optional context from the authorization plugins
:type auth_context: dict
:param trust: optional trust reference
:type trust: dict
:param metadata_ref: optional metadata reference
:type metadata_ref: dict
:param include_catalog: optional, include the catalog in token data
:type include_catalog: boolean
:param parent_audit_id: optional, the audit id of the parent token
:type parent_audit_id: string
:returns: (token_id, token_data)
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def validate_v2_token(self, token_ref):
"""Validate the given V2 token and return the token data.
Must raise Unauthorized exception if unable to validate token.
:param token_ref: the token reference
:type token_ref: dict
:returns: token data
:raises keystone.exception.TokenNotFound: If the token doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def validate_v3_token(self, token_ref):
"""Validate the given V3 token and return the token_data.
:param token_ref: the token reference
:type token_ref: dict
:returns: token data
:raises keystone.exception.TokenNotFound: If the token doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def _get_token_id(self, token_data):
"""Generate the token_id based upon the data in token_data.
:param token_data: token information
:type token_data: dict
        :returns: token identifier
"""
raise exception.NotImplemented() # pragma: no cover
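# Illustrative sketch (an assumption, not part of Keystone itself): a minimal
# concrete provider showing how the abstract interface above is meant to be
# satisfied. The class name is hypothetical and the V2/V3 version constants
# are assumed to be defined earlier in this module.
#
#   class _ExampleProvider(Provider):
#       def needs_persistence(self):
#           # persist every issued token to the configured backend
#           return True
#
#       def get_token_version(self, token_data):
#           # crude structural check: v2 payloads carry 'access',
#           # v3 payloads carry 'token'
#           if 'access' in token_data:
#               return V2
#           if 'token' in token_data:
#               return V3
#           raise exception.UnsupportedTokenVersionException()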
|
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a mxcoind or Mxcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting MXC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_mxcoin_config(dbdir):
"""Read the mxcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "mxcoin.conf"))))
return dict(config_parser.items("all"))
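# Illustrative result (values are placeholders): for a mxcoin.conf containing
#   rpcuser=alice
#   rpcpassword=secret
#   testnet=1
# this returns {'rpcuser': 'alice', 'rpcpassword': 'secret', 'testnet': '1'}.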
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if 'rpcport' not in config:
config['rpcport'] = 19394 if testnet else 9394
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the mxcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(mxcoind):
info = mxcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
mxcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = mxcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(mxcoind):
address_summary = dict()
address_to_account = dict()
for info in mxcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = mxcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = mxcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
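# Illustrative shape of the mapping returned above (address and amounts are
# made up): each key is a receiving address and each value collects its
# unspent outputs, e.g.
#   {"mExampleAddress": {"total": 1.2,
#                        "outputs": [<listunspent entries>],
#                        "account": ""}}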
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
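# Worked example (hypothetical values): with needed = 1.0 and two inputs of
# 0.7 and 0.5, the greedy loop above takes both outpoints and returns them
# together with the leftover change of 0.2 (i.e. have - needed).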
def create_tx(mxcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(mxcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f MXC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to mxcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = mxcoind.createrawtransaction(inputs, outputs)
signed_rawtx = mxcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(mxcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = mxcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(mxcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = mxcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(mxcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
        fee = total_in - total_out  # implicit fee paid by this transaction
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of mxcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
    config = read_mxcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
mxcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(mxcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(mxcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(mxcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(mxcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = mxcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
|
from typing import Callable, Optional
from pp.components.grating_coupler.elliptical_trenches import grating_coupler_te
from pp.components.grating_coupler.elliptical_trenches import grating_coupler_tm
import pp
from pp.add_tapers import add_tapers
from pp.components.taper import taper
from pp.container import container
from pp.routing.route_fiber_array import route_fiber_array
from pp.routing.get_input_labels import get_input_labels
from pp.component import Component
def add_fiber_array_te(*args, **kwargs):
return add_fiber_array(*args, **kwargs)
def add_fiber_array_tm(*args, grating_coupler=grating_coupler_tm, **kwargs):
return add_fiber_array(*args, grating_coupler=grating_coupler, **kwargs)
@container
def add_fiber_array(
component: Component,
grating_coupler: Component = grating_coupler_te,
gc_port_name: str = "W0",
    component_name: Optional[str] = None,
taper_factory: Callable = taper,
taper_length: float = 10.0,
get_route_factory: Callable = route_fiber_array,
get_input_labels_function: Callable = get_input_labels,
**kwargs,
) -> Component:
"""returns component with optical IO (tapers, south routes and grating_couplers)
Args:
component: to connect
grating_coupler: grating coupler instance, function or list of functions
bend_factory: bend_circular
straight_factory: waveguide
fanout_length: None # if None, automatic calculation of fanout length
max_y0_optical: None
with_align_ports: True, adds loopback structures
waveguide_separation: 4.0
bend_radius: BEND_RADIUS
list_port_labels: None, adds TM labels to port indices in this list
connected_port_list_ids: None # only for type 0 optical routing
nb_optical_ports_lines: 1
force_manhattan: False
excluded_ports:
grating_indices: None
routing_waveguide: None
routing_method: connect_strip
gc_port_name: W0
optical_routing_type: None: autoselection, 0: no extension, 1: standard, 2: check
gc_rotation: -90
layer_label: LAYER.LABEL
input_port_indexes: [0]
component_name: for the label
taper_factory: taper function
get_route_factory: route_fiber_array
.. plot::
:include-source:
import pp
from pp.routing import add_fiber_array
c = pp.c.crossing()
cc = add_fiber_array(c)
pp.plotgds(cc)
"""
c = component
if not c.ports:
return c
if isinstance(grating_coupler, list):
gc = grating_coupler[0]
else:
gc = grating_coupler
gc = pp.call_if_func(gc)
gc_polarization = gc.polarization
component_name = component_name or c.name
name = f"{component_name}_{gc_polarization}"
cc = pp.Component(name=name)
port_width_gc = gc.ports[gc_port_name].width
optical_ports = c.get_ports_list(port_type="optical")
port_width_component = optical_ports[0].width
if port_width_component != port_width_gc:
c = add_tapers(
c,
taper_factory(
length=taper_length, width1=port_width_gc, width2=port_width_component
),
)
# for pn, p in c.ports.items():
# print(p.name, p.port_type, p.layer)
elements, io_gratings_lines, _ = get_route_factory(
component=c,
grating_coupler=grating_coupler,
gc_port_name=gc_port_name,
component_name=component_name,
get_input_labels_function=get_input_labels_function,
**kwargs,
)
if len(elements) == 0:
return c
for e in elements:
cc.add(e)
for io_gratings in io_gratings_lines:
cc.add(io_gratings)
cc.add(c.ref())
cc.move(origin=io_gratings_lines[0][0].ports[gc_port_name], destination=(0, 0))
for pname, p in c.ports.items():
if p.port_type != "optical":
cc.add_port(pname, port=p)
return cc
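# Illustrative usage (mirrors the docstring example above): wrap any component
# that exposes optical ports, e.g.
#   cc = add_fiber_array(pp.c.mmi2x2(), grating_coupler=grating_coupler_te)
# which returns a new container with tapers, south routes and grating
# couplers added around the original component.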
def test_type0():
component = pp.c.coupler(gap=0.244, length=5.67)
cc = add_fiber_array(component, optical_routing_type=0)
pp.write_gds(cc)
return cc
def test_type1():
component = pp.c.coupler(gap=0.2, length=5.0)
cc = add_fiber_array(component, optical_routing_type=1)
pp.write_gds(cc)
return cc
def test_type2():
c = pp.c.coupler(gap=0.244, length=5.67)
c.polarization = "tm"
cc = add_fiber_array(c, optical_routing_type=2)
pp.write_gds(cc)
return cc
def demo_tapers():
c = pp.c.waveguide(width=2)
cc = add_fiber_array(c, optical_routing_type=2)
return cc
def demo_te_and_tm():
c = pp.Component()
w = pp.c.waveguide()
wte = add_fiber_array(w, grating_coupler=pp.c.grating_coupler_elliptical_te)
wtm = add_fiber_array(w, grating_coupler=pp.c.grating_coupler_elliptical_tm)
c.add_ref(wte)
wtm_ref = c.add_ref(wtm)
wtm_ref.movey(wte.size_info.height)
return c
if __name__ == "__main__":
gcte = pp.c.grating_coupler_te
gctm = pp.c.grating_coupler_tm
# from pprint import pprint
layer_label = pp.LAYER.TEXT
layer_label = (66, 5)
# cc = demo_tapers()
# cc = test_type1()
# pprint(cc.get_json())
# c = pp.c.coupler(gap=0.2, length=5.6)
c = pp.c.mmi2x2()
# c = pp.c.waveguide()
c.y = 0
cc = add_fiber_array(
c,
# optical_routing_type=0,
# optical_routing_type=1,
# optical_routing_type=2,
# layer_label=layer_label,
# get_route_factory=route_fiber_single,
# get_route_factory=route_fiber_array,
grating_coupler=[gcte, gctm, gcte, gctm],
)
# cc = demo_te_and_tm()
# print(cc.ports.keys())
pp.show(cc)
print(cc.get_settings()["component"])
|
|
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Verifies that all source files contain the necessary copyright boilerplate
# snippet.
# This is based on existing work
# https://github.com/kubernetes/test-infra/blob/master/hack
# /verify_boilerplate.py
from __future__ import print_function
import argparse
import glob
import os
import re
import sys
def get_args():
"""Parses command line arguments.
Configures and runs argparse.ArgumentParser to extract command line
arguments.
Returns:
An argparse.Namespace containing the arguments parsed from the
command line
"""
parser = argparse.ArgumentParser()
parser.add_argument("filenames",
help="list of files to check, "
"all files if unspecified",
nargs='*')
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
"--rootdir",
default=rootdir,
help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "test/boilerplate")
parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir)
return parser.parse_args()
def get_refs(ARGS):
"""Converts the directory of boilerplate files into a map keyed by file
extension.
Reads each boilerplate file's contents into an array, then adds that array
to a map keyed by the file extension.
Returns:
A map of boilerplate lines, keyed by file extension. For example,
boilerplate.py.txt would result in the k,v pair {".py": py_lines} where
py_lines is an array containing each line of the file.
"""
refs = {}
# Find and iterate over the absolute path for each boilerplate template
for path in glob.glob(os.path.join(
ARGS.boilerplate_dir,
"boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
# pylint: disable=too-many-locals
def has_valid_header(filename, refs, regexs):
"""Test whether a file has the correct boilerplate header.
Tests each file against the boilerplate stored in refs for that file type
(based on extension), or by the entire filename (eg Dockerfile, Makefile).
Some heuristics are applied to remove build tags and shebangs, but little
variance in header formatting is tolerated.
Args:
filename: A string containing the name of the file to test
refs: A map of boilerplate headers, keyed by file extension
regexs: a map of compiled regex objects used in verifying boilerplate
Returns:
True if the file has the correct boilerplate header, otherwise returns
False.
"""
try:
with open(filename, 'r') as fp: # pylint: disable=invalid-name
data = fp.read()
except IOError:
return False
basename = os.path.basename(filename)
extension = get_file_extension(filename)
if extension:
ref = refs[extension]
else:
ref = refs[basename]
# remove build tags from the top of Go files
if extension == "go":
con = regexs["go_build_constraints"]
(data, found) = con.subn("", data, 1)
# remove shebang
elif extension == "sh" or extension == "py":
she = regexs["shebang"]
(data, found) = she.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
year = regexs["year"]
for datum in data:
if year.search(datum):
return False
# if we don't match the reference at this point, fail
if ref != data:
return False
return True
def get_file_extension(filename):
"""Extracts the extension part of a filename.
Identifies the extension as everything after the last period in filename.
Args:
filename: string containing the filename
Returns:
A string containing the extension in lowercase
"""
return os.path.splitext(filename)[1].split(".")[-1].lower()
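# Examples (illustrative): get_file_extension("hack/verify_boilerplate.py")
# returns "py", while a file without an extension such as "Makefile" yields
# an empty string and is therefore matched by its basename instead.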
# These directories will be omitted from header checks
SKIPPED_DIRS = [
'Godeps', 'third_party', '_gopath', '_output',
'.git', 'vendor', '__init__.py', 'node_modules'
]
def normalize_files(files, rootdir):
"""Extracts the files that require boilerplate checking from the files
argument.
A new list will be built. Each path from the original files argument will
be added unless it is within one of SKIPPED_DIRS. All relative paths will
be converted to absolute paths by prepending the root_dir path parsed from
the command line, or its default value.
Args:
        files: a list of file path strings
        rootdir: root directory used to make relative paths absolute
    Returns:
        A modified copy of the files list where any path in a skipped
directory is removed, and all paths have been made absolute.
"""
newfiles = []
for pathname in files:
if any(x in pathname for x in SKIPPED_DIRS):
continue
newfiles.append(pathname)
for idx, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
            newfiles[idx] = os.path.join(rootdir, pathname)
return newfiles
def get_files(extensions, ARGS):
"""Generates a list of paths whose boilerplate should be verified.
If a list of file names has been provided on the command line, it will be
treated as the initial set to search. Otherwise, all paths within rootdir
will be discovered and used as the initial set.
Once the initial set of files is identified, it is normalized via
normalize_files() and further stripped of any file name whose extension is
not in extensions.
Args:
extensions: a list of file extensions indicating which file types
should have their boilerplate verified
Returns:
A list of absolute file paths
"""
files = []
if ARGS.filenames:
files = ARGS.filenames
else:
for root, dirs, walkfiles in os.walk(ARGS.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for dpath in SKIPPED_DIRS:
if dpath in dirs:
dirs.remove(dpath)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
    files = normalize_files(files, ARGS.rootdir)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = get_file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
"""Builds a map of regular expressions used in boilerplate validation.
There are two scenarios where these regexes are used. The first is in
validating the date referenced is the boilerplate, by ensuring it is an
acceptable year. The second is in identifying non-boilerplate elements,
like shebangs and compiler hints that should be ignored when validating
headers.
Returns:
A map of compiled regular expression objects, keyed by mnemonic.
"""
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the
# real thing
regexs["year"] = re.compile('YEAR')
    # dates can be 2014 through 2019; company or holder names can be
    # anything
regexs["date"] = re.compile('(2014|2015|2016|2017|2018|2019)')
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n",
re.MULTILINE)
# strip #!.* from shell/python scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
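# Example (illustrative): for a Python file starting with
#   #!/usr/bin/env python
#   # Copyright 2019 Google LLC
# the "shebang" regex strips the first line before comparison and the "date"
# regex allows the concrete year to be normalized back to the YEAR
# placeholder used by the boilerplate templates.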
def main(args):
"""Identifies and verifies files that should have the desired boilerplate.
Retrieves the lists of files to be validated and tests each one in turn.
If all files contain correct boilerplate, this function terminates
normally. Otherwise it prints the name of each non-conforming file and
    exits with a non-zero status code.
"""
regexs = get_regexs()
refs = get_refs(args)
filenames = get_files(refs.keys(), args)
nonconforming_files = []
for filename in filenames:
if not has_valid_header(filename, refs, regexs):
nonconforming_files.append(filename)
if nonconforming_files:
print('%d files have incorrect boilerplate headers:' % len(
nonconforming_files))
for filename in sorted(nonconforming_files):
print(os.path.relpath(filename, args.rootdir))
sys.exit(1)
if __name__ == "__main__":
ARGS = get_args()
main(ARGS)
|
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016,2018
import unittest
import sys
import itertools
import tempfile
import os
import uuid
from streamsx.topology.topology import *
from streamsx.topology.tester import Tester
def _create_tf():
with tempfile.NamedTemporaryFile(delete=False) as fp:
fp.write("CREATE\n".encode('utf-8'))
fp.flush()
return fp.name
class EnterExit(object):
def __init__(self, tf):
self.tf = tf
self._report('__init__')
def __enter__(self):
self._report('__enter__')
def __exit__(self, exc_type, exc_value, traceback):
self._report('__exit__')
if exc_type:
self._report(exc_type.__name__)
def __call__(self, t):
return t
def _report(self, txt):
with open(self.tf, 'a') as fp:
fp.write(str(txt))
fp.write(str('\n'))
fp.flush()
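# Illustrative lifecycle (matches the assertions in the tests below): using an
# EnterExit instance as a context manager appends one line per event to the
# temporary file created by _create_tf():
#   CREATE
#   __init__
#   __enter__
#   __exit__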
class ExcOnEnter(EnterExit):
def __enter__(self):
super(ExcOnEnter,self).__enter__()
raise ValueError('INTENTIONAL ERROR: __enter__ has failed!')
class ExcOnEnterSource(ExcOnEnter):
def __call__(self): return []
class BadData(EnterExit):
def __call__(self, t):
return {'a':'A' + str(t)}
class BadHash(EnterExit):
def __call__(self, t):
return 'A'
class BadDataFlatMap(EnterExit):
def __call__(self, t):
return [{'a':'A' + str(t)}]
class BadCall(EnterExit):
def __call__(self, t):
d = {}
return d['INTENTIONAL ERROR: notthere']
class BadSource(EnterExit):
def __call__(self):
d = {}
return d['INTENTIONAL ERROR: notthere']
class BadSourceIter(EnterExit):
def __call__(self):
return self
def __iter__(self):
raise UnicodeError("INTENTIONAL ERROR: Bad source __iter__")
class BadSourceNext(EnterExit):
def __call__(self):
return self
def __iter__(self):
return self
def __next__(self):
raise IndexError("INTENTIONAL ERROR: Bad source __next__")
class TestBaseExceptions(unittest.TestCase):
""" Test exceptions in callables
"""
_multiprocess_can_split_ = True
def setUp(self):
self.tf = _create_tf()
Tester.setup_standalone(self)
def tearDown(self):
if self.tf:
os.remove(self.tf)
def _result(self, n):
with open(self.tf) as fp:
content = fp.readlines()
self.assertTrue(len(content) >=3, msg=str(content))
self.assertEqual('CREATE\n', content[0])
self.assertEqual('__init__\n', content[1])
self.assertEqual('__enter__\n', content[2])
self.assertEqual(n, len(content), msg=str(content))
return content
class TestExceptions(TestBaseExceptions):
def test_context_mgr_ok(self):
try:
with EnterExit(self.tf) as x:
pass
except ValueError:
pass
content = self._result(4)
self.assertEqual('__exit__\n', content[3])
def test_context_mgr_enter_raise(self):
try:
with ExcOnEnter(self.tf) as x:
pass
except ValueError:
pass
self._result(3)
def test_context_mgr_body_raise(self):
try:
with EnterExit(self.tf) as x:
raise TypeError
except TypeError:
pass
content = self._result(5)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('TypeError\n', content[4])
def _run_app(self, fn=None, data=None):
topo = Topology('TE' + str(uuid.uuid4().hex))
if data is None:
data = [1,2,3]
se = topo.source(data)
if fn is not None:
se = fn(se)
tester = Tester(topo)
tester.run_for(3)
ok = tester.test(self.test_ctxtype, self.test_config, assert_on_fail=False)
self.assertFalse(ok)
def test_exc_on_enter_map(self):
"""Test exception on enter.
"""
self._run_app(lambda se : se.map(ExcOnEnter(self.tf)))
self._result(3)
def test_exc_on_data_conversion_map(self):
"""Test exception on enter.
"""
self._run_app(lambda se :
se.map(BadData(self.tf), schema='tuple<int32 a>'))
content = self._result(5)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('TypeError\n', content[4])
def test_exc_on_bad_call_map(self):
"""Test exception in __call__
"""
self._run_app(lambda se :
se.map(BadCall(self.tf), schema='tuple<int32 a>'))
content = self._result(5)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('KeyError\n', content[4])
def test_exc_on_enter_flat_map(self):
"""Test exception on enter.
"""
self._run_app(lambda se : se.flat_map(ExcOnEnter(self.tf)))
self._result(3)
def test_exc_on_bad_call_flat_map(self):
"""Test exception in __call__
"""
self._run_app(lambda se :
se.flat_map(BadCall(self.tf)))
content = self._result(5)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('KeyError\n', content[4])
def test_exc_on_enter_filter(self):
"""Test exception on enter.
"""
self._run_app(lambda se :
se.filter(ExcOnEnter(self.tf)))
self._result(3)
def test_exc_on_bad_call_filter(self):
"""Test exception in __call__
"""
self._run_app(lambda se :
se.filter(BadCall(self.tf)))
content = self._result(5)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('KeyError\n', content[4])
def test_exc_on_enter_for_each(self):
"""Test exception on enter.
"""
self._run_app(lambda se : se.for_each(ExcOnEnter(self.tf)))
self._result(3)
def test_exc_on_bad_call_for_each(self):
"""Test exception in __call__
"""
self._run_app(lambda se :
se.for_each(BadCall(self.tf)))
content = self._result(5)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('KeyError\n', content[4])
def test_exc_on_enter_hash(self):
"""Test exception on enter.
"""
self._run_app(lambda se :
se.parallel(1, routing=Routing.HASH_PARTITIONED, func=ExcOnEnter(self.tf)))
self._result(3)
def test_exc_on_bad_call_hash(self):
"""Test exception in __call__
"""
self._run_app(lambda se :
se.parallel(1, routing=Routing.HASH_PARTITIONED, func=BadCall(self.tf)))
content = self._result(5)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('KeyError\n', content[4])
def test_exc_on_data_conversion_hash(self):
"""Test exception on enter.
"""
self._run_app(lambda se :
se.parallel(1, routing=Routing.HASH_PARTITIONED, func=BadHash(self.tf)))
content = self._result(5)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('TypeError\n', content[4])
def test_exc_on_enter_source(self):
"""Test exception on enter.
"""
self._run_app(data=ExcOnEnterSource(self.tf))
self._result(3)
def test_exc_on_bad_call_source(self):
"""Test exception in __call__
This is the __call__ that sets up the iterator
"""
self._run_app(data=BadSource(self.tf))
content = self._result(5)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('KeyError\n', content[4])
def test_exc_on_bad_iter_source(self):
"""Test exception in __iter__
"""
self._run_app(data=BadSourceIter(self.tf))
content = self._result(5)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('UnicodeError\n', content[4])
def test_exc_on_bad_next_source(self):
"""Test exception in __iter__
"""
self._run_app(data=BadSourceNext(self.tf))
content = self._result(5)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('IndexError\n', content[4])
def test_exc_on_enter_aggregate(self):
"""Test exception on enter.
"""
self._run_app(lambda se : se.last(10).aggregate(ExcOnEnter(self.tf)))
self._result(3)
def test_exc_on_bad_call_aggregate(self):
"""Test exception in __call__
"""
self._run_app(lambda se :
se.last(10).aggregate(BadCall(self.tf)))
content = self._result(5)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('KeyError\n', content[4])
class SuppressSourceCall(EnterExit):
def __call__(self):
raise ValueError("INTENTIONAL ERROR: Error setting up iterable")
def __exit__(self, exc_type, exc_value, traceback):
super(SuppressSourceCall, self).__exit__(exc_type, exc_value, traceback)
return exc_type == ValueError
class SuppressSourceIter(EnterExit):
def __call__(self):
return self
def __iter__(self):
raise ValueError("INTENTIONAL ERROR: Error setting up iterable")
def __exit__(self, exc_type, exc_value, traceback):
super(SuppressSourceIter, self).__exit__(exc_type, exc_value, traceback)
return exc_type == ValueError
class SuppressSourceNext(EnterExit):
def __call__(self):
return self
def __iter__(self):
self.count = 3
return self
def __next__(self):
self.count += 1
if self.count == 5:
raise ValueError("INTENTIONAL ERROR: Skip 5!")
if self.count == 7:
raise StopIteration()
return self.count
def __exit__(self, exc_type, exc_value, traceback):
super(SuppressSourceNext, self).__exit__(exc_type, exc_value, traceback)
return exc_type == ValueError
class SuppressMapCall(EnterExit):
def __call__(self, t):
if t == 2:
raise ValueError("INTENTIONAL ERROR: Skip 2")
return t
def __exit__(self, exc_type, exc_value, traceback):
super(SuppressMapCall, self).__exit__(exc_type, exc_value, traceback)
return exc_type == ValueError
class SuppressFlatMapCall(EnterExit):
def __call__(self, t):
if t == 2:
raise ValueError("INTENTIONAL ERROR: Skip 2")
return [t, t]
def __exit__(self, exc_type, exc_value, traceback):
super(SuppressFlatMapCall, self).__exit__(exc_type, exc_value, traceback)
return exc_type == ValueError
class SuppressFilterCall(EnterExit):
def __call__(self, t):
if t != 2:
raise ValueError("INTENTIONAL ERROR: Skip everything but 2")
return t
def __exit__(self, exc_type, exc_value, traceback):
super(SuppressFilterCall, self).__exit__(exc_type, exc_value, traceback)
return exc_type == ValueError
class SuppressForEach(EnterExit):
def __call__(self, t):
if t == 1:
raise ValueError("INTENTIONAL ERROR: Skip 1")
return t
def __exit__(self, exc_type, exc_value, traceback):
super(SuppressForEach, self).__exit__(exc_type, exc_value, traceback)
return exc_type == ValueError
class SuppressHash(EnterExit):
def __call__(self, t):
if t == 3:
raise ValueError("INTENTIONAL ERROR: Skip 3")
return hash(t)
def __exit__(self, exc_type, exc_value, traceback):
super(SuppressHash, self).__exit__(exc_type, exc_value, traceback)
return exc_type == ValueError
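# Illustrative behaviour of the Suppress* callables above: each one raises
# ValueError for a specific tuple and its __exit__ returns True for that
# exception type, telling the runtime to swallow the error and drop that
# tuple, e.g. SuppressMapCall turns the source [1, 2, 3] into [1, 3].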
class TestSuppressExceptions(TestBaseExceptions):
""" Test exception suppression in callables
"""
def _run_app(self, fn=None, data=None, n=None, e=None):
topo = Topology('TSE' + str(uuid.uuid4().hex))
if data is None:
data = [1,2,3]
se = topo.source(data)
if fn is not None:
se = fn(se)
tester = Tester(topo)
if n is not None:
tester.tuple_count(se, n)
if e is not None:
tester.contents(se, e)
tester.run_for(3)
tester.test(self.test_ctxtype, self.test_config)
def test_exc_on_call_source(self):
"""Ignore exception on __call__.
Effectively since we've been told to ignore the __call__
exception we have no data source so we create an empty stream.
"""
self._run_app(data=SuppressSourceCall(self.tf), n=0)
content = self._result(6)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('ValueError\n', content[4])
self.assertEqual('__exit__\n', content[5])
def test_exc_on_iter_source(self):
"""Ignore exception on __iter__.
Effectively since we've been told to ignore the __iter__
exception we have no data source so we create an empty stream.
"""
self._run_app(data=SuppressSourceIter(self.tf), n=0)
content = self._result(6)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('ValueError\n', content[4])
self.assertEqual('__exit__\n', content[5])
def test_exc_on_next_source(self):
"""Ignore exception on __next__.
Ignore that step of the iteration.
"""
self._run_app(data=SuppressSourceNext(self.tf), n=2, e=[4,6])
content = self._result(6)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('ValueError\n', content[4])
self.assertEqual('__exit__\n', content[5])
def test_exc_on_call_map(self):
"""Ignore exception on __call__.
Ignore the tuple.
"""
self._run_app(fn= lambda se : se.map(SuppressMapCall(self.tf)), n=2, e=[1,3])
content = self._result(6)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('ValueError\n', content[4])
self.assertEqual('__exit__\n', content[5])
def test_exc_on_call_filter(self):
"""Ignore exception on __call__.
Ignore the tuple.
"""
self._run_app(fn= lambda se : se.map(SuppressFilterCall(self.tf)), n=1, e=[2])
content = self._result(8)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('ValueError\n', content[4])
self.assertEqual('__exit__\n', content[5])
self.assertEqual('ValueError\n', content[6])
self.assertEqual('__exit__\n', content[7])
def test_exc_on_call_flat_map(self):
"""Ignore exception on __call__.
Ignore the tuple.
"""
self._run_app(fn= lambda se : se.flat_map(SuppressFlatMapCall(self.tf)), n=4, e=[1,1,3,3])
content = self._result(6)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('ValueError\n', content[4])
self.assertEqual('__exit__\n', content[5])
def test_exc_on_call_for_each(self):
"""Ignore exception on __call__.
Ignore the tuple.
"""
self._run_app(lambda se : se.for_each(SuppressForEach(self.tf)))
content = self._result(6)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('ValueError\n', content[4])
self.assertEqual('__exit__\n', content[5])
def test_exc_on_call_hash(self):
"""Ignore exception on __call__.
Ignore the tuple.
"""
self._run_app(lambda se :
se.parallel(1, routing=Routing.HASH_PARTITIONED, func=SuppressHash(self.tf)).filter(lambda x : True).end_parallel(), n=2, e=[1,2])
content = self._result(6)
self.assertEqual('__exit__\n', content[3])
self.assertEqual('ValueError\n', content[4])
self.assertEqual('__exit__\n', content[5])
|
|
# # header
# coding: utf-8
if False: # MYPY
from typing import Any, Dict, Optional, List # NOQA
SHOWLINES = True
class Token(object):
__slots__ = 'start_mark', 'end_mark', '_comment',
def __init__(self, start_mark, end_mark):
# type: (Any, Any) -> None
self.start_mark = start_mark
self.end_mark = end_mark
def __repr__(self):
# type: () -> Any
# attributes = [key for key in self.__slots__ if not key.endswith('_mark') and
# hasattr('self', key)]
attributes = [key for key in self.__slots__ if not key.endswith('_mark')]
attributes.sort()
arguments = u', '.join([u'%s=%r' % (key, getattr(self, key))
for key in attributes])
if SHOWLINES:
try:
arguments += u', line: ' + str(self.start_mark.line)
except:
pass
return u'{}({})'.format(self.__class__.__name__, arguments)
def add_post_comment(self, comment):
# type: (Any) -> None
if not hasattr(self, '_comment'):
self._comment = [None, None]
self._comment[0] = comment
def add_pre_comments(self, comments):
# type: (Any) -> None
if not hasattr(self, '_comment'):
self._comment = [None, None]
assert self._comment[1] is None
self._comment[1] = comments
def get_comment(self):
# type: () -> Any
return getattr(self, '_comment', None)
@property
def comment(self):
# type: () -> Any
return getattr(self, '_comment', None)
def move_comment(self, target, empty=False):
# type: (Any, bool) -> Any
"""move a comment from this token to target (normally next token)
used to combine e.g. comments before a BlockEntryToken to the
ScalarToken that follows it
        empty is a special case for empty values -> comment after key
"""
c = self.comment
if c is None:
return
# don't push beyond last element
if isinstance(target, StreamEndToken):
return
delattr(self, '_comment')
tc = target.comment
if not tc: # target comment, just insert
# special for empty value in key: value issue 25
if empty:
c = [c[0], c[1], None, None, c[0]]
target._comment = c
# nprint('mco2:', self, target, target.comment, empty)
return self
if c[0] and tc[0] or c[1] and tc[1]:
            raise NotImplementedError('overlap in comment %r %r' % (c, tc))
if c[0]:
tc[0] = c[0]
if c[1]:
tc[1] = c[1]
return self
def split_comment(self):
# type: () -> Any
""" split the post part of a comment, and return it
as comment to be added. Delete second part if [None, None]
abc: # this goes to sequence
# this goes to first element
- first element
"""
comment = self.comment
if comment is None or comment[0] is None:
return None # nothing to do
ret_val = [comment[0], None]
if comment[1] is None:
delattr(self, '_comment')
return ret_val
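# Illustrative usage (not part of ruamel.yaml's public API; the mark arguments
# are placeholders):
#   t = Token(start_mark=None, end_mark=None)
#   t.add_post_comment('# trailing comment')
#   assert t.comment[0] == '# trailing comment'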
# class BOMToken(Token):
# id = '<byte order mark>'
class DirectiveToken(Token):
__slots__ = 'name', 'value',
id = '<directive>'
def __init__(self, name, value, start_mark, end_mark):
# type: (Any, Any, Any, Any) -> None
Token.__init__(self, start_mark, end_mark)
self.name = name
self.value = value
class DocumentStartToken(Token):
__slots__ = ()
id = '<document start>'
class DocumentEndToken(Token):
__slots__ = ()
id = '<document end>'
class StreamStartToken(Token):
__slots__ = 'encoding',
id = '<stream start>'
def __init__(self, start_mark=None, end_mark=None, encoding=None):
# type: (Any, Any, Any) -> None
Token.__init__(self, start_mark, end_mark)
self.encoding = encoding
class StreamEndToken(Token):
__slots__ = ()
id = '<stream end>'
class BlockSequenceStartToken(Token):
__slots__ = ()
id = '<block sequence start>'
class BlockMappingStartToken(Token):
__slots__ = ()
id = '<block mapping start>'
class BlockEndToken(Token):
__slots__ = ()
id = '<block end>'
class FlowSequenceStartToken(Token):
__slots__ = ()
id = '['
class FlowMappingStartToken(Token):
__slots__ = ()
id = '{'
class FlowSequenceEndToken(Token):
__slots__ = ()
id = ']'
class FlowMappingEndToken(Token):
__slots__ = ()
id = '}'
class KeyToken(Token):
__slots__ = ()
id = '?'
# def x__repr__(self):
# return 'KeyToken({})'.format(
# self.start_mark.buffer[self.start_mark.index:].split(None, 1)[0])
class ValueToken(Token):
__slots__ = ()
id = ':'
class BlockEntryToken(Token):
__slots__ = ()
id = '-'
class FlowEntryToken(Token):
__slots__ = ()
id = ','
class AliasToken(Token):
__slots__ = 'value',
id = '<alias>'
def __init__(self, value, start_mark, end_mark):
# type: (Any, Any, Any) -> None
Token.__init__(self, start_mark, end_mark)
self.value = value
class AnchorToken(Token):
__slots__ = 'value',
id = '<anchor>'
def __init__(self, value, start_mark, end_mark):
# type: (Any, Any, Any) -> None
Token.__init__(self, start_mark, end_mark)
self.value = value
class TagToken(Token):
__slots__ = 'value',
id = '<tag>'
def __init__(self, value, start_mark, end_mark):
# type: (Any, Any, Any) -> None
Token.__init__(self, start_mark, end_mark)
self.value = value
class ScalarToken(Token):
__slots__ = 'value', 'plain', 'style',
id = '<scalar>'
def __init__(self, value, plain, start_mark, end_mark, style=None):
# type: (Any, Any, Any, Any, Any) -> None
Token.__init__(self, start_mark, end_mark)
self.value = value
self.plain = plain
self.style = style
class CommentToken(Token):
__slots__ = 'value', 'pre_done',
id = '<comment>'
def __init__(self, value, start_mark, end_mark):
# type: (Any, Any, Any) -> None
Token.__init__(self, start_mark, end_mark)
self.value = value
def reset(self):
# type: () -> None
if hasattr(self, 'pre_done'):
delattr(self, 'pre_done')
def __repr__(self):
# type: () -> Any
v = u'{!r}'.format(self.value)
if SHOWLINES:
try:
v += u', line: ' + str(self.start_mark.line)
except:
pass
return 'CommentToken({})'.format(v)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest.mock import patch
from asana import Client
from airflow.models import Connection
from airflow.providers.asana.hooks.asana import AsanaHook
class TestAsanaHook(unittest.TestCase):
"""
Tests for AsanaHook Asana client retrieval
"""
def test_asana_client_retrieved(self):
"""
Test that we successfully retrieve an Asana client given a Connection with complete information.
:return: None
"""
with patch.object(
AsanaHook, "get_connection", return_value=Connection(conn_type="asana", password="test")
):
hook = AsanaHook()
client = hook.get_conn()
self.assertEqual(type(client), Client)
def test_missing_password_raises(self):
"""
Test that the Asana hook raises an exception if password not provided in connection.
:return: None
"""
with patch.object(AsanaHook, "get_connection", return_value=Connection(conn_type="asana")):
hook = AsanaHook()
with self.assertRaises(ValueError):
hook.get_conn()
def test_merge_create_task_parameters_default_project(self):
"""
Test that merge_create_task_parameters correctly merges the default and method parameters when we
do not override the default project.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__project": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"name": "test", "projects": ["1"]}
self.assertEqual(
expected_merged_params, hook._merge_create_task_parameters("test", {}) # pylint: disable=W0212
)
def test_merge_create_task_parameters_specified_project(self):
"""
Test that merge_create_task_parameters correctly merges the default and method parameters when we
override the default project.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__project": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"name": "test", "projects": ["1", "2"]}
self.assertEqual(
expected_merged_params,
hook._merge_create_task_parameters("test", {"projects": ["1", "2"]}), # pylint: disable=W0212
)
def test_merge_create_task_parameters_specified_workspace(self):
"""
Test that merge_create_task_parameters correctly merges the default and method parameters when we
do not override the default workspace.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__workspace": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"name": "test", "workspace": "1"}
self.assertEqual(
expected_merged_params, hook._merge_create_task_parameters("test", {}) # pylint: disable=W0212
)
def test_merge_create_task_parameters_default_project_overrides_default_workspace(self):
"""
Test that merge_create_task_parameters uses the default project over the default workspace
if it is available
:return: None
"""
conn = Connection(
conn_type="asana",
password="test",
extra='{"extra__asana__workspace": "1", "extra__asana__project": "1"}',
)
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"name": "test", "projects": ["1"]}
self.assertEqual(
expected_merged_params, hook._merge_create_task_parameters("test", {}) # pylint: disable=W0212
)
def test_merge_create_task_parameters_specified_project_overrides_default_workspace(self):
"""
Test that merge_create_task_parameters uses the method parameter project over the default workspace
if it is available
:return: None
"""
conn = Connection(
conn_type="asana",
password="test",
extra='{"extra__asana__workspace": "1"}',
)
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"name": "test", "projects": ["2"]}
self.assertEqual(
expected_merged_params,
hook._merge_create_task_parameters("test", {"projects": ["2"]}), # pylint: disable=W0212
)
def test_merge_find_task_parameters_default_project(self):
"""
Test that merge_find_task_parameters correctly merges the default and method parameters when we
do not override the default project.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__project": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"project": "1"}
self.assertEqual(
expected_merged_params, hook._merge_find_task_parameters({}) # pylint: disable=W0212
)
def test_merge_find_task_parameters_specified_project(self):
"""
Test that merge_find_task_parameters correctly merges the default and method parameters when we
do override the default project.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__project": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"project": "2"}
self.assertEqual(
expected_merged_params,
hook._merge_find_task_parameters({"project": "2"}), # pylint: disable=W0212
)
def test_merge_find_task_parameters_default_workspace(self):
"""
Test that merge_find_task_parameters correctly merges the default and method parameters when we
do not override the default workspace.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__workspace": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"workspace": "1", "assignee": "1"}
self.assertEqual(
expected_merged_params,
hook._merge_find_task_parameters({"assignee": "1"}), # pylint: disable=W0212
)
def test_merge_find_task_parameters_specified_workspace(self):
"""
Test that merge_find_task_parameters correctly merges the default and method parameters when we
do override the default workspace.
:return: None
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__workspace": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"workspace": "2", "assignee": "1"}
self.assertEqual(
expected_merged_params,
hook._merge_find_task_parameters({"workspace": "2", "assignee": "1"}), # pylint: disable=W0212
)
def test_merge_find_task_parameters_default_project_overrides_workspace(self):
"""
Test that merge_find_task_parameters uses the default project over the workspace if it is available
:return: None
"""
conn = Connection(
conn_type="asana",
password="test",
extra='{"extra__asana__workspace": "1", "extra__asana__project": "1"}',
)
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"project": "1"}
self.assertEqual(
expected_merged_params, hook._merge_find_task_parameters({}) # pylint: disable=W0212
)
def test_merge_find_task_parameters_specified_project_overrides_workspace(self):
"""
Test that merge_find_task_parameters uses the method parameter project over the default workspace
if it is available
:return: None
"""
conn = Connection(
conn_type="asana",
password="test",
extra='{"extra__asana__workspace": "1"}',
)
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"project": "2"}
self.assertEqual(
expected_merged_params,
hook._merge_find_task_parameters({"project": "2"}), # pylint: disable=W0212
)
def test_merge_project_parameters(self):
"""
Tests that default workspace is used if not overridden
:return:
"""
conn = Connection(conn_type="asana", password="test", extra='{"extra__asana__workspace": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"workspace": "1", "name": "name"}
self.assertEqual(
expected_merged_params, hook._merge_project_parameters({"name": "name"}) # pylint: disable=W0212
)
def test_merge_project_parameters_override(self):
"""
Tests that default workspace is successfully overridden
:return:
"""
conn = Connection(conn_type='asana', password='test', extra='{"extra__asana__workspace": "1"}')
with patch.object(AsanaHook, "get_connection", return_value=conn):
hook = AsanaHook()
expected_merged_params = {"workspace": "2"}
self.assertEqual(
expected_merged_params,
hook._merge_project_parameters({"workspace": "2"}), # pylint: disable=W0212
)
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import re
import stevedore
from oslo.config import cfg
from oslo import messaging
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.image import glance
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import utils
CONF = cfg.CONF
CONF.import_opt('enable_instance_password',
'nova.api.openstack.compute.servers')
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
CONF.import_opt('extensions_blacklist', 'nova.api.openstack', group='osapi_v3')
CONF.import_opt('extensions_whitelist', 'nova.api.openstack', group='osapi_v3')
LOG = logging.getLogger(__name__)
authorizer = extensions.core_authorizer('compute:v3', 'servers')
class ServersController(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
EXTENSION_CREATE_NAMESPACE = 'nova.api.v3.extensions.server.create'
EXTENSION_DESERIALIZE_EXTRACT_SERVER_NAMESPACE = (
'nova.api.v3.extensions.server.create.deserialize')
EXTENSION_REBUILD_NAMESPACE = 'nova.api.v3.extensions.server.rebuild'
EXTENSION_DESERIALIZE_EXTRACT_REBUILD_NAMESPACE = (
'nova.api.v3.extensions.server.rebuild.deserialize')
EXTENSION_UPDATE_NAMESPACE = 'nova.api.v3.extensions.server.update'
_view_builder_class = views_servers.ViewBuilderV3
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = filter(lambda l: l['rel'] == 'self',
robj.obj['server']['links'])
if link:
robj['Location'] = utils.utf8(link[0]['href'])
# Convenience return
return robj
def __init__(self, **kwargs):
def _check_load_extension(required_function):
def check_whiteblack_lists(ext):
# Check whitelist is either empty or if not then the extension
# is in the whitelist
if (not CONF.osapi_v3.extensions_whitelist or
ext.obj.alias in CONF.osapi_v3.extensions_whitelist):
# Check the extension is not in the blacklist
if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:
return True
else:
LOG.warning(_("Not loading %s because it is "
"in the blacklist"), ext.obj.alias)
return False
else:
LOG.warning(
_("Not loading %s because it is not in the whitelist"),
ext.obj.alias)
return False
def check_load_extension(ext):
if isinstance(ext.obj, extensions.V3APIExtensionBase):
# Filter out for the existence of the required
# function here rather than on every request. We
# don't have a new abstract base class to reduce
# duplication in the extensions as they may want
# to implement multiple server (and other) entry
                    # points
if hasattr(ext.obj, required_function):
LOG.debug(_('extension %(ext_alias)s detected by '
'servers extension for function %(func)s'),
{'ext_alias': ext.obj.alias,
'func': required_function})
return check_whiteblack_lists(ext)
else:
LOG.debug(
_('extension %(ext_alias)s is missing %(func)s'),
{'ext_alias': ext.obj.alias,
'func': required_function})
return False
else:
return False
return check_load_extension
self.extension_info = kwargs.pop('extension_info')
super(ServersController, self).__init__(**kwargs)
self.compute_api = compute.API()
# Look for implementation of extension point of server creation
self.create_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_CREATE_NAMESPACE,
check_func=_check_load_extension('server_create'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.create_extension_manager):
LOG.debug(_("Did not find any server create extensions"))
# Look for implementation of extension point of server rebuild
self.rebuild_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_REBUILD_NAMESPACE,
check_func=_check_load_extension('server_rebuild'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.rebuild_extension_manager):
LOG.debug(_("Did not find any server rebuild extensions"))
# Look for implementation of extension point of server update
self.update_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_UPDATE_NAMESPACE,
check_func=_check_load_extension('server_update'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.update_extension_manager):
LOG.debug(_("Did not find any server update extensions"))
def index(self, req):
"""Returns a list of server names and ids for a given user."""
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def detail(self, req):
"""Returns a list of server details for a given user."""
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def _get_servers(self, req, is_detail):
"""Returns a list of servers, based on any search options specified."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['nova.context']
remove_invalid_options(context, search_opts,
self._get_server_search_options())
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
status = search_opts.pop('status', None)
if status is not None:
vm_state, task_state = common.task_and_vm_state_from_status(status)
if not vm_state and not task_state:
return {'servers': []}
search_opts['vm_state'] = vm_state
# When the status maps only to a vm_state, the returned task_state list
# contains 'default'; in that case we don't add a task_state filter.
if 'default' not in task_state:
search_opts['task_state'] = task_state
if 'changes_since' in search_opts:
try:
parsed = timeutils.parse_isotime(search_opts['changes_since'])
except ValueError:
msg = _('Invalid changes_since value')
raise exc.HTTPBadRequest(explanation=msg)
search_opts['changes_since'] = parsed
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
# ... Unless 'changes_since' is specified, because 'changes_since'
# should return recently deleted instances according to the API spec.
if 'deleted' not in search_opts:
if 'changes_since' not in search_opts:
# No 'changes_since', so we only want non-deleted servers
search_opts['deleted'] = False
if 'changes_since' in search_opts:
search_opts['changes-since'] = search_opts.pop('changes_since')
if search_opts.get("vm_state") == ['deleted']:
if context.is_admin:
search_opts['deleted'] = True
else:
msg = _("Only administrators may list deleted instances")
raise exc.HTTPForbidden(explanation=msg)
# If tenant_id is passed as a search parameter this should
# imply that all_tenants is also enabled unless explicitly
# disabled. Note that the tenant_id parameter is filtered out
# by remove_invalid_options above unless the requestor is an
# admin.
if 'tenant_id' in search_opts and 'all_tenants' not in search_opts:
# We do not need to add the all_tenants flag if the tenant
# id associated with the token is the tenant id
# specified. This is done so a request that does not need
# the all_tenants flag does not fail because of lack of
# policy permission for compute:get_all_tenants when it
# doesn't actually need it.
if context.project_id != search_opts.get('tenant_id'):
search_opts['all_tenants'] = 1
# If all_tenants is passed with 0 or false as the value
# then remove it from the search options. Passing all_tenants
# with no value at all is treated as enabling the feature.
all_tenants = search_opts.get('all_tenants')
if all_tenants:
try:
if not strutils.bool_from_string(all_tenants, True):
del search_opts['all_tenants']
except ValueError as err:
raise exception.InvalidInput(str(err))
if 'all_tenants' in search_opts:
policy.enforce(context, 'compute:get_all_tenants',
{'project_id': context.project_id,
'user_id': context.user_id})
del search_opts['all_tenants']
else:
if context.project_id:
search_opts['project_id'] = context.project_id
else:
search_opts['user_id'] = context.user_id
limit, marker = common.get_limit_and_marker(req)
try:
instance_list = self.compute_api.get_all(context,
search_opts=search_opts, limit=limit, marker=marker,
want_objects=True, expected_attrs=['pci_devices'])
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
log_msg = _("Flavor '%s' could not be found ")
LOG.debug(log_msg, search_opts['flavor'])
# TODO(mriedem): Move to ObjectListBase.__init__ for empty lists.
instance_list = instance_obj.InstanceList(objects=[])
if is_detail:
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
response = self._view_builder.index(req, instance_list)
req.cache_db_instances(instance_list)
return response
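# Example -- an illustrative, non-admin request and (roughly) the resulting
# search options after the handling above; values are placeholders:
#
#   GET /servers/detail?status=ACTIVE&changes_since=2014-01-01T00:00:00Z
#
#   search_opts ~= {'vm_state': [...],            # translated from 'status'
#                   'changes-since': <datetime>,  # parsed and renamed
#                   'project_id': <caller's project id>}
#
# Note that 'deleted' is not forced to False here, because changes-since is
# meant to include recently deleted servers.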
def _get_server(self, context, req, instance_uuid):
"""Utility function for looking up an instance by uuid."""
instance = common.get_instance(self.compute_api, context,
instance_uuid, want_objects=True,
expected_attrs=['pci_devices'])
req.cache_db_instance(instance)
return instance
def _check_string_length(self, value, name, max_length=None):
try:
if isinstance(value, six.string_types):
value = value.strip()
utils.check_string_length(value, name, min_length=1,
max_length=max_length)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
def _validate_server_name(self, value):
self._check_string_length(value, 'Server name', max_length=255)
def _get_requested_networks(self, requested_networks):
"""Create a list of requested networks from the networks attribute."""
networks = []
for network in requested_networks:
try:
# fixed IP address is optional
# if the fixed IP address is not provided then
# it will use one of the available IP address from the network
address = network.get('fixed_ip', None)
if address is not None and not utils.is_valid_ip_address(
address):
msg = _("Invalid fixed IP address (%s)") % address
raise exc.HTTPBadRequest(explanation=msg)
port_id = network.get('port', None)
if port_id:
network_uuid = None
if not utils.is_neutron():
# port parameter is only for neutron v2.0
msg = _("Unknown argument: port")
raise exc.HTTPBadRequest(explanation=msg)
if not uuidutils.is_uuid_like(port_id):
msg = _("Bad port format: port uuid is "
"not in proper format "
"(%s)") % port_id
raise exc.HTTPBadRequest(explanation=msg)
if address is not None:
msg = _("Specified Fixed IP '%(addr)s' cannot be used "
"with port '%(port)s': port already has "
"a Fixed IP allocated.") % {"addr": address,
"port": port_id}
raise exc.HTTPBadRequest(explanation=msg)
else:
network_uuid = network['uuid']
if not port_id and not uuidutils.is_uuid_like(network_uuid):
br_uuid = network_uuid.split('-', 1)[-1]
if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format "
"(%s)") % network_uuid
raise exc.HTTPBadRequest(explanation=msg)
# For neutronv2, requested_networks
# should be tuple of (network_uuid, fixed_ip, port_id)
if utils.is_neutron():
networks.append((network_uuid, address, port_id))
else:
# check if the network id is already present in the list,
# we don't want duplicate networks to be passed
# at the boot time
for id, ip in networks:
if id == network_uuid:
expl = (_("Duplicate networks"
" (%s) are not allowed") %
network_uuid)
raise exc.HTTPBadRequest(explanation=expl)
networks.append((network_uuid, address))
except KeyError as key:
expl = _('Bad network format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return networks
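# Example -- the mapping performed by _get_requested_networks above (the
# UUIDs are placeholders):
#
#   [{'uuid': NET_UUID, 'fixed_ip': '10.0.0.5'}]
#       -> neutron:      [(NET_UUID, '10.0.0.5', None)]
#       -> nova-network: [(NET_UUID, '10.0.0.5')]
#   [{'port': PORT_UUID}]
#       -> neutron:      [(None, None, PORT_UUID)]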
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
B64_REGEX = re.compile('^(?:[A-Za-z0-9+\/]{4})*'
'(?:[A-Za-z0-9+\/]{2}=='
'|[A-Za-z0-9+\/]{3}=)?$')
def _decode_base64(self, data):
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
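# Example -- illustrative behaviour of the guard above (input strings are
# examples):
#
#   self._decode_base64('YWJjZA==')  # -> 'abcd'
#   self._decode_base64('YWJj$$$')   # -> None; the regex rejects characters
#                                    #    that b64decode would silently drop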
def show(self, req, id):
"""Returns server details by server id."""
context = req.environ['nova.context']
instance = common.get_instance(self.compute_api, context, id,
want_objects=True,
expected_attrs=['pci_devices'])
req.cache_db_instance(instance)
return self._view_builder.show(req, instance)
@wsgi.response(202)
def create(self, req, body):
"""Creates a new server for a given user."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPBadRequest(_("The request body is invalid"))
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
if 'name' not in server_dict:
msg = _("Server name is not defined")
raise exc.HTTPBadRequest(explanation=msg)
name = server_dict['name']
self._validate_server_name(name)
name = name.strip()
# Arguments to be passed to instance create function
create_kwargs = {}
# Query extensions which want to manipulate the keyword
# arguments.
# NOTE(cyeoh): This is the hook that extensions use
# to replace the extension specific code below.
# When the extensions are ported this will also result
# in some convenience function from this class being
# moved to the extension
if list(self.create_extension_manager):
self.create_extension_manager.map(self._create_extension_point,
server_dict, create_kwargs)
image_uuid = self._image_from_req_data(server_dict, create_kwargs)
# NOTE(cyeoh): Although an extension can set
# return_reservation_id in order to request that a reservation
# id be returned to the client instead of the newly created
# instance information we do not want to pass this parameter
# to the compute create call which always returns both. We use
# this flag after the instance create call to determine what
# to return to the client
return_reservation_id = create_kwargs.pop('return_reservation_id',
False)
requested_networks = None
# TODO(cyeoh): bp v3-api-core-as-extensions
# Replace with an extension point when the os-networks
# extension is ported. Currently reworked
# to take into account is_neutron
#if (self.ext_mgr.is_loaded('os-networks')
# or utils.is_neutron()):
# requested_networks = server_dict.get('networks')
if utils.is_neutron():
requested_networks = server_dict.get('networks')
if requested_networks is not None:
requested_networks = self._get_requested_networks(
requested_networks)
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError as error:
msg = _("Invalid flavor_ref provided.")
raise exc.HTTPBadRequest(explanation=msg)
try:
inst_type = flavors.get_flavor_by_flavor_id(
flavor_id, ctxt=context, read_deleted="no")
(instances, resv_id) = self.compute_api.create(context,
inst_type,
image_uuid,
display_name=name,
display_description=name,
metadata=server_dict.get('metadata', {}),
admin_password=password,
requested_networks=requested_networks,
**create_kwargs)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound as error:
msg = _("Can not find requested image")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound as error:
msg = _("Invalid flavor_ref provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound as error:
msg = _("Invalid key_name provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _("Invalid config_drive provided.")
raise exc.HTTPBadRequest(explanation=msg)
except messaging.RemoteError as err:
msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = "UnicodeError: %s" % unicode(error)
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.InvalidRequest,
exception.MultiplePortsNotApplicable,
exception.InstanceUserDataMalformed,
exception.PortNotFound,
exception.SecurityGroupNotFound,
exception.PortRequiresFixedIP,
exception.NetworkRequiresSubnet,
exception.NetworkNotFound) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
# If the caller wanted a reservation_id, return it
if return_reservation_id:
return wsgi.ResponseObject(
{'servers_reservation': {'reservation_id': resv_id}})
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['admin_password'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
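# Example -- an illustrative request body accepted by create() above; all
# values are placeholders, and admin_password/metadata/networks are optional:
#
#   POST /servers
#   {"server": {"name": "test-server",
#               "image_ref": "<image uuid>",
#               "flavor_ref": "1",
#               "metadata": {"purpose": "demo"},
#               "networks": [{"uuid": "<network uuid>"}]}}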
def _create_extension_point(self, ext, server_dict, create_kwargs):
handler = ext.obj
LOG.debug(_("Running _create_extension_point for %s"), ext.obj)
handler.server_create(server_dict, create_kwargs)
def _rebuild_extension_point(self, ext, rebuild_dict, rebuild_kwargs):
handler = ext.obj
LOG.debug(_("Running _rebuild_extension_point for %s"), ext.obj)
handler.server_rebuild(rebuild_dict, rebuild_kwargs)
def _resize_extension_point(self, ext, resize_dict, resize_kwargs):
handler = ext.obj
LOG.debug(_("Running _resize_extension_point for %s"), ext.obj)
handler.server_resize(resize_dict, resize_kwargs)
def _update_extension_point(self, ext, update_dict, update_kwargs):
handler = ext.obj
LOG.debug(_("Running _update_extension_point for %s"), ext.obj)
handler.server_update(update_dict, update_kwargs)
def _delete(self, context, req, instance_uuid):
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
try:
self.compute_api.soft_delete(context, instance)
except exception.InstanceInvalidState:
# Note(yufang521247): instance which has never been active
# is not allowed to be soft_deleted. Thus we have to call
# delete() to clean up the instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPBadRequest(_("The request body is invalid"))
ctxt = req.environ['nova.context']
update_dict = {}
if 'name' in body['server']:
name = body['server']['name']
self._validate_server_name(name)
update_dict['display_name'] = name.strip()
if 'host_id' in body['server']:
msg = _("host_id cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
if list(self.update_extension_manager):
self.update_extension_manager.map(self._update_extension_point,
body['server'], update_dict)
instance = common.get_instance(self.compute_api, ctxt, id,
want_objects=True,
expected_attrs=['pci_devices'])
try:
# NOTE(mikal): this try block needs to stay because save() still
# might throw an exception.
req.cache_db_instance(instance)
policy.enforce(ctxt, 'compute:update', instance)
instance.update(update_dict)
instance.save()
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.response(202)
@wsgi.action('confirm_resize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirm_resize')
@wsgi.response(202)
@wsgi.action('revert_resize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revert_resize')
return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
if not isinstance(body['reboot']['type'], six.string_types):
msg = _("Argument 'type' for reboot must be a string")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
valid_reboot_types = ['HARD', 'SOFT']
reboot_type = body['reboot']['type'].upper()
if reboot_type not in valid_reboot_types:
msg = _("Argument 'type' for reboot is not HARD or SOFT")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
msg = _("Missing argument 'type' for reboot")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot')
return webob.Response(status_int=202)
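# Example -- an illustrative body for the reboot action handled above:
#
#   POST /servers/<server id>/action
#   {"reboot": {"type": "HARD"}}   # or "SOFT"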
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.FlavorNotFound:
msg = _("Unable to locate requested flavor.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _("Resize requires a flavor change.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize')
except exception.ImageNotAuthorized:
msg = _("You are not authorized to access the image "
"the instance was started with.")
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.Invalid:
msg = _("Invalid instance image.")
raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.response(204)
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete')
def _image_uuid_from_href(self, image_href):
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid image_ref provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
def _image_from_req_data(self, server_dict, create_kwargs):
"""Get image data from the request or raise appropriate
exceptions.
The field image_ref is mandatory when no block devices have been
defined and must be a proper uuid when present.
"""
image_href = server_dict.get('image_ref')
if not image_href and create_kwargs.get('block_device_mapping'):
return ''
elif image_href:
return self._image_uuid_from_href(unicode(image_href))
else:
msg = _("Missing image_ref attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _flavor_id_from_req_data(self, data):
try:
flavor_ref = data['server']['flavor_ref']
except (TypeError, KeyError):
msg = _("Missing flavor_ref attribute")
raise exc.HTTPBadRequest(explanation=msg)
return common.get_id_from_href(flavor_ref)
@wsgi.response(202)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
resize_dict = body['resize']
try:
flavor_ref = str(resize_dict["flavor_ref"])
if not flavor_ref:
msg = _("Resize request has invalid 'flavor_ref' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
except (KeyError, TypeError):
msg = _("Resize requests require 'flavor_ref' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
resize_kwargs = {}
return self._resize(req, id, flavor_ref, **resize_kwargs)
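# Example -- an illustrative body for the resize action handled above (the
# flavor_ref value is a placeholder):
#
#   POST /servers/<server id>/action
#   {"resize": {"flavor_ref": "2"}}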
@wsgi.response(202)
@wsgi.action('rebuild')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
rebuild_dict = body['rebuild']
try:
image_href = rebuild_dict["image_ref"]
except (KeyError, TypeError):
msg = _("Could not parse image_ref from request.")
raise exc.HTTPBadRequest(explanation=msg)
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(rebuild_dict)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
attr_map = {
'name': 'display_name',
'metadata': 'metadata',
}
rebuild_kwargs = {}
if 'name' in rebuild_dict:
self._validate_server_name(rebuild_dict['name'])
if 'preserve_ephemeral' in rebuild_dict:
rebuild_kwargs['preserve_ephemeral'] = strutils.bool_from_string(
rebuild_dict['preserve_ephemeral'], strict=True)
if list(self.rebuild_extension_manager):
self.rebuild_extension_manager.map(self._rebuild_extension_point,
rebuild_dict, rebuild_kwargs)
for request_attribute, instance_attribute in attr_map.items():
try:
rebuild_kwargs[instance_attribute] = rebuild_dict[
request_attribute]
except (KeyError, TypeError):
pass
try:
self.compute_api.rebuild(context,
instance,
image_href,
password,
**rebuild_kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rebuild')
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id)
view = self._view_builder.show(req, instance)
# Add on the admin_password attribute since the view doesn't do it
# unless instance passwords are disabled
if CONF.enable_instance_password:
view['server']['admin_password'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
@wsgi.response(202)
@wsgi.action('create_image')
@common.check_snapshots_enabled
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
entity = body.get("create_image", {})
image_name = entity.get("name")
if not image_name:
msg = _("create_image entity requires name attribute")
raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_server(context, req, id)
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
try:
if self.compute_api.is_volume_backed_instance(context, instance,
bdms):
img = instance['image_ref']
if not img:
props = bdms.root_metadata(
context, self.compute_api.image_service,
self.compute_api.volume_api)
image_meta = {'properties': props}
else:
src_image = self.compute_api.\
image_service.show(context, img)
image_meta = dict(src_image)
image = self.compute_api.snapshot_volume_backed(
context,
instance,
image_meta,
image_name,
extra_properties=props)
else:
image = self.compute_api.snapshot(context,
instance,
image_name,
extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'create_image')
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# build location of newly-created image entity
image_id = str(image['id'])
image_ref = glance.generate_image_url(image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp
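# Example -- an illustrative body for the create_image action handled above
# (values are placeholders); the Location header of the 202 response points
# at the newly created image:
#
#   POST /servers/<server id>/action
#   {"create_image": {"name": "my-snapshot", "metadata": {"purpose": "demo"}}}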
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
password = server['admin_password']
self._validate_admin_password(password)
except KeyError:
password = utils.generate_password()
except ValueError:
raise exc.HTTPBadRequest(explanation=_("Invalid admin_password"))
return password
def _validate_admin_password(self, password):
if not isinstance(password, six.string_types):
raise ValueError()
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes_since', 'all_tenants')
def _get_instance(self, context, instance_uuid):
try:
attrs = ['system_metadata', 'metadata']
return instance_obj.Instance.get_by_uuid(context, instance_uuid,
expected_attrs=attrs)
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
@extensions.expected_errors((404, 409))
@wsgi.action('start')
def _start_server(self, req, id, body):
"""Start an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorizer(context, instance, 'start')
LOG.debug(_('start instance'), instance=instance)
try:
self.compute_api.start(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked,
exception.InstanceInvalidState) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('stop')
def _stop_server(self, req, id, body):
"""Stop an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorizer(context, instance, 'stop')
LOG.debug(_('stop instance'), instance=instance)
try:
self.compute_api.stop(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked,
exception.InstanceInvalidState) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
return webob.Response(status_int=202)
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
LOG.debug(_("Removing options '%s' from query"),
", ".join(unknown_options))
for opt in unknown_options:
search_options.pop(opt, None)
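# Example -- illustrative effect for a non-admin context (option names are
# placeholders):
#
#   search_options = {'name': 'vm1', 'host': 'compute-1'}
#   remove_invalid_options(ctx, search_options, ('name', 'status', 'image'))
#   # -> {'name': 'vm1'}; 'host' is stripped because it is not in the allowed
#   #    tuple, while an admin context would keep every option.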
class Servers(extensions.V3APIExtensionBase):
"""Servers."""
name = "Servers"
alias = "servers"
version = 1
def get_resources(self):
member_actions = {'action': 'POST'}
collection_actions = {'detail': 'GET'}
resources = [
extensions.ResourceExtension(
'servers',
ServersController(extension_info=self.extension_info),
member_name='server', collection_actions=collection_actions,
member_actions=member_actions)]
return resources
def get_controller_extensions(self):
return []
|
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
from sqlalchemy import orm
from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api
from neutron.common import _deprecate
from neutron.common import constants
from neutron.db import _utils as db_utils
from neutron.db import api as db_api
from neutron.db import common_db_mixin as base_db
from neutron.db.models import l3 as l3_models
from neutron.db.models import metering as metering_models
from neutron.extensions import metering
_deprecate._moved_global('MeteringLabelRule', new_module=metering_models)
_deprecate._moved_global('MeteringLabel', new_module=metering_models)
class MeteringDbMixin(metering.MeteringPluginBase,
base_db.CommonDbMixin):
def __init__(self):
self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI()
@staticmethod
def _make_metering_label_dict(metering_label, fields=None):
res = {'id': metering_label['id'],
'name': metering_label['name'],
'description': metering_label['description'],
'shared': metering_label['shared'],
'tenant_id': metering_label['tenant_id']}
return db_utils.resource_fields(res, fields)
def create_metering_label(self, context, metering_label):
m = metering_label['metering_label']
with db_api.context_manager.writer.using(context):
metering_db = metering_models.MeteringLabel(
id=uuidutils.generate_uuid(),
description=m['description'],
tenant_id=m['tenant_id'],
name=m['name'],
shared=m['shared'])
context.session.add(metering_db)
return self._make_metering_label_dict(metering_db)
def delete_metering_label(self, context, label_id):
with db_api.context_manager.writer.using(context):
try:
label = self._get_by_id(context,
metering_models.MeteringLabel,
label_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelNotFound(label_id=label_id)
context.session.delete(label)
def get_metering_label(self, context, label_id, fields=None):
try:
metering_label = self._get_by_id(context,
metering_models.MeteringLabel,
label_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelNotFound(label_id=label_id)
return self._make_metering_label_dict(metering_label, fields)
def get_metering_labels(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'metering_labels', limit,
marker)
return self._get_collection(context, metering_models.MeteringLabel,
self._make_metering_label_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
@staticmethod
def _make_metering_label_rule_dict(metering_label_rule, fields=None):
res = {'id': metering_label_rule['id'],
'metering_label_id': metering_label_rule['metering_label_id'],
'direction': metering_label_rule['direction'],
'remote_ip_prefix': metering_label_rule['remote_ip_prefix'],
'excluded': metering_label_rule['excluded']}
return db_utils.resource_fields(res, fields)
def get_metering_label_rules(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'metering_label_rules',
limit, marker)
return self._get_collection(context, metering_models.MeteringLabelRule,
self._make_metering_label_rule_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def get_metering_label_rule(self, context, rule_id, fields=None):
try:
metering_label_rule = self._get_by_id(
context, metering_models.MeteringLabelRule, rule_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
return self._make_metering_label_rule_dict(metering_label_rule, fields)
def _validate_cidr(self, context, label_id, remote_ip_prefix,
direction, excluded):
r_ips = self.get_metering_label_rules(context,
filters={'metering_label_id':
[label_id],
'direction':
[direction],
'excluded':
[excluded]},
fields=['remote_ip_prefix'])
cidrs = [r['remote_ip_prefix'] for r in r_ips]
new_cidr_ipset = netaddr.IPSet([remote_ip_prefix])
if (netaddr.IPSet(cidrs) & new_cidr_ipset):
raise metering.MeteringLabelRuleOverlaps(
remote_ip_prefix=remote_ip_prefix)
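# Example -- the overlap test used above (prefixes are examples):
#
#   netaddr.IPSet(['10.0.0.0/24']) & netaddr.IPSet(['10.0.0.128/25'])
#   # -> non-empty IPSet, so a new rule for 10.0.0.128/25 raises
#   #    MeteringLabelRuleOverlaps when 10.0.0.0/24 already exists for the
#   #    same label/direction/excluded combination.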
def create_metering_label_rule(self, context, metering_label_rule):
m = metering_label_rule['metering_label_rule']
try:
with db_api.context_manager.writer.using(context):
label_id = m['metering_label_id']
ip_prefix = m['remote_ip_prefix']
direction = m['direction']
excluded = m['excluded']
self._validate_cidr(context, label_id, ip_prefix, direction,
excluded)
metering_db = metering_models.MeteringLabelRule(
id=uuidutils.generate_uuid(),
metering_label_id=label_id,
direction=direction,
excluded=m['excluded'],
remote_ip_prefix=ip_prefix)
context.session.add(metering_db)
except db_exc.DBReferenceError:
raise metering.MeteringLabelNotFound(label_id=label_id)
return self._make_metering_label_rule_dict(metering_db)
def delete_metering_label_rule(self, context, rule_id):
with db_api.context_manager.writer.using(context):
try:
rule = self._get_by_id(context,
metering_models.MeteringLabelRule,
rule_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
context.session.delete(rule)
return self._make_metering_label_rule_dict(rule)
def _get_metering_rules_dict(self, metering_label):
rules = []
for rule in metering_label.rules:
rule_dict = self._make_metering_label_rule_dict(rule)
rules.append(rule_dict)
return rules
def _make_router_dict(self, router):
res = {'id': router['id'],
'name': router['name'],
'tenant_id': router['tenant_id'],
'admin_state_up': router['admin_state_up'],
'status': router['status'],
'gw_port_id': router['gw_port_id'],
constants.METERING_LABEL_KEY: []}
return res
def _process_sync_metering_data(self, context, labels):
all_routers = None
routers_dict = {}
for label in labels:
if label.shared:
if not all_routers:
all_routers = self._get_collection_query(context,
l3_models.Router)
routers = all_routers
else:
routers = label.routers
for router in routers:
router_dict = routers_dict.get(
router['id'],
self._make_router_dict(router))
rules = self._get_metering_rules_dict(label)
data = {'id': label['id'], 'rules': rules}
router_dict[constants.METERING_LABEL_KEY].append(data)
routers_dict[router['id']] = router_dict
return list(routers_dict.values())
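# Example -- illustrative shape of one element returned by
# _process_sync_metering_data (field values are placeholders):
#
#   {'id': '<router uuid>', 'name': 'router1', 'tenant_id': '<tenant>',
#    'admin_state_up': True, 'status': 'ACTIVE', 'gw_port_id': '<port uuid>',
#    constants.METERING_LABEL_KEY: [{'id': '<label uuid>', 'rules': [...]}]}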
def get_sync_data_for_rule(self, context, rule):
label = context.session.query(
metering_models.MeteringLabel).get(
rule['metering_label_id'])
if label.shared:
routers = self._get_collection_query(context, l3_models.Router)
else:
routers = label.routers
routers_dict = {}
for router in routers:
router_dict = routers_dict.get(router['id'],
self._make_router_dict(router))
data = {'id': label['id'], 'rule': rule}
router_dict[constants.METERING_LABEL_KEY].append(data)
routers_dict[router['id']] = router_dict
return list(routers_dict.values())
def get_sync_data_metering(self, context, label_id=None, router_ids=None):
labels = context.session.query(metering_models.MeteringLabel)
if label_id:
labels = labels.filter(
metering_models.MeteringLabel.id == label_id)
elif router_ids:
labels = (labels.join(metering_models.MeteringLabel.routers).
filter(l3_models.Router.id.in_(router_ids)))
return self._process_sync_metering_data(context, labels)
_deprecate._MovedGlobals()
|
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras layer to replace the Sequential Model object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from typing import Any, List, Mapping, Optional, Sequence, Text, Tuple, Union
import tensorflow as tf
from tf_agents.keras_layers import rnn_wrapper
from tf_agents.networks import network
from tf_agents.typing import types
def _infer_state_specs(
layers: Sequence[tf.keras.layers.Layer]
) -> Tuple[types.NestedTensorSpec, List[bool]]:
"""Infer the state spec of a sequence of keras Layers and Networks.
Args:
layers: A sequence of Keras layers and Networks.
Returns:
A tuple with `state_spec`, a tuple of the state specs of length
`len(layers)`, and a list of bools indicating whether the corresponding
layer has lists in its state.
"""
state_specs = []
layer_state_is_list = []
for layer in layers:
spec = network.get_state_spec(layer)
if isinstance(spec, list):
layer_state_is_list.append(True)
state_specs.append(tuple(spec))
else:
state_specs.append(spec)
layer_state_is_list.append(False)
return tuple(state_specs), layer_state_is_list
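# Example -- illustrative result (layer types are examples): for
# [Dense(...), <recurrent layer>] the returned state_spec has one entry per
# layer, e.g. ((), <recurrent layer's state spec>), and layer_state_is_list
# marks the layers whose Keras state spec was a python list (converted to a
# tuple above).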
class Sequential(network.Network):
"""The Sequential Network represents a sequence of Keras layers.
It is a TF-Agents network that should be used instead of
tf.keras.layers.Sequential. In contrast to keras Sequential, this layer can be
used as a pure Layer in tf.functions and when exporting SavedModels, without
having to pre-declare input and output shapes. In turn, this layer is usable
as a preprocessing layer for TF Agents Networks, and can be exported via
PolicySaver.
Stateful Keras layers (e.g. LSTMCell, RNN, LSTM, TF-Agents DynamicUnroll)
are all supported. The `state_spec` of `Sequential` is a tuple whose
length matches the number of stateful layers passed. If no stateful layers
or networks are passed to `Sequential` then `state_spec == ()`. Because the
replay buffers do not support specs containing lists (due to tf.nest vs.
tf.data.nest conflicts), `Sequential` also guarantees that none of its specs
contain lists.
Usage:
```python
c = Sequential([layer1, layer2, layer3])
output, next_state = c(inputs, state)
```
"""
def __init__(self,
layers: Sequence[tf.keras.layers.Layer],
input_spec: Optional[types.NestedTensorSpec] = None,
name: Optional[Text] = None):
"""Create a Sequential Network.
Args:
layers: A list or tuple of layers to compose. Any layers that
are subclasses of `tf.keras.layers.{RNN,LSTM,GRU,...}` are
wrapped in `tf_agents.keras_layers.RNNWrapper`.
input_spec: (Optional.) A nest of `tf.TypeSpec` representing the
input observations to the first layer.
name: (Optional.) Network name.
Raises:
ValueError: If `layers` is empty.
ValueError: If `layers[0]` is a generic Keras layer (not a TF-Agents
network) and `input_spec is None`.
TypeError: If any of the layers are not instances of keras `Layer`.
"""
if not layers:
raise ValueError(
'`layers` must not be empty; saw: {}'.format(layers))
for layer in layers:
if not isinstance(layer, tf.keras.layers.Layer):
raise TypeError(
'Expected all layers to be instances of keras Layer, but saw'
': \'{}\''.format(layer))
layers = [
rnn_wrapper.RNNWrapper(layer) if isinstance(layer, tf.keras.layers.RNN)
else layer
for layer in layers
]
state_spec, self._layer_state_is_list = _infer_state_specs(layers)
# Now we remove all of the empty state specs so if there are no RNN layers,
# our state spec is empty. layer_has_state is a list of bools telling us
# which layers have a non-empty state and which don't.
flattened_specs = [tf.nest.flatten(s) for s in state_spec]
layer_has_state = [bool(fs) for fs in flattened_specs]
state_spec = tuple(
s for s, has_state in zip(state_spec, layer_has_state) if has_state)
super(Sequential, self).__init__(input_tensor_spec=input_spec,
state_spec=state_spec,
name=name)
self._sequential_layers = layers
self._layer_has_state = layer_has_state
@property
def layers(self) -> List[tf.keras.layers.Layer]:
# Return a shallow copy so users don't modify the layers list.
return copy.copy(self._sequential_layers)
def copy(self, **kwargs) -> 'Sequential':
"""Make a copy of a `Sequential` instance.
**NOTE** A copy of a `Sequential` instance always performs a deep copy
of the underlying layers, so the new instance will not share weights
with the original - but it will start with the same weights.
Args:
**kwargs: Args to override when recreating this network. Commonly
overridden args include 'name'.
Returns:
A deep copy of this network.
Raises:
RuntimeError: If not `tf.executing_eagerly()`; as this is required to
be able to create deep copies of layers in `layers`.
"""
if not tf.executing_eagerly():
raise RuntimeError(
'Not executing eagerly - cannot make deep copies of `layers`.')
new_kwargs = dict(self._saved_kwargs, **kwargs)
if 'layers' not in kwargs:
new_layers = [copy.deepcopy(l) for l in self.layers]
new_kwargs['layers'] = new_layers
return type(self)(**new_kwargs)
def call(self, inputs, network_state=(), **kwargs):
if not tf.is_tensor(network_state) and not network_state:
network_state = ((),) * len(self.state_spec)
next_network_state = [()] * len(self.state_spec)
# Only Networks are expected to know about step_type; not Keras layers.
layer_kwargs = kwargs.copy()
layer_kwargs.pop('step_type', None)
stateful_layer_idx = 0
for i, layer in enumerate(self.layers):
if isinstance(layer, network.Network):
if self._layer_has_state[i]:
input_state = network_state[stateful_layer_idx]
if input_state is not None and self._layer_state_is_list[i]:
input_state = list(input_state)
inputs, next_state = layer(
inputs,
network_state=network_state[stateful_layer_idx],
**kwargs)
if self._layer_state_is_list[i]:
next_network_state[stateful_layer_idx] = tuple(next_state)
else:
next_network_state[stateful_layer_idx] = next_state
stateful_layer_idx += 1
else:
inputs, _ = layer(inputs, **kwargs)
else:
# Generic Keras layer
if self._layer_has_state[i]:
# The layer maintains state. If a state was provided at input to
# `call`, then use it. Otherwise ask for an initial state.
maybe_network_state = network_state[stateful_layer_idx]
input_state = maybe_network_state
# pylint: disable=literal-comparison
if maybe_network_state is None:
input_state = layer.get_initial_state(inputs)
elif input_state is not () and self._layer_state_is_list[i]:
input_state = list(input_state)
# pylint: enable=literal-comparison
outputs = layer(inputs, input_state, **layer_kwargs)
inputs, next_state = outputs
if self._layer_state_is_list[i]:
next_network_state[stateful_layer_idx] = tuple(next_state)
else:
next_network_state[stateful_layer_idx] = next_state
stateful_layer_idx += 1
else:
# Does not maintain state.
inputs = layer(inputs, **layer_kwargs)
return inputs, tuple(next_network_state)
def compute_output_shape(
self,
input_shape: Union[List[int], Tuple[int], tf.TensorShape]) -> (
tf.TensorShape):
output_shape = tf.TensorShape(input_shape)
for l in self._sequential_layers:
output_shape = l.compute_output_shape(output_shape)
return tf.TensorShape(output_shape)
def compute_output_signature(
self, input_signature: types.NestedSpec) -> types.NestedSpec:
output_signature = input_signature
for l in self._sequential_layers:
output_signature = l.compute_output_signature(output_signature)
return output_signature
@property
def trainable_weights(self) -> List[tf.Variable]:
if not self.trainable:
return []
weights = {}
for l in self._sequential_layers:
for v in l.trainable_weights:
weights[id(v)] = v
return list(weights.values())
@property
def non_trainable_weights(self) -> List[tf.Variable]:
weights = {}
for l in self._sequential_layers:
for v in l.non_trainable_weights:
weights[id(v)] = v
return list(weights.values())
@property
def trainable(self) -> bool:
return any([l.trainable for l in self._sequential_layers])
@trainable.setter
def trainable(self, value: bool):
for l in self._sequential_layers:
l.trainable = value
def get_config(self) -> Mapping[int, Mapping[str, Any]]:
config = {}
for i, layer in enumerate(self._sequential_layers):
config[i] = {
'class_name': layer.__class__.__name__,
'config': copy.deepcopy(layer.get_config())
}
return config
@classmethod
def from_config(cls, config, custom_objects=None) -> 'Sequential':
layers = [
tf.keras.layers.deserialize(conf, custom_objects=custom_objects)
for conf in config.values()
]
return cls(layers)
tf.keras.utils.get_custom_objects()['SequentialNetwork'] = Sequential
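# Example -- a minimal usage sketch with hypothetical layer sizes (stateless
# layers contribute nothing to state_spec, so next_state is () here):
#
#   net = Sequential([tf.keras.layers.Dense(32, activation='relu'),
#                     tf.keras.layers.Dense(2)],
#                    input_spec=tf.TensorSpec([3], tf.float32))
#   observations = tf.ones([4, 3])
#   outputs, next_state = net(observations)   # outputs shape: [4, 2]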
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class IpAllocationsOperations(object):
"""IpAllocationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified IpAllocation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_allocation_name: The name of the IpAllocation.
:type ip_allocation_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
ip_allocation_name=ip_allocation_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
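# Example -- an illustrative call site (client construction not shown; the
# resource names, and the operations-group attribute name on the client, are
# assumptions for the sketch):
#
#   poller = network_client.ip_allocations.begin_delete(
#       resource_group_name='my-rg', ip_allocation_name='my-ip-allocation')
#   poller.result()   # block until the long-running delete completes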
def get(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.IpAllocation"
"""Gets the specified IpAllocation by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_allocation_name: The name of the IpAllocation.
:type ip_allocation_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IpAllocation, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.IpAllocation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IpAllocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
parameters, # type: "_models.IpAllocation"
**kwargs # type: Any
):
# type: (...) -> "_models.IpAllocation"
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'IpAllocation')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('IpAllocation', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('IpAllocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
parameters, # type: "_models.IpAllocation"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.IpAllocation"]
"""Creates or updates an IpAllocation in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_allocation_name: The name of the IpAllocation.
:type ip_allocation_name: str
:param parameters: Parameters supplied to the create or update virtual network operation.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.IpAllocation
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either IpAllocation or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.IpAllocation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
ip_allocation_name=ip_allocation_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('IpAllocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.IpAllocation"
"""Updates a IpAllocation tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_allocation_name: The name of the IpAllocation.
:type ip_allocation_name: str
:param parameters: Parameters supplied to update IpAllocation tags.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IpAllocation, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.IpAllocation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IpAllocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IpAllocationListResult"]
"""Gets all IpAllocations in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IpAllocationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.IpAllocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IpAllocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/IpAllocations'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IpAllocationListResult"]
"""Gets all IpAllocations in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IpAllocationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.IpAllocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IpAllocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations'} # type: ignore
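# --- Usage sketch (not part of the generated client) -----------------------
# A minimal, hypothetical example of driving the operations above. It assumes
# `client` is a NetworkManagementClient whose `ip_allocations` attribute is an
# instance of this operations class, and that `ip_alloc` is an IpAllocation
# model instance; the resource group and resource names are invented.
def _example_ip_allocation_usage(client, ip_alloc):
    # Long-running create/update: begin_* returns an LROPoller; .result()
    # blocks until the operation reaches a terminal state.
    poller = client.ip_allocations.begin_create_or_update(
        resource_group_name="my-rg",        # hypothetical resource group
        ip_allocation_name="my-ipalloc",    # hypothetical resource name
        parameters=ip_alloc,
    )
    created = poller.result()
    # Paged listing: `list` returns an ItemPaged iterator; iterating it
    # transparently follows next_link between pages.
    names = [item.name for item in client.ip_allocations.list()]
    return created, names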
|
|
from __future__ import division, print_function, absolute_import
import os
import numpy as np
from numpy import array, asarray, pi, sin, cos, arange, dot, ravel, sqrt, round
from numpy.testing import (assert_equal, assert_allclose, assert_,
TestCase, run_module_suite, assert_almost_equal,
assert_raises, assert_array_almost_equal)
from scipy import interpolate
from scipy.interpolate.fitpack import (splrep, splev, bisplrep, bisplev,
sproot, splprep, splint, spalde, splder, splantider, insert, dblint)
def data_file(basename):
return os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', basename)
def norm2(x):
return sqrt(dot(x.T, x))
def f1(x, d=0):
if d is None:
return "sin"
if x is None:
return "sin(x)"
if d % 4 == 0:
return sin(x)
if d % 4 == 1:
return cos(x)
if d % 4 == 2:
return -sin(x)
if d % 4 == 3:
return -cos(x)
def f2(x, y=0, dx=0, dy=0):
if x is None:
return "sin(x+y)"
d = dx + dy
if d % 4 == 0:
return sin(x + y)
if d % 4 == 1:
return cos(x + y)
if d % 4 == 2:
return -sin(x + y)
if d % 4 == 3:
return -cos(x + y)
def makepairs(x, y):
"""Helper function to create an array of pairs of x and y."""
# Or itertools.product (>= python 2.6)
xy = array([[a, b] for a in asarray(x) for b in asarray(y)])
return xy.T
def put(*a):
"""Produce some output if file run directly"""
import sys
if hasattr(sys.modules['__main__'], '__put_prints'):
sys.stderr.write("".join(map(str, a)) + "\n")
class TestSmokeTests(TestCase):
"""
Smoke tests (with a few asserts) for fitpack routines -- mostly
check that they are runnable
"""
def check_1(self, f=f1, per=0, s=0, a=0, b=2 * pi, N=20, at=0, xb=None, xe=None):
if xb is None:
xb = a
if xe is None:
xe = b
x = a + (b - a) * arange(N + 1, dtype=float) / float(N) # nodes
x1 = a + (b - a) * arange(1, N, dtype=float) / float(N - 1) # middle points of the nodes
v, v1 = f(x), f(x1)
nk = []
def err_est(k, d):
# Assume f has all derivatives < 1
h = 1.0 / float(N)
tol = 5 * h ** (.75 * (k - d))
if s > 0:
tol += 1e5 * s
return tol
for k in range(1, 6):
tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
if at:
t = tck[0][k:-k]
else:
t = x1
nd = []
for d in range(k + 1):
tol = err_est(k, d)
err = norm2(f(t, d) - splev(t, tck, d)) / norm2(f(t, d))
assert_(err < tol, (k, d, err, tol))
nd.append((err, tol))
nk.append(nd)
put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None),
repr(round(xb, 3)), repr(round(xe, 3)),
repr(round(a, 3)), repr(round(b, 3))))
if at:
str = "at knots"
else:
str = "at the middle of nodes"
put(" per=%d s=%s Evaluation %s" % (per, repr(s), str))
put(" k : |f-s|^2 |f'-s'| |f''-.. |f'''-. |f''''- |f'''''")
k = 1
for l in nk:
put(' %d : ' % k)
for r in l:
put(' %.1e %.1e' % r)
put('\n')
k = k + 1
def check_2(self, f=f1, per=0, s=0, a=0, b=2 * pi, N=20, xb=None, xe=None,
ia=0, ib=2 * pi, dx=0.2 * pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a + (b - a) * arange(N + 1, dtype=float) / float(N) # nodes
v = f(x)
def err_est(k, d):
# Assume f has all derivatives < 1
h = 1.0 / float(N)
tol = 5 * h ** (.75 * (k - d))
if s > 0:
tol += 1e5 * s
return tol
nk = []
for k in range(1, 6):
tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
nk.append([splint(ia, ib, tck), spalde(dx, tck)])
put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None),
repr(round(xb, 3)), repr(round(xe, 3)),
repr(round(a, 3)), repr(round(b, 3))))
put(" per=%d s=%s N=%d [a, b] = [%s, %s] dx=%s" % (
per, repr(s), N, repr(round(ia, 3)), repr(round(ib, 3)), repr(round(dx, 3))))
put(" k : int(s,[a,b]) Int.Error Rel. error of s^(d)(dx) d = 0, .., k")
k = 1
for r in nk:
if r[0] < 0:
sr = '-'
else:
sr = ' '
put(" %d %s%.8f %.1e " % (k, sr, abs(r[0]),
abs(r[0] - (f(ib, -1) - f(ia, -1)))))
d = 0
for dr in r[1]:
err = abs(1 - dr / f(dx, d))
tol = err_est(k, d)
assert_(err < tol, (k, d))
put(" %.1e %.1e" % (err, tol))
d = d + 1
put("\n")
k = k + 1
def check_3(self, f=f1, per=0, s=0, a=0, b=2 * pi, N=20, xb=None, xe=None,
ia=0, ib=2 * pi, dx=0.2 * pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a + (b - a) * arange(N + 1, dtype=float) / float(N) # nodes
v = f(x)
put(" k : Roots of s(x) approx %s x in [%s,%s]:" %
(f(None), repr(round(a, 3)), repr(round(b, 3))))
for k in range(1, 6):
tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
if k == 3:
roots = sproot(tck)
assert_allclose(splev(roots, tck), 0, atol=1e-10, rtol=1e-10)
assert_allclose(roots, pi * array([1, 2, 3, 4]), rtol=1e-3)
put(' %d : %s' % (k, repr(roots.tolist())))
else:
assert_raises(ValueError, sproot, tck)
def check_4(self, f=f1, per=0, s=0, a=0, b=2 * pi, N=20, xb=None, xe=None,
ia=0, ib=2 * pi, dx=0.2 * pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a + (b - a) * arange(N + 1, dtype=float) / float(N) # nodes
x1 = a + (b - a) * arange(1, N, dtype=float) / float(N - 1) # middle points of the nodes
v, v1 = f(x), f(x1)
put(" u = %s N = %d" % (repr(round(dx, 3)), N))
put(" k : [x(u), %s(x(u))] Error of splprep Error of splrep " % (f(0, None)))
for k in range(1, 6):
tckp, u = splprep([x, v], s=s, per=per, k=k, nest=-1)
tck = splrep(x, v, s=s, per=per, k=k)
uv = splev(dx, tckp)
err1 = abs(uv[1] - f(uv[0]))
err2 = abs(splev(uv[0], tck) - f(uv[0]))
assert_(err1 < 1e-2)
assert_(err2 < 1e-2)
put(" %d : %s %.1e %.1e" %
(k, repr([round(z, 3) for z in uv]),
err1,
err2))
put("Derivatives of parametric cubic spline at u (first function):")
k = 3
tckp, u = splprep([x, v], s=s, per=per, k=k, nest=-1)
for d in range(1, k + 1):
uv = splev(dx, tckp, d)
put(" %s " % (repr(uv[0])))
def check_5(self, f=f2, kx=3, ky=3, xb=0, xe=2 * pi, yb=0, ye=2 * pi, Nx=20, Ny=20, s=0):
x = xb + (xe - xb) * arange(Nx + 1, dtype=float) / float(Nx)
y = yb + (ye - yb) * arange(Ny + 1, dtype=float) / float(Ny)
xy = makepairs(x, y)
tck = bisplrep(xy[0], xy[1], f(xy[0], xy[1]), s=s, kx=kx, ky=ky)
tt = [tck[0][kx:-kx], tck[1][ky:-ky]]
t2 = makepairs(tt[0], tt[1])
v1 = bisplev(tt[0], tt[1], tck)
v2 = f2(t2[0], t2[1])
v2.shape = len(tt[0]), len(tt[1])
err = norm2(ravel(v1 - v2))
assert_(err < 1e-2, err)
put(err)
def test_smoke_splrep_splev(self):
put("***************** splrep/splev")
self.check_1(s=1e-6)
self.check_1()
self.check_1(at=1)
self.check_1(per=1)
self.check_1(per=1, at=1)
self.check_1(b=1.5 * pi)
self.check_1(b=1.5 * pi, xe=2 * pi, per=1, s=1e-1)
def test_smoke_splint_spalde(self):
put("***************** splint/spalde")
self.check_2()
self.check_2(per=1)
self.check_2(ia=0.2 * pi, ib=pi)
self.check_2(ia=0.2 * pi, ib=pi, N=50)
def test_smoke_sproot(self):
put("***************** sproot")
self.check_3(a=0.1, b=15)
def test_smoke_splprep_splrep_splev(self):
put("***************** splprep/splrep/splev")
self.check_4()
self.check_4(N=50)
def test_smoke_bisplrep_bisplev(self):
put("***************** bisplev")
self.check_5()
class TestSplev(TestCase):
def test_1d_shape(self):
x = [1, 2, 3, 4, 5]
y = [4, 5, 6, 7, 8]
tck = splrep(x, y)
z = splev([1], tck)
assert_equal(z.shape, (1,))
z = splev(1, tck)
assert_equal(z.shape, ())
def test_2d_shape(self):
x = [1, 2, 3, 4, 5]
y = [4, 5, 6, 7, 8]
tck = splrep(x, y)
t = np.array([[1.0, 1.5, 2.0, 2.5],
[3.0, 3.5, 4.0, 4.5]])
z = splev(t, tck)
z0 = splev(t[0], tck)
z1 = splev(t[1], tck)
assert_equal(z, np.row_stack((z0, z1)))
def test_extrapolation_modes(self):
# test extrapolation modes
# * if ext=0, return the extrapolated value.
# * if ext=1, return 0
# * if ext=2, raise a ValueError
# * if ext=3, return the boundary value.
x = [1, 2, 3]
y = [0, 2, 4]
tck = splrep(x, y, k=1)
rstl = [[-2, 6], [0, 0], None, [0, 4]]
for ext in (0, 1, 3):
assert_array_almost_equal(splev([0, 4], tck, ext=ext), rstl[ext])
assert_raises(ValueError, splev, [0, 4], tck, ext=2)
class TestSplder(object):
def __init__(self):
# non-uniform grid, just to make it sure
x = np.linspace(0, 1, 100) ** 3
y = np.sin(20 * x)
self.spl = splrep(x, y)
# double check that knots are non-uniform
assert_(np.diff(self.spl[0]).ptp() > 0)
def test_inverse(self):
# Check that antiderivative + derivative is identity.
for n in range(5):
spl2 = splantider(self.spl, n)
spl3 = splder(spl2, n)
assert_allclose(self.spl[0], spl3[0])
assert_allclose(self.spl[1], spl3[1])
assert_equal(self.spl[2], spl3[2])
def test_splder_vs_splev(self):
# Check derivative vs. FITPACK
for n in range(3 + 1):
# Also extrapolation!
xx = np.linspace(-1, 2, 2000)
if n == 3:
# ... except that FITPACK extrapolates strangely for
# order 0, so let's not check that.
xx = xx[(xx >= 0) & (xx <= 1)]
dy = splev(xx, self.spl, n)
spl2 = splder(self.spl, n)
dy2 = splev(xx, spl2)
if n == 1:
assert_allclose(dy, dy2, rtol=2e-6)
else:
assert_allclose(dy, dy2)
def test_splantider_vs_splint(self):
# Check antiderivative vs. FITPACK
spl2 = splantider(self.spl)
# no extrapolation, splint assumes function is zero outside
# range
xx = np.linspace(0, 1, 20)
for x1 in xx:
for x2 in xx:
y1 = splint(x1, x2, self.spl)
y2 = splev(x2, spl2) - splev(x1, spl2)
assert_allclose(y1, y2)
def test_order0_diff(self):
assert_raises(ValueError, splder, self.spl, 4)
def test_kink(self):
# Should refuse to differentiate splines with kinks
spl2 = insert(0.5, self.spl, m=2)
splder(spl2, 2) # Should work
assert_raises(ValueError, splder, spl2, 3)
spl2 = insert(0.5, self.spl, m=3)
splder(spl2, 1) # Should work
assert_raises(ValueError, splder, spl2, 2)
spl2 = insert(0.5, self.spl, m=4)
assert_raises(ValueError, splder, spl2, 1)
def test_multidim(self):
# c can have trailing dims
for n in range(3):
t, c, k = self.spl
c2 = np.c_[c, c, c]
c2 = np.dstack((c2, c2))
spl2 = splantider((t, c2, k), n)
spl3 = splder(spl2, n)
assert_allclose(t, spl3[0])
assert_allclose(c2, spl3[1])
assert_equal(k, spl3[2])
class TestBisplrep(object):
def test_overflow(self):
a = np.linspace(0, 1, 620)
b = np.linspace(0, 1, 620)
x, y = np.meshgrid(a, b)
z = np.random.rand(*x.shape)
assert_raises(OverflowError, bisplrep, x.ravel(), y.ravel(), z.ravel(), s=0)
def test_regression_1310(self):
# Regression test for gh-1310
data = np.load(data_file('bug-1310.npz'))['data']
# Shouldn't crash -- the input data triggers work array sizes
# that previously caused some data not to be aligned on
# sizeof(double) boundaries in memory, which made the Fortran
# code crash when compiled with -O3
bisplrep(data[:, 0], data[:, 1], data[:, 2], kx=3, ky=3, s=0,
full_output=True)
def test_dblint():
# Basic test to see it runs and gives the correct result on a trivial
# problem. Note that `dblint` is not exposed in the interpolate namespace.
x = np.linspace(0, 1)
y = np.linspace(0, 1)
xx, yy = np.meshgrid(x, y)
rect = interpolate.RectBivariateSpline(x, y, 4 * xx * yy)
tck = list(rect.tck)
tck.extend(rect.degrees)
assert_almost_equal(dblint(0, 1, 0, 1, tck), 1)
assert_almost_equal(dblint(0, 0.5, 0, 1, tck), 0.25)
assert_almost_equal(dblint(0.5, 1, 0, 1, tck), 0.75)
assert_almost_equal(dblint(-100, 100, -100, 100, tck), 1)
def test_splev_der_k():
# regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes
# for x outside of knot range
# test case from gh-2188
tck = (np.array([0., 0., 2.5, 2.5]),
np.array([-1.56679978, 2.43995873, 0., 0.]),
1)
t, c, k = tck
x = np.array([-3, 0, 2.5, 3])
# an explicit form of the linear spline
assert_allclose(splev(x, tck), c[0] + (c[1] - c[0]) * x / t[2])
assert_allclose(splev(x, tck, 1), (c[1] - c[0]) / t[2])
# now check a random spline vs splder
np.random.seed(1234)
x = np.sort(np.random.random(30))
y = np.random.random(30)
t, c, k = splrep(x, y)
x = [t[0] - 1., t[-1] + 1.]
tck2 = splder((t, c, k), k)
assert_allclose(splev(x, (t, c, k), k), splev(x, tck2))
if __name__ == "__main__":
run_module_suite()
|
|
from scipy.sparse import csr_matrix, coo_matrix, diags
from scipy.sparse import isspmatrix
import random
class WordSaladMatrixBuilder():
"""Aids in the construction of a WordSaladMatrix. The WordSaladMatrix object
has some finicky requirements and this object helps construct one in a
reasonably efficient manner.
It uses a sparse COO matrix to construct the final sparse matrix which can
then be used to find word follower probabilities."""
def __init__(self):
self.words = dict()
self.c = 0
self.row = []
self.col = []
self.data = []
def add_word(self, w):
if w not in self.words:
self.words[w] = self.c
#self.row.append(self.c)
#self.col.append(self.c)
#self.data.append(0)
self.c += 1
return self.c - 1
else:
return self.words[w]
def count_follower(self, w, f):
i = self.add_word(w)
j = self.add_word(f)
self.row.append(i)
self.col.append(j)
self.data.append(1)
def build_matrix(self):
m = coo_matrix((self.data, (self.row, self.col)), shape=(self.c, self.c))
m.sum_duplicates()
m = m.tocsr()
# Get row sums as a column matrix, and convert to float (the default dtype is int).
sums = m.sum(axis=1).astype("f")
# Get the reciprocal of each element.
for i in range(0, sums.shape[0]):
if sums[i, 0] > 0.0:
sums[i, 0] = 1.0 / sums[i, 0]
else:
sums[i, 0] = 0.0
# Create a diagonal matrix of the reciprocal row sums (a diagonal
# matrix on the left scales rows). Multiplying by it normalizes each
# row of counts into a probability vector for the corresponding word.
sums = diags(sums.flat, 0, shape=m.shape)
return WordSaladMatrix(sums * m, self.words)
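# A tiny, self-contained sketch of the normalization trick used in
# build_matrix above: left-multiplying by a diagonal matrix of reciprocal
# row sums turns a matrix of raw follower counts into row-stochastic
# probabilities. The numbers here are made up purely for illustration.
def _row_normalization_sketch():
    counts = coo_matrix(([2, 1, 3], ([0, 0, 1], [0, 1, 1])), shape=(2, 2)).tocsr()
    sums = counts.sum(axis=1).astype("f")  # column matrix of row sums
    recip = [1.0 / s if s > 0 else 0.0 for s in sums.flat]
    probs = diags(recip, 0, shape=counts.shape) * counts
    # Row 0 becomes [2/3, 1/3]; row 1 becomes [0, 1].
    return probs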
class WordSaladMatrix:
"""The WordSaladMatrix is a matrix (and a table) of "words" and their
associated "followers", encoding a Markov chain for them.
A word does not have to be an actual english word, it can be anything
hashable by Python. This is useful for tracking pairs of words for
instance, by inserting tuples instead of single strings. But it can also
be numbers, letters or anything else that can vaguely be emulated by
a Markov chain.
A follower is simply another "word" that follows the "word" in question,
the amount of time a word is followed by another is what is encoded by the
matrix.
The underlying matrix is sparse with the motivation that since a structure
is expected, a great deal of followers will have probability zero.
"""
def __init__(self, freqmatrix, wordtoindex):
if not isspmatrix(freqmatrix):
raise TypeError("freqmatrix must be a scipy sparse matrix, is type {}.".format(type(freqmatrix)))
self.matrix = freqmatrix
# Bijection word -> index
self.wordtoindex = dict(wordtoindex)
# The inverse of the bijection word -> index
self.indextoword = {i:w for w,i in self.wordtoindex.items()}
if self.matrix.shape[0] != self.matrix.shape[1]:
raise ValueError("Needs a square matrix.")
if len(self.wordtoindex) != self.matrix.shape[0]:
raise ValueError("length of wordtoindex does not match dimension of matrix.")
def __contains__(self, w):
return w in self.wordtoindex
def indexOf(self, w):
return self.wordtoindex[w]
def wordAt(self, i):
return self.indextoword[i]
def wordCount(self):
return len(self.wordtoindex)
def probability(self, w, f):
"""Returns the probability that a word w is followed by word f."""
if w not in self.wordtoindex or f not in self.wordtoindex:
raise ValueError("w or f is not in the matrix.")
i = self.wordtoindex[w]
j = self.wordtoindex[f]
return self.matrix[i, j]
def probabilities(self, w):
"""Returns the probability vector for a word. This contains as many
elements as there are words encoded in the matrix.
Each index has a bijective relation to a word."""
if w not in self.wordtoindex:
raise ValueError("w is not in the matrix.")
return self.matrix.getrow(self.wordtoindex[w])
def power(self, n):
"""Raises the probability matrix by integer n.
This can be used to find out what the probabilities are after n words.
This is usually pretty CPU-intensive, depending on the size of the
matrix.
"""
n = int(n)
self.matrix **= n
def __repr__(self):
return "<WordSaladMatrix with matrix shape {}>".format(self.matrix.shape)
def draw_follower(mat, w):
probs = mat.probabilities(w).tocoo()  # COO lets us iterate over the nonzero entries directly.
p = random.uniform(0.01, 1.0)
f = -1
for i,p1 in zip(probs.col, probs.data):
if p1 != 0.0:
p -= p1
if p <= 0.0:
f = i
break
if f == -1:
return None
return mat.wordAt(f)
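# A minimal end-to-end sketch of the API defined above (builder -> matrix ->
# sampling). The toy corpus is invented purely for illustration.
def _word_salad_sketch():
    builder = WordSaladMatrixBuilder()
    for w, f in [("the", "cat"), ("the", "dog"), ("cat", "sat")]:
        builder.count_follower(w, f)
    mat = builder.build_matrix()
    # "the" was followed by "cat" once out of two observations.
    assert abs(mat.probability("the", "cat") - 0.5) < 1e-9
    # Draw a random follower of "the" ("cat" or "dog"), or None if the row
    # has no probability mass.
    return draw_follower(mat, "the")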
import string
def parseTextIntoMatrix(text, matrixbuilder, groupSize=1, sentenceStop=".!?", strip={"\r"," ", "\n", "\t"}, whitespace=string.whitespace, punctuation=string.punctuation, startGroups=None):
if groupSize < 1:
raise ValueError("groupSize must be >= 1")
if len(whitespace) < 1:
raise ValueError("whitespace list is empty.")
def tagStartGroup(x):
if startGroups is not None:
startGroups.append(x)
def split(txt):
last = ""
prev = ""
for c in txt:
if c in whitespace:
if prev in whitespace:
continue
else:
yield last
last = ""
elif c in punctuation:
yield last
yield c
last = ""
else:
last += c
prev = c
rawTokens = split(text)
def tokens(raw):
for r in raw:
if r in strip or r in whitespace:
continue
yield r
prev = None
group = []
def popgroup():
nonlocal prev, group
if groupSize == 1:
g = group[0]
group = []
return g
else:
tg = tuple(group)
group = []
return tg
for t in tokens(rawTokens):
group.append(t)
if len(group) == groupSize:
tg = popgroup()
matrixbuilder.count_follower(prev, tg)
if prev is None or any((p in sentenceStop for p in prev)): tagStartGroup(tg)
prev = tg
if group:
matrixbuilder.count_follower(prev, tuple(group))
def joinSpecial(iterable, noprespace=".,:;)-!?'\"", nopostspace="(-'\"", humanize=True, stops="?!."):
fin = ""
prev = stops
for s in iterable:
if prev in stops:
s = s.capitalize()
if s in noprespace or prev in nopostspace:
fin += s
else:
fin += " " + s
prev = s
return fin
def test():
mb = WordSaladMatrixBuilder()
import re
ss = None
with open("test.txt", "rb") as f:
ss = f.read()
ss = ss.decode("utf-8")
ss = re.sub("[0-9]", "", ss)
startGroups = []
parseTextIntoMatrix(ss, mb, groupSize=2, startGroups=startGroups)
m = mb.build_matrix()
print(m)
for i in range(0, 1):
prev = random.choice(startGroups)
s = []
while "." not in prev:
s += prev
prev = draw_follower(m, prev)
if prev is None:
break
#m.power(2)
print(joinSpecial(map(lambda x: "" if x is None else x, s)))
#import cProfile
#cProfile.run("test()", sort="cumtime")
test()
|
|
import pytest
import operator
import numpy as np
from numpy.testing import assert_allclose
import scipy.sparse.linalg as spla
import quimb as qu
import quimb.tensor as qtn
from quimb.tensor import (
bonds,
tensor_contract,
tensor_direct_product,
Tensor,
TensorNetwork,
rand_tensor,
MPS_rand_state,
TNLinearOperator1D,
)
from quimb.tensor.decomp import _trim_singular_vals
from quimb.tensor.tensor_core import _CONTRACT_BACKEND, _TENSOR_LINOP_BACKEND
def test__trim_singular_vals():
s = np.array([3., 2., 1., 0.1])
assert _trim_singular_vals(s, 0.5, 1) == 3
assert _trim_singular_vals(s, 0.5, 2) == 2
assert _trim_singular_vals(s, 2, 3) == 2
assert _trim_singular_vals(s, 5.02, 3) == 1
class TestContractOpts:
def test_contract_strategy(self):
assert qtn.get_contract_strategy() == 'greedy'
with qtn.contract_strategy('auto'):
assert qtn.get_contract_strategy() == 'auto'
assert qtn.get_contract_strategy() == 'greedy'
def test_contract_backend(self):
assert qtn.get_contract_backend() == _CONTRACT_BACKEND
with qtn.contract_backend('cupy'):
assert qtn.get_contract_backend() == 'cupy'
assert qtn.get_contract_backend() == _CONTRACT_BACKEND
def test_tensor_linop_backend(self):
assert qtn.get_tensor_linop_backend() == _TENSOR_LINOP_BACKEND
with qtn.tensor_linop_backend('cupy'):
assert qtn.get_tensor_linop_backend() == 'cupy'
assert qtn.get_tensor_linop_backend() == _TENSOR_LINOP_BACKEND
class TestBasicTensorOperations:
def test_tensor_construct(self):
x = np.random.randn(2, 3, 4)
a = Tensor(x, inds=[0, 1, 2], tags='blue')
assert_allclose(a.H.data, x.conj())
assert a.size == 24
with pytest.raises(ValueError):
Tensor(x, inds=[0, 2], tags='blue')
def test_tensor_copy(self):
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2], tags='blue')
b = a.copy()
b.add_tag('foo')
assert 'foo' not in a.tags
b.data[:] = b.data / 2
# still reference the same underlying array
assert_allclose(a.data, b.data)
def test_tensor_deep_copy(self):
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2], tags='blue')
b = a.copy(deep=True)
b.add_tag('foo')
assert 'foo' not in a.tags
b.data[:] = b.data / 2
# deep copy: the underlying array is no longer shared
assert_allclose(a.data / 2, b.data)
def test_with_alpha_construct(self):
x = np.random.randn(2, 3, 4)
a = Tensor(x, inds='ijk', tags='blue')
assert_allclose(a.H.data, x.conj())
assert a.size == 24
with pytest.raises(ValueError):
Tensor(x, inds='ij', tags='blue')
x = np.random.randn(2, 3, 4)
a = Tensor(x, inds=['a1', 'b2', 'c3'], tags='blue')
assert_allclose(a.H.data, x.conj())
assert a.size == 24
with pytest.raises(ValueError):
Tensor(x, inds=['ijk'], tags='blue')
def test_arithmetic_scalar(self):
x = np.random.randn(2, 3, 4)
a = Tensor(x, inds=[0, 1, 2], tags='blue')
assert_allclose((a + 2).data, x + 2)
assert_allclose((a - 3).data, x - 3)
assert_allclose((a * 4).data, x * 4)
assert_allclose((a / 5).data, x / 5)
assert_allclose((a ** 2).data, x ** 2)
assert_allclose((2 + a).data, 2 + x)
assert_allclose((3 - a).data, 3 - x)
assert_allclose((4 * a).data, 4 * x)
assert_allclose((5 / a).data, 5 / x)
assert_allclose((5 ** a).data, 5 ** x)
@pytest.mark.parametrize("op", [operator.__add__,
operator.__sub__,
operator.__mul__,
operator.__pow__,
operator.__truediv__])
@pytest.mark.parametrize("mismatch", (True, False))
def test_tensor_tensor_arithmetic(self, op, mismatch):
a = Tensor(np.random.rand(2, 3, 4), inds=[0, 1, 2], tags='blue')
b = Tensor(np.random.rand(2, 3, 4), inds=[0, 1, 2], tags='red')
if mismatch:
b.modify(inds=(0, 1, 3))
with pytest.raises(ValueError):
op(a, b)
else:
c = op(a, b)
assert_allclose(c.data, op(a.data, b.data))
def test_tensor_conj_inplace(self):
data = np.random.rand(2, 3, 4) + 1.0j * np.random.rand(2, 3, 4)
a = Tensor(data, inds=[0, 1, 2], tags='blue')
a.conj_()
assert_allclose(data.conj(), a.data)
def test_contract_some(self):
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2])
b = Tensor(np.random.randn(3, 4, 5), inds=[1, 2, 3])
assert a.shared_bond_size(b) == 12
c = a @ b
assert isinstance(c, Tensor)
assert c.shape == (2, 5)
assert c.inds == (0, 3)
def test_contract_all(self):
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2])
b = Tensor(np.random.randn(3, 4, 2), inds=[1, 2, 0])
c = a @ b
assert isinstance(c, float)
assert not isinstance(c, Tensor)
def test_contract_None(self):
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2])
b = Tensor(np.random.randn(3, 4, 5), inds=[3, 4, 5])
c = a @ b
assert c.shape == (2, 3, 4, 3, 4, 5)
assert c.inds == (0, 1, 2, 3, 4, 5)
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2])
b = Tensor(np.random.randn(3, 4, 5), inds=[5, 4, 3])
c = a @ b
assert c.shape == (2, 3, 4, 3, 4, 5)
assert c.inds == (0, 1, 2, 5, 4, 3)
def test_raise_on_triple_inds(self):
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2])
b = Tensor(np.random.randn(3, 4, 5), inds=[1, 1, 2])
with pytest.raises(ValueError):
a @ b
def test_multi_contract(self):
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2],
tags='red')
b = Tensor(np.random.randn(3, 4, 5), inds=[1, 2, 3],
tags='blue')
c = Tensor(np.random.randn(5, 2, 6), inds=[3, 0, 4],
tags='blue')
d = tensor_contract(a, b, c)
assert isinstance(d, Tensor)
assert d.shape == (6,)
assert d.inds == (4,)
assert d.tags == {'red', 'blue'}
def test_contract_with_legal_characters(self):
a = Tensor(np.random.randn(2, 3, 4), inds='abc',
tags='red')
b = Tensor(np.random.randn(3, 4, 5), inds='bcd',
tags='blue')
c = a @ b
assert c.shape == (2, 5)
assert c.inds == ('a', 'd')
def test_contract_with_out_of_range_inds(self):
a = Tensor(np.random.randn(2, 3, 4), inds=[-1, 100, 2200],
tags='red')
b = Tensor(np.random.randn(3, 4, 5), inds=[100, 2200, -3],
tags='blue')
c = a @ b
assert c.shape == (2, 5)
assert c.inds == (-1, -3)
def test_contract_with_wild_mix(self):
a = Tensor(np.random.randn(2, 3, 4), inds=['-1', 'a', 'foo'],
tags='red')
b = Tensor(np.random.randn(3, 4, 5), inds=['a', 'foo', '42.42'],
tags='blue')
c = a @ b
assert c.shape == (2, 5)
assert c.inds == ('-1', '42.42')
def test_fuse(self):
a = Tensor(np.random.rand(2, 3, 4, 5), 'abcd', tags={'blue'})
b = a.fuse({'bra': ['a', 'c'], 'ket': 'bd'})
assert set(b.shape) == {8, 15}
assert set(b.inds) == {'bra', 'ket'}
assert b.tags == {'blue'}
b = a.fuse({'ket': 'bd', 'bra': 'ac'})
assert set(b.shape) == {15, 8}
assert set(b.inds) == {'ket', 'bra'}
assert b.tags == {'blue'}
def test_fuse_leftover(self):
a = Tensor(np.random.rand(2, 3, 4, 5, 2, 2), 'abcdef', tags={'blue'})
b = a.fuse({'bra': 'ac', 'ket': 'bd'})
assert b.shape == (8, 15, 2, 2)
assert b.inds == ('bra', 'ket', 'e', 'f')
assert b.tags == {'blue'}
def test_tensor_transpose(self):
a = Tensor(np.random.rand(2, 3, 4, 5, 2, 2), 'abcdef', tags={'blue'})
at = a.transpose(*'cdfeba')
assert at.shape == (4, 5, 2, 2, 3, 2)
assert at.inds == ('c', 'd', 'f', 'e', 'b', 'a')
with pytest.raises(ValueError):
a.transpose(*'cdfebz')
def test_ownership(self):
a = rand_tensor((2, 2), ('a', 'b'), tags={'X', 'Y'})
b = rand_tensor((2, 2), ('b', 'c'), tags={'X', 'Z'})
assert not a.check_owners()
assert not b.check_owners()
tn = TensorNetwork((a, b), virtual=True)
assert a.check_owners()
assert b.check_owners()
assert a.owners[hash(tn)][0]() is tn
assert b.owners[hash(tn)][0]() is tn
assert all(map(tn.ind_map.__contains__, ('a', 'b', 'c')))
assert all(map(tn.tag_map.__contains__, ('X', 'Y', 'Z')))
a.reindex_({'a': 'd'})
assert 'a' not in tn.ind_map
assert 'd' in tn.ind_map
assert len(tn.tag_map['X']) == 2
b.retag_({'X': 'W'})
assert len(tn.tag_map['X']) == 1
assert 'W' in tn.tag_map
del tn
assert not a.check_owners()
assert not b.check_owners()
def test_isel(self):
T = rand_tensor((2, 3, 4, 5, 6), inds=['a', 'b', 'c', 'd', 'e'])
Tis = T.isel({'d': 2, 'b': 0})
assert Tis.shape == (2, 4, 6)
assert Tis.inds == ('a', 'c', 'e')
assert_allclose(Tis.data, T.data[:, 0, :, 2, :])
def test_cut_iter(self):
psi = MPS_rand_state(10, 7, cyclic=True)
pp = psi.H & psi
bnds = bonds(pp[0], pp[-1])
assert sum(tn ^ all for tn in pp.cut_iter(*bnds)) == pytest.approx(1.0)
assert pp ^ all == pytest.approx(1.0)
@pytest.mark.parametrize("method", ['qr', 'exp', 'mgs', 'svd'])
def test_unitize(self, method):
t = rand_tensor((2, 3, 4), 'abc')
assert t.H @ t != pytest.approx(3.0)
t.unitize('b', inplace=True, method=method)
assert t.H @ t == pytest.approx(3.0)
assert t.inds == ('b', 'a', 'c')
def test_connect(self):
x = rand_tensor((2, 3), 'ab')
y = rand_tensor((3, 2), 'cd')
with pytest.raises(ValueError):
qtn.connect(x, y, 0, 0)
tn = x | y
assert len(tn.outer_inds()) == 4
qtn.connect(x, y, 0, 1)
assert len(tn.outer_inds()) == 2
qtn.connect(x, y, 1, 0)
assert len(tn.outer_inds()) == 0
assert (tn ^ all).shape == ()
# make sure bond is newly labelled
assert set('abcd') & set(tn.all_inds()) == set()
class TestTensorFunctions:
@pytest.mark.parametrize('method', ['svd', 'eig', 'isvd', 'svds'])
@pytest.mark.parametrize('linds', [('a', 'b', 'd'), ('c', 'e')])
@pytest.mark.parametrize('cutoff', [-1.0, 1e-13, 1e-10])
@pytest.mark.parametrize('cutoff_mode', ['abs', 'rel', 'sum2'])
@pytest.mark.parametrize('absorb', ['left', 'both', 'right'])
def test_split_tensor_with_vals(self, method, linds, cutoff,
cutoff_mode, absorb):
a = rand_tensor((2, 3, 4, 5, 6), inds='abcde', tags='red')
a_split = a.split(linds, method=method, cutoff=cutoff,
cutoff_mode=cutoff_mode, absorb=absorb)
assert len(a_split.tensors) == 2
if linds == 'abd':
assert ((a_split.shape == (2, 3, 5, 4, 6)) or
(a_split.shape == (4, 6, 2, 3, 5)))
elif linds == 'edc':
assert ((a_split.shape == (6, 5, 4, 2, 3)) or
(a_split.shape == (2, 3, 6, 5, 4)))
assert (a_split ^ ...).almost_equals(a)
@pytest.mark.parametrize('method', ['qr', 'lq'])
@pytest.mark.parametrize('linds', [('a', 'b', 'd'), ('c', 'e')])
def test_split_tensor_no_vals(self, method, linds):
a = rand_tensor((2, 3, 4, 5, 6), inds='abcde', tags='red')
a_split = a.split(linds, method=method)
assert len(a_split.tensors) == 2
if linds == 'abd':
assert ((a_split.shape == (2, 3, 5, 4, 6)) or
(a_split.shape == (4, 6, 2, 3, 5)))
elif linds == 'edc':
assert ((a_split.shape == (6, 5, 4, 2, 3)) or
(a_split.shape == (2, 3, 6, 5, 4)))
assert (a_split ^ ...).almost_equals(a)
@pytest.mark.parametrize('method', ['svd', 'eig'])
def test_singular_values(self, method):
psim = Tensor(np.eye(2) * 2**-0.5, inds='ab')
assert_allclose(psim.H @ psim, 1.0)
assert_allclose(psim.singular_values('a', method=method)**2,
[0.5, 0.5])
@pytest.mark.parametrize('method', ['svd', 'eig'])
def test_entropy(self, method):
psim = Tensor(np.eye(2) * 2**-0.5, inds='ab')
assert_allclose(psim.H @ psim, 1.0)
assert_allclose(psim.entropy('a', method=method)**2, 1)
@pytest.mark.parametrize('method', ['svd', 'eig'])
def test_entropy_matches_dense(self, method):
p = MPS_rand_state(5, 32)
p_dense = p.to_dense()
real_svn = qu.entropy(p_dense.ptr([2] * 5, [0, 1, 2]))
svn = (p ^ ...).entropy(('k0', 'k1', 'k2'))
assert_allclose(real_svn, svn)
# use tensor to left of bipartition
p.canonize(2)
t1 = p['I2']
left_inds = set(t1.inds) - set(p['I3'].inds)
svn = (t1).entropy(left_inds, method=method)
assert_allclose(real_svn, svn)
# use tensor to right of bipartition
p.canonize(3)
t2 = p['I3']
left_inds = set(t2.inds) & set(p['I2'].inds)
svn = (t2).entropy(left_inds, method=method)
assert_allclose(real_svn, svn)
def test_direct_product(self):
a1 = rand_tensor((2, 3, 4), inds='abc')
b1 = rand_tensor((3, 4, 5), inds='bcd')
a2 = rand_tensor((2, 3, 4), inds='abc')
b2 = rand_tensor((3, 4, 5), inds='bcd')
c1 = (a1 @ b1) + (a2 @ b2)
c2 = (tensor_direct_product(a1, a2, sum_inds=('a')) @
tensor_direct_product(b1, b2, sum_inds=('d')))
assert c1.almost_equals(c2)
def test_direct_product_triple(self):
a1 = rand_tensor((2, 3, 4), inds='abc')
b1 = rand_tensor((3, 4, 5, 6), inds='bcde')
c1 = rand_tensor((6, 7), inds='ef')
a2 = rand_tensor((2, 3, 4), inds='abc')
b2 = rand_tensor((3, 4, 5, 6), inds='bcde').transpose(*'decb')
c2 = rand_tensor((6, 7), inds='ef')
d1 = (a1 @ b1 @ c1) + (a2 @ b2 @ c2)
d2 = (tensor_direct_product(a1, a2, sum_inds=('a')) @
tensor_direct_product(b1, b2, sum_inds=('d')) @
tensor_direct_product(c1, c2, sum_inds=('f')))
assert d1.almost_equals(d2)
@pytest.mark.parametrize("dtype", [float, complex, np.complex128,
np.float_, np.float32, 'raise'])
def test_rand_tensor(self, dtype):
if dtype == 'raise':
with pytest.raises(TypeError):
rand_tensor((2, 3, 4), 'abc', dtype=dtype)
else:
t = rand_tensor((2, 3, 4), 'abc', dtype=dtype)
assert t.dtype == np.dtype(dtype)
tn = t & t
assert tn.dtype == np.dtype(dtype)
def test_squeeze(self):
a = rand_tensor((1, 2, 3, 1, 4), inds='abcde', tags=['hello'])
b = a.squeeze()
assert b.shape == (2, 3, 4)
assert b.inds == ('b', 'c', 'e')
assert 'hello' in b.tags
assert a.shape == (1, 2, 3, 1, 4)
@pytest.mark.parametrize('dtype', [None, 'complex128', 'float32'])
def test_randomize(self, dtype):
a = rand_tensor((2, 3, 4), ['a', 'b', 'c'], dtype='float64')
if dtype is not None:
assert a.dtype != dtype
x1 = a.norm()
a.randomize_(dtype=dtype)
x2 = a.norm()
assert x1 != pytest.approx(x2)
assert a.shape == (2, 3, 4)
if dtype is not None:
assert a.dtype == dtype
else:
assert a.dtype == 'float64'
class TestTensorNetwork:
def test_combining_tensors(self):
a = rand_tensor((2, 3, 4), inds=[0, 1, 2], tags='red')
b = rand_tensor((3, 4, 5), inds=[1, 2, 3], tags='blue')
c = rand_tensor((5, 2, 6), inds=[3, 0, 4], tags='blue')
with pytest.raises(TypeError):
a & np.array([0, 0])
abc1 = (a & b & c).H.contract()
abc2 = (a & (b & c)).H.contract()
abc3 = (TensorNetwork([a, b, c])).H.contract()
abc4 = (TensorNetwork([a, TensorNetwork([b, c])])).H.contract()
abc5 = (TensorNetwork([a]) & TensorNetwork([b, c])).H.contract()
assert_allclose(abc1.data, abc2.data)
assert_allclose(abc1.data, abc3.data)
assert_allclose(abc1.data, abc4.data)
assert_allclose(abc1.data, abc5.data)
def test_copy(self):
a = rand_tensor((2, 3, 4), inds='abc', tags='t0')
b = rand_tensor((2, 3, 4), inds='abd', tags='t1')
tn1 = TensorNetwork((a, b))
tn2 = tn1.copy()
# check can modify tensor structure
tn2['t1'].modify(inds=('a', 'b', 'X'))
assert tn1['t1'] is not tn2['t1']
assert tn2['t1'].inds == ('a', 'b', 'X')
assert tn1['t1'].inds == ('a', 'b', 'd')
# but that data remains the same
assert tn1['t1'].data is tn2['t1'].data
tn2['t1'].data[:] /= 2
assert_allclose(tn1['t1'].data, tn2['t1'].data)
def test_copy_deep(self):
a = rand_tensor((2, 3, 4), inds='abc', tags='t0')
b = rand_tensor((2, 3, 4), inds='abd', tags='t1')
tn1 = TensorNetwork((a, b))
tn2 = tn1.copy(deep=True)
# check can modify tensor structure
tn2['t1'].modify(inds=('a', 'b', 'X'))
assert tn1['t1'] is not tn2['t1']
assert tn2['t1'].inds == ('a', 'b', 'X')
assert tn1['t1'].inds == ('a', 'b', 'd')
# and that data is not the same
assert tn1['t1'].data is not tn2['t1'].data
tn2['t1'].data[:] /= 2
assert_allclose(tn1['t1'].data / 2, tn2['t1'].data)
def test_TensorNetwork_init_checks(self):
a = rand_tensor((2, 3, 4), inds=[0, 1, 2], tags={'red'})
b = rand_tensor((3, 4, 5), inds=[1, 2, 3], tags={'blue'})
c = rand_tensor((3, 4, 5), inds=[1, 2, 3], tags={'blue', 'c'})
with pytest.raises(TypeError):
TensorNetwork(a, b) # missing brackets around ``a, b``.
tn = a & b
with pytest.raises(TypeError):
tn['red'] = 1
tn.add_tag('foo')
assert len(tn['foo']) == 2
with pytest.raises(KeyError):
tn['foo'] = c
tn[('foo', 'blue')] = c
assert 'c' in tn.tags
assert tn[('blue', 'c')] is c
assert 'red' in tn.tags
del tn['red']
assert 'red' not in tn.tags
assert set(tn.tag_map.keys()) == {'blue', 'c'}
tn.drop_tags('c')
assert set(tn.tag_map.keys()) == {'blue'}
tn.drop_tags(['blue'])
assert set(tn.tag_map.keys()) == set()
def test_conj(self):
a_data = np.random.randn(2, 3, 4) + 1.0j * np.random.randn(2, 3, 4)
b_data = np.random.randn(3, 4, 5) + 1.0j * np.random.randn(3, 4, 5)
c_data = np.random.randn(5, 2, 6) + 1.0j * np.random.randn(5, 2, 6)
a = Tensor(a_data, inds=[0, 1, 2], tags={'red', '0'})
b = Tensor(b_data, inds=[1, 2, 3], tags={'blue', '1'})
c = Tensor(c_data, inds=[3, 0, 4], tags={'blue', '2'})
tn = a & b & c
new_tn = tn.conj()
for i, arr in enumerate((a_data, b_data, c_data)):
assert_allclose(new_tn[str(i)].data, arr.conj())
# make sure original network unchanged
for i, arr in enumerate((a_data, b_data, c_data)):
assert_allclose(tn[str(i)].data, arr)
def test_conj_inplace(self):
a_data = np.random.randn(2, 3, 4) + 1.0j * np.random.randn(2, 3, 4)
b_data = np.random.randn(3, 4, 5) + 1.0j * np.random.randn(3, 4, 5)
c_data = np.random.randn(5, 2, 6) + 1.0j * np.random.randn(5, 2, 6)
a = Tensor(a_data, inds=[0, 1, 2], tags={'red', 'I0'})
b = Tensor(b_data, inds=[1, 2, 3], tags={'blue', 'I1'})
c = Tensor(c_data, inds=[3, 0, 4], tags={'blue', 'I2'})
tn = a & b & c
tn.conj_()
for i, arr in enumerate((a_data, b_data, c_data)):
assert_allclose(tn[f"I{i}"].data, arr.conj())
def test_multiply(self):
a = rand_tensor((2, 3, 4), inds=['0', '1', '2'], tags='red')
b = rand_tensor((3, 4, 5), inds=['1', '2', '3'], tags='blue')
c = rand_tensor((5, 2, 6), inds=['3', '0', '4'], tags='blue')
tn = a & b & c
x1 = (tn & tn.H) ^ ...
x2 = ((2 * tn) & tn.H) ^ ...
assert_allclose(2 * x1, x2)
def test_multiply_inplace(self):
a = rand_tensor((2, 3, 4), inds=['0', '1', '2'], tags='red')
b = rand_tensor((3, 4, 5), inds=['1', '2', '3'], tags='blue')
c = rand_tensor((5, 2, 6), inds=['3', '0', '4'], tags='blue')
tn = a & b & c
x1 = (tn & tn.H) ^ ...
tn *= 2
x2 = (tn & tn.H) ^ ...
assert_allclose(4 * x1, x2)
def test_multiply_each(self):
a = rand_tensor((2, 3, 4), inds=['0', '1', '2'], tags='red')
b = rand_tensor((3, 4, 5), inds=['1', '2', '3'], tags='blue')
c = rand_tensor((5, 2, 6), inds=['3', '0', '4'], tags='blue')
tn = a & b & c
x1 = (tn & tn.H) ^ ...
x2 = (tn.multiply_each(2) & tn.H) ^ ...
assert_allclose(2**3 * x1, x2)
def test_divide(self):
a = rand_tensor((2, 3, 4), inds=['0', '1', '2'], tags='red')
b = rand_tensor((3, 4, 5), inds=['1', '2', '3'], tags='blue')
c = rand_tensor((5, 2, 6), inds=['3', '0', '4'], tags='blue')
tn = a & b & c
x1 = (tn & tn.H) ^ ...
x2 = ((tn / 2) & tn.H) ^ ...
assert_allclose(x1 / 2, x2)
def test_divide_inplace(self):
a = rand_tensor((2, 3, 4), inds=['0', '1', '2'], tags='red')
b = rand_tensor((3, 4, 5), inds=['1', '2', '3'], tags='blue')
c = rand_tensor((5, 2, 6), inds=['3', '0', '4'], tags='blue')
tn = a & b & c
x1 = (tn & tn.H) ^ ...
tn /= 2
x2 = (tn & tn.H) ^ ...
assert_allclose(x1 / 4, x2)
def test_multiply_spread(self):
a = rand_tensor([2, 2], inds=['a', 'b'], tags='A')
b = Tensor(a.data, ['b', 'c'], tags='B')
c = Tensor(a.data, ['c', 'd'], tags='C')
tn = (a | b | c)
tn.multiply_(-8j + 1 / 3, spread_over=3)
assert_allclose(tn['A'].data, tn['B'].data)
assert_allclose(tn['B'].data, tn['C'].data)
def test_multiply_spread_neg_stays_real(self):
a = rand_tensor([2, 2], inds=['a', 'b'], tags='A', dtype='float32')
b = Tensor(a.data, ['b', 'c'], tags='B')
c = Tensor(a.data, ['c', 'd'], tags='C')
tn = (a | b | c)
tn.multiply_(-1000)
assert a.dtype == b.dtype == c.dtype == 'float32'
assert_allclose(abs(tn['A'].data), abs(tn['B'].data))
assert_allclose(abs(tn['B'].data), abs(tn['C'].data))
def test_contracting_tensors(self):
a = rand_tensor((2, 3, 4), inds=[0, 1, 2], tags='red')
b = rand_tensor((3, 4, 5), inds=[1, 2, 3], tags='blue')
c = rand_tensor((5, 2, 6), inds=[3, 0, 4], tags='blue')
a_b_c = a & b & c
print(a_b_c)
repr(a_b_c)
assert isinstance(a_b_c, TensorNetwork)
a_bc = a_b_c ^ 'blue'
assert isinstance(a_bc, TensorNetwork)
assert len(a_bc.tensors) == 2
abc = a_bc ^ ['red', 'blue']
assert isinstance(abc, Tensor)
assert_allclose(abc.data, a_b_c.contract().data)
assert len(a_b_c.tensors) == 3
a_b_c ^= 'blue'
assert len(a_b_c.tensors) == 2
def test_cumulative_contract(self):
a = rand_tensor((2, 3, 4), inds=[0, 1, 2], tags='red')
b = rand_tensor((3, 4, 5), inds=[1, 2, 3], tags='blue')
c = rand_tensor((5, 2, 6), inds=[3, 0, 4], tags='green')
d = (a & b & c)
d2 = d.copy()
cd = d >> ['red', 'green', 'blue']
assert cd.shape == (6,)
assert cd.inds == (4,)
# make sure inplace operations didn't affect the original network
for tag, names in d2.tag_map.items():
assert d.tag_map[tag] == names
# test inplace
d >>= ['red', 'green', 'blue']
assert isinstance(d, Tensor)
def test_contract_with_slices(self):
a = rand_tensor((2, 3, 4), inds=[0, 1, 2], tags='I0')
b = rand_tensor((3, 4, 5), inds=[1, 2, 3], tags='I1')
c = rand_tensor((5, 2, 6), inds=[3, 0, 4], tags='I2')
d = rand_tensor((5, 2, 6), inds=[5, 6, 4], tags='I3')
tn = TensorNetwork((a, b, c, d), structure="I{}")
assert len((tn ^ slice(2)).tensors) == 3
assert len((tn ^ slice(..., 1, -1)).tensors) == 3
assert len((tn ^ slice(-1, 1)).tensors) == 3
assert len((tn ^ slice(None, -2, -1)).tensors) == 3
assert len((tn ^ slice(-2, 0)).tensors) == 3
def test_reindex(self):
a = Tensor(np.random.randn(2, 3, 4), inds=[0, 1, 2], tags='red')
b = Tensor(np.random.randn(3, 4, 5), inds=[1, 2, 3], tags='blue')
c = Tensor(np.random.randn(5, 2, 6), inds=[3, 0, 4], tags='green')
a_b_c = (a & b & c)
d = a_b_c.reindex({4: 'foo', 2: 'bar'})
assert a_b_c.outer_inds() == (4,)
assert d.outer_inds() == ('foo',)
assert set(a_b_c.inner_inds()) == {0, 1, 2, 3}
assert set(d.inner_inds()) == {0, 1, 'bar', 3}
assert d.tensors[0].inds == (0, 1, 'bar')
d = a_b_c.reindex_({4: 'foo', 2: 'bar'})
assert a_b_c.outer_inds() == ('foo',)
assert set(d.inner_inds()) == {0, 1, 'bar', 3}
assert d.tensors[0].inds == (0, 1, 'bar')
def test_add_tag(self):
a = rand_tensor((2, 3, 4), inds='abc', tags={'red'})
b = rand_tensor((2, 3, 4), inds='abc', tags={'blue'})
tn = a & b
tn.add_tag('green')
assert 'green' in tn.tag_map
assert 'green' in tn['red'].tags
assert 'green' in tn['blue'].tags
tn.add_tag('blue')
for t in tn:
assert 'blue' in t.tags
def test_index_by_site(self):
a_data = np.random.randn(2, 3, 4)
b_data = np.random.randn(2, 3, 4)
a = Tensor(a_data, inds='abc', tags={'I0'})
b = Tensor(b_data, inds='abc', tags={'I1'})
tn = TensorNetwork((a, b), structure="I{}")
assert_allclose(tn[0].data, a_data)
new_data = np.random.randn(2, 3, 4)
tn[1] = Tensor(new_data, inds='abc', tags={'I1', 'red'})
assert_allclose(tn['I1'].data, new_data)
assert 'red' in tn['I1'].tags
def test_set_data_in_tensor(self):
a_data = np.random.randn(2, 3, 4)
b_data = np.random.randn(2, 3, 4)
a = Tensor(a_data, inds='abc', tags={'I0'})
b = Tensor(b_data, inds='abc', tags={'I1'})
tn = TensorNetwork((a, b), structure="I{}")
assert_allclose(tn[0].data, a_data)
new_data = np.random.randn(2, 3, 4)
tn[1].modify(data=new_data)
assert_allclose(tn['I1'].data, new_data)
def test_combining_with_no_check_collisions(self):
p1 = MPS_rand_state(5, 3, phys_dim=3)
p2 = MPS_rand_state(5, 3, phys_dim=3)
# shouldn't need to check any collisions
tn = TensorNetwork((p1, p2), check_collisions=False)
# test can contract
assert 0 < abs(tn ^ ...) < 1
def test_retagging(self):
x = rand_tensor((2, 4), inds='ab', tags={'X', 'I0'})
y = rand_tensor((4, 2, 5), inds='bcd', tags={'Y', 'I1'})
z = rand_tensor((5, 3), inds='de', tags={'Z', 'I2'})
tn = TensorNetwork((x, y, z))
tn.retag_({"I0": "I1", "I1": "I2", "I2": "I3", "Z": "A"})
assert set(tn.tag_map.keys()) == {'X', 'I1', 'I2', 'I3', 'Y', 'A'}
def test_squeeze(self):
A, B, C = (rand_tensor((1, 2, 3), 'abc', tags=['I0']),
rand_tensor((2, 3, 4), 'bcd', tags=['I1']),
rand_tensor((4, 1, 1), 'dae', tags=['I2']))
tn = A & B & C
x1 = tn ^ ...
stn = tn.squeeze()
assert tn['I0'].shape == (1, 2, 3)
assert tn['I1'].shape == (2, 3, 4)
assert tn['I2'].shape == (4, 1, 1)
assert stn['I0'].shape == (2, 3)
assert stn['I1'].shape == (2, 3, 4)
assert stn['I2'].shape == (4,)
x2 = stn ^ ...
assert_allclose(x1.data, x2) # x2 should be scalar already
def test_tensors_sorted(self):
tn1, tn2 = TensorNetwork([]), TensorNetwork([])
A, B, C = (rand_tensor((1, 2, 3), 'abc', tags=['I0']),
rand_tensor((2, 3, 4), 'bcd', tags=['I1']),
rand_tensor((4, 1, 1), 'dae', tags=['I2']))
tn1 &= A
tn1 &= B
tn1 &= C
tn2 &= C
tn2 &= A
tn2 &= B
for t1, t2 in zip(tn1.tensors_sorted(), tn2.tensors_sorted()):
assert t1.tags == t2.tags
assert t1.almost_equals(t2)
def test_select_tensors_mode(self):
A, B, C = (rand_tensor((2, 2), 'ab', tags={'0', 'X'}),
rand_tensor((2, 2), 'bc', tags={'1', 'X', 'Y'}),
rand_tensor((2, 3), 'cd', tags={'2', 'Y'}))
tn = A & B & C
ts = tn.select_tensors(('X', 'Y'), which='all')
assert len(ts) == 1
assert not any(map(A.almost_equals, ts))
assert any(map(B.almost_equals, ts))
assert not any(map(C.almost_equals, ts))
ts = tn.select_tensors(('X', 'Y'), which='any')
assert len(ts) == 3
assert any(map(A.almost_equals, ts))
assert any(map(B.almost_equals, ts))
assert any(map(C.almost_equals, ts))
def test_replace_with_identity(self):
A, B, C, D = (rand_tensor((2, 3, 4), 'abc', tags=['I0']),
rand_tensor((4, 5, 6), 'cde', tags=['I1']),
rand_tensor((5, 6, 7), 'def', tags=['I2']),
rand_tensor((7,), 'f', tags=['I3']))
tn = (A & B & C & D)
with pytest.raises(ValueError):
tn.replace_with_identity(('I1', 'I2'), inplace=True)
tn['I2'] = rand_tensor((5, 6, 4), 'def', tags=['I2'])
tn['I3'] = rand_tensor((4,), 'f', tags=['I3'])
tn1 = tn.replace_with_identity(('I1', 'I2'))
assert len(tn1.tensors) == 2
x = tn1 ^ ...
assert set(x.inds) == {'a', 'b'}
A, B, C = (rand_tensor((2, 2), 'ab', tags={'0'}),
rand_tensor((2, 2), 'bc', tags={'1'}),
rand_tensor((2, 3), 'cd', tags={'2'}))
tn = A & B & C
tn2 = tn.replace_with_identity('1')
assert len(tn2.tensors) == 2
x = tn2 ^ ...
assert set(x.inds) == {'a', 'd'}
def test_partition(self):
k = MPS_rand_state(10, 7, site_tag_id='Q{}', structure_bsz=4)
where = [f'Q{i}' for i in range(10) if i % 2 == 1]
k.add_tag('odd', where=where, which='any')
tn_even, tn_odd = k.partition('odd')
assert len(tn_even.tensors) == len(tn_odd.tensors) == 5
assert tn_even.structure == 'Q{}'
assert tn_even.structure_bsz == 4
assert tn_odd.structure == 'Q{}'
assert tn_odd.structure_bsz == 4
assert (tn_even & tn_odd).sites == range(10)
@pytest.mark.parametrize("backend", ['svd', 'eig', 'isvd', 'svds', 'rsvd'])
def test_compress_between(self, backend):
A = rand_tensor((3, 4, 5), 'abd', tags={'T1'})
tensor_direct_product(A, A, inplace=True)
B = rand_tensor((5, 6), 'dc', tags={'T2'})
tensor_direct_product(B, B, inplace=True)
tn = A & B
assert A.shared_bond_size(B) == 10
tn.compress_between('T1', 'T2', backend=backend)
@pytest.mark.parametrize("backend", ['svd', 'eig', 'isvd', 'svds', 'rsvd'])
def test_compress_all(self, backend):
k = MPS_rand_state(10, 7)
k += k
k /= 2
k.compress_all(max_bond=5, backend=backend)
assert k.max_bond() == 5
assert_allclose(k.H @ k, 1.0)
def test_insert_operator(self):
p = MPS_rand_state(3, 7, tags='KET')
q = p.H.retag({'KET': 'BRA'})
qp = q & p
sz = qu.spin_operator('z').real
qp.insert_operator(sz, ('KET', 'I1'), ('BRA', 'I1'),
tags='SZ', inplace=True)
assert 'SZ' in qp.tags
assert len(qp.tensors) == 7
x1 = qp ^ all
x2 = qu.expec(p.to_dense(), qu.ikron(sz, [2, 2, 2], inds=1))
assert x1 == pytest.approx(x2)
@pytest.mark.parametrize("dtype", (float, complex))
def test_insert_gauge(self, dtype):
k = MPS_rand_state(10, 7, dtype=dtype, normalize=False)
kU = k.copy()
U = rand_tensor((7, 7), dtype=dtype, inds='ab').data
kU.insert_gauge(U, 4, 5)
assert k[3].almost_equals(kU[3])
assert not k[4].almost_equals(kU[4])
assert not k[5].almost_equals(kU[5])
assert k[6].almost_equals(kU[6])
assert k[4].inds == kU[4].inds
assert k[5].inds == kU[5].inds
assert_allclose(k.H @ k, kU.H @ kU)
def test_fuse_multibonds(self):
x = rand_tensor((2, 2, 2), ['a', 'b', 'c'])
y = rand_tensor((2, 2, 2, 2), ['b', 'c', 'd', 'e'])
z = rand_tensor((2, 2, 2), ['a', 'e', 'd'])
tn = (x & y & z)
assert len(tn.inner_inds()) == 5
tn.fuse_multibonds(inplace=True)
assert len(tn.inner_inds()) == 3
def test_graph(self):
import matplotlib
matplotlib.use('Template')
k = MPS_rand_state(10, 7, normalize=False)
k.graph(color=['I0', 'I2'])
def test_graph_with_fixed_pos(self):
n = 7
p = MPS_rand_state(n, 7, tags='KET')
q = MPS_rand_state(n, 7, tags='BRA')
fix = {**{('KET', f'I{i}'): (i, 0) for i in range(n)},
**{('BRA', f'I{i}'): (i, 1) for i in range(n)}}
(q | p).graph(colors=['KET', 'BRA'], fix=fix)
def test_pickle(self):
import tempfile
import os
pytest.importorskip("joblib")
tn = MPS_rand_state(10, 7, tags='KET')
with tempfile.TemporaryDirectory() as tdir:
fname = os.path.join(tdir, "tn.dmp")
qu.save_to_disk(tn, fname)
tn2 = qu.load_from_disk(fname)
assert tn.H @ tn2 == pytest.approx(1.0)
assert all(hash(tn) not in t.owners for t in tn2)
assert all(hash(tn2) in t.owners for t in tn2)
@pytest.mark.parametrize('dtype', [None, 'float32', 'complex128'])
def test_randomize(self, dtype):
psi = MPS_rand_state(5, 3, dtype='float64')
x1 = psi.H @ psi
psi.randomize_(seed=42, dtype=dtype)
x2 = psi.H @ psi
assert x1 != pytest.approx(x2)
if dtype is None:
assert psi.dtype == 'float64'
else:
assert psi.dtype == dtype
psi.randomize_(seed=42, dtype=dtype)
x3 = psi.H @ psi
assert x2 == pytest.approx(x3)
class TestTensorNetworkSimplifications:
def test_rank_simplify(self):
A = rand_tensor([2, 2, 3], 'abc', tags='A')
B = rand_tensor([3, 2], 'cd', tags='B')
C = rand_tensor([2, 2, 2], 'def', tags='C')
tn = A & B & C
tn_s = tn.rank_simplify()
assert tn.num_tensors == 3
assert tn_s.num_tensors == 2
assert (tn ^ all).almost_equals(tn_s ^ all)
# check that 'B' was absorbed into 'A', not 'C'
assert tn_s['B'].tags == {'A', 'B'}
def test_diagonal_reduce(self):
A = rand_tensor([2, 2], 'ab', dtype=complex)
B = Tensor([[3j, 0.], [0., 4j]], 'bc')
C = rand_tensor([2, 2], 'ca', dtype=complex)
tn = A & B & C
tn_s = tn.diagonal_reduce()
assert tn.num_indices == 3
assert tn_s.num_indices == 2
assert tn ^ all == pytest.approx(tn_s.contract(all, output_inds=[]))
def test_antidiag_gauge(self):
A = rand_tensor([2, 2], 'ab', dtype=complex)
B = Tensor([[0., 3j], [4j, 0.]], 'bc')
C = rand_tensor([2, 2], 'ca', dtype=complex)
tn = A & B & C
assert tn.num_indices == 3
# can't use diagonal reduction yet
assert tn.diagonal_reduce().num_indices == 3
# initial gauge doesn't change indices
tn_a = tn.antidiag_gauge()
assert tn_a.num_indices == 3
# but allows the diagonal reduction
tn_ad = tn_a.diagonal_reduce()
assert tn_ad.num_indices == 2
assert tn ^ all == pytest.approx(tn_ad.contract(all, output_inds=[]))
def test_column_reduce(self):
A = rand_tensor([2, 3], 'ab')
A.new_ind('c', size=4)
B = rand_tensor([4, 5, 6], 'cde')
tn = A & B
assert tn.num_indices == 5
tn_s = tn.column_reduce()
assert tn_s.num_indices == 4
assert (tn ^ all).almost_equals(tn_s ^ all)
class TestTensorNetworkAsLinearOperator:
def test_against_dense(self):
A, B, C, D = (
rand_tensor([3, 5, 5], 'aef'),
rand_tensor([3, 5, 5], 'beg'),
rand_tensor([3, 5, 5], 'cfh'),
rand_tensor([3, 5, 5], 'dhg'),
)
tn = A & B & C & D
tn_lo = tn.aslinearoperator(('a', 'b'), ('c', 'd'))
tn_d = tn.to_dense(['a', 'b'], ['c', 'd'])
u, s, v = qu.svds(tn_lo, k=5, backend='scipy')
ud, sd, vd = qu.svds(tn_d, k=5, backend='scipy')
assert_allclose(s, sd)
# test matmat
X = np.random.randn(9, 8) + 1.0j * np.random.randn(9, 8)
assert_allclose(tn_lo.dot(X), tn_d.dot(X))
@pytest.mark.parametrize("dtype", (float, complex))
@pytest.mark.parametrize("method", ('isvd', 'rsvd'))
def test_replace_with_svd_using_linear_operator(self, dtype, method):
k = MPS_rand_state(100, 10, dtype=dtype, cyclic=True)
b = k.H
b.expand_bond_dimension(11)
k.add_tag('_KET')
b.add_tag('_BRA')
tn = b & k
x1 = tn ^ ...
ul, = tn['_KET', 'I1'].bonds(tn['_KET', 'I2'])
ll, = tn['_BRA', 'I1'].bonds(tn['_BRA', 'I2'])
where = [f'I{i}' for i in range(2, 40)]
tn.replace_with_svd(where, left_inds=(ul, ll), eps=1e-3, method=method,
inplace=True, ltags='_U', rtags='_V')
tn.structure = None
x2 = tn ^ ...
# check ltags and rtags have gone in
assert isinstance(tn['_U'], Tensor)
assert isinstance(tn['_V'], Tensor)
assert_allclose(x1, x2, rtol=1e-4)
def test_TNLinearOperator1D(self):
p = MPS_rand_state(40, 10, dtype=complex)
pp = p.H & p
start, stop = 10, 30
lix = bonds(pp[start - 1], pp[start])
rix = bonds(pp[stop - 1], pp[stop])
sec = pp[start:stop]
A = TNLinearOperator1D(sec, lix, rix, start, stop)
B = sec.aslinearoperator(lix, rix)
s1 = spla.svds(A)[1]
s2 = spla.svds(B)[1]
assert_allclose(s1, s2)
|
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Risk Report
===========
+-----------------+----------------------------------------------------+
| key | value |
+=================+====================================================+
| trading_days | The number of trading days between self.start_date |
| | and self.end_date |
+-----------------+----------------------------------------------------+
| benchmark_volat\| The volatility of the benchmark between |
| ility | self.start_date and self.end_date. |
+-----------------+----------------------------------------------------+
| algo_volatility | The volatility of the algo between self.start_date |
| | and self.end_date. |
+-----------------+----------------------------------------------------+
| treasury_period\| The return of treasuries over the period. Treasury |
| _return | maturity is chosen to match the duration of the |
| | test period. |
+-----------------+----------------------------------------------------+
| sharpe | The sharpe ratio based on the _algorithm_ (rather |
| | than the static portfolio) returns. |
+-----------------+----------------------------------------------------+
| information | The information ratio based on the _algorithm_ |
| | (rather than the static portfolio) returns. |
+-----------------+----------------------------------------------------+
| beta | The _algorithm_ beta to the benchmark. |
+-----------------+----------------------------------------------------+
| alpha | The _algorithm_ alpha to the benchmark. |
+-----------------+----------------------------------------------------+
| excess_return | The excess return of the algorithm over the |
| | treasuries. |
+-----------------+----------------------------------------------------+
| max_drawdown | The largest relative peak to relative trough move |
| | for the portfolio returns between self.start_date |
| | and self.end_date. |
+-----------------+----------------------------------------------------+
| max_leverage | The largest gross leverage between self.start_date |
| | and self.end_date |
+-----------------+----------------------------------------------------+
"""
import logbook
import math
import numpy as np
from zipline.finance import trading
import zipline.utils.math_utils as zp_math
log = logbook.Logger('Risk')
TREASURY_DURATIONS = [
'1month', '3month', '6month',
'1year', '2year', '3year', '5year',
'7year', '10year', '30year'
]
# Check whether a field in rval is nan or inf so that the caller can
# replace it with None.
def check_entry(key, value):
if key != 'period_label':
return np.isnan(value) or np.isinf(value)
else:
return False
############################
# Risk Metric Calculations #
############################
def sharpe_ratio(algorithm_volatility, algorithm_return, treasury_return):
"""
http://en.wikipedia.org/wiki/Sharpe_ratio
Args:
algorithm_volatility (float): Algorithm volatility.
algorithm_return (float): Algorithm return percentage.
treasury_return (float): Treasury return percentage.
Returns:
float. The Sharpe ratio.
"""
if zp_math.tolerant_equals(algorithm_volatility, 0):
return np.nan
return (algorithm_return - treasury_return) / algorithm_volatility
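# Worked example (illustrative sketch, not part of the original module):
#
#     sharpe_ratio(0.20, 0.12, 0.02)  # (0.12 - 0.02) / 0.20 -> 0.5 (up to float rounding)
#     sharpe_ratio(0.0, 0.12, 0.02)   # zero volatility -> np.nan rather than a division error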
def downside_risk(algorithm_returns, mean_returns, normalization_factor):
rets = algorithm_returns.round(8)
mar = mean_returns.round(8)
mask = rets < mar
downside_diff = rets[mask] - mar[mask]
if len(downside_diff) <= 1:
return 0.0
return np.std(downside_diff, ddof=1) * math.sqrt(normalization_factor)
def sortino_ratio(algorithm_period_return, treasury_period_return, mar):
"""
http://en.wikipedia.org/wiki/Sortino_ratio
Args:
algorithm_period_return (float):
Algorithm return percentage from the latest period.
treasury_period_return (float):
Treasury return percentage over the same period.
mar (float): Minimum acceptable return.
Returns:
float. The Sortino ratio.
"""
if zp_math.tolerant_equals(mar, 0):
return 0.0
return (algorithm_period_return - treasury_period_return) / mar
def information_ratio(algorithm_returns, benchmark_returns):
"""
http://en.wikipedia.org/wiki/Information_ratio
Args:
algorithm_returns (np.array-like):
All returns during algorithm lifetime.
benchmark_returns (np.array-like):
All benchmark returns during algo lifetime.
Returns:
float. Information ratio.
"""
relative_returns = algorithm_returns - benchmark_returns
relative_deviation = relative_returns.std(ddof=1)
if zp_math.tolerant_equals(relative_deviation, 0) or \
np.isnan(relative_deviation):
return 0.0
return np.mean(relative_returns) / relative_deviation
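# Worked example (illustrative sketch, not part of the original module):
#
#     algo = np.array([0.01, 0.02, 0.00])
#     bench = np.array([0.005, 0.010, 0.005])
#     information_ratio(algo, bench)   # relative returns [0.005, 0.01, -0.005]
#                                      # -> mean / std(ddof=1) ~ 0.44
#     information_ratio(bench, bench)  # zero relative deviation -> 0.0 via the guard above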
def alpha(algorithm_period_return, treasury_period_return,
benchmark_period_returns, beta):
"""
http://en.wikipedia.org/wiki/Alpha_(investment)
Args:
algorithm_period_return (float):
Return percentage from algorithm period.
treasury_period_return (float):
Return percentage for treasury period.
benchmark_period_returns (float):
Return percentage for benchmark period.
beta (float):
beta value for the same period as all other values
Returns:
float. The alpha of the algorithm.
"""
return algorithm_period_return - \
(treasury_period_return + beta *
(benchmark_period_returns - treasury_period_return))
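# Worked example (illustrative sketch, not part of the original module):
#
#     # 15% algorithm return, 2% treasury return, 10% benchmark return, beta of 1.0
#     alpha(0.15, 0.02, 0.10, 1.0)   # 0.15 - (0.02 + 1.0 * 0.08) -> 0.05 (up to float rounding)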
###########################
# End Risk Metric Section #
###########################
def get_treasury_rate(treasury_curves, treasury_duration, day):
rate = None
curve = treasury_curves.ix[day]
# 1month note data begins in 8/2001,
# so we can use 3month instead.
idx = TREASURY_DURATIONS.index(treasury_duration)
for duration in TREASURY_DURATIONS[idx:]:
rate = curve[duration]
if rate is not None:
break
return rate
def search_day_distance(end_date, dt):
tdd = trading.environment.trading_day_distance(dt, end_date)
if tdd is None:
return None
assert tdd >= 0
return tdd
def select_treasury_duration(start_date, end_date):
td = end_date - start_date
if td.days <= 31:
treasury_duration = '1month'
elif td.days <= 93:
treasury_duration = '3month'
elif td.days <= 186:
treasury_duration = '6month'
elif td.days <= 366:
treasury_duration = '1year'
elif td.days <= 365 * 2 + 1:
treasury_duration = '2year'
elif td.days <= 365 * 3 + 1:
treasury_duration = '3year'
elif td.days <= 365 * 5 + 2:
treasury_duration = '5year'
elif td.days <= 365 * 7 + 2:
treasury_duration = '7year'
elif td.days <= 365 * 10 + 2:
treasury_duration = '10year'
else:
treasury_duration = '30year'
return treasury_duration
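# Example (illustrative sketch, not part of the original module):
#
#     from datetime import datetime
#     select_treasury_duration(datetime(2013, 1, 1),
#                              datetime(2013, 2, 15))   # 45 days  -> '3month'
#     select_treasury_duration(datetime(2013, 1, 1),
#                              datetime(2013, 12, 31))  # 364 days -> '1year'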
def choose_treasury(select_treasury, treasury_curves, start_date, end_date,
compound=True):
treasury_duration = select_treasury(start_date, end_date)
end_day = end_date.replace(hour=0, minute=0, second=0, microsecond=0)
search_day = None
if end_day in treasury_curves.index:
rate = get_treasury_rate(treasury_curves,
treasury_duration,
end_day)
if rate is not None:
search_day = end_day
if not search_day:
# in case end date is not a trading day or there is no treasury
# data, search for the previous day with an interest rate.
search_days = treasury_curves.index
# Find rightmost value less than or equal to end_day
i = search_days.searchsorted(end_day)
for prev_day in search_days[i - 1::-1]:
rate = get_treasury_rate(treasury_curves,
treasury_duration,
prev_day)
if rate is not None:
search_day = prev_day
search_dist = search_day_distance(end_date, prev_day)
break
if search_day:
if (search_dist is None or search_dist > 1) and \
search_days[0] <= end_day <= search_days[-1]:
message = "No rate within 1 trading day of end date = \
{dt} and term = {term}. Using {search_day}. Check that date doesn't exceed \
treasury history range."
message = message.format(dt=end_date,
term=treasury_duration,
search_day=search_day)
log.warn(message)
if search_day:
td = end_date - start_date
if compound:
return rate * (td.days + 1) / 365
else:
return rate
message = "No rate for end date = {dt} and term = {term}. Check \
that date doesn't exceed treasury history range."
message = message.format(
dt=end_date,
term=treasury_duration
)
raise Exception(message)
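# Example (hedged sketch, not part of the original module; assumes a pandas
# version that still supports the ``.ix`` indexer used by get_treasury_rate,
# and a treasury_curves frame whose columns follow TREASURY_DURATIONS and
# whose index holds dates):
#
#     import pandas as pd
#     from datetime import datetime
#
#     start, end = datetime(2013, 1, 1), datetime(2013, 1, 31)
#     curves = pd.DataFrame({'1month': [0.0005]}, index=[end])
#     choose_treasury(select_treasury_duration, curves, start, end)
#     # 30-day span -> '1month'; with compound=True the annual rate is
#     # scaled as 0.0005 * (30 + 1) / 365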
|
|
# -*- coding: utf-8 -*-
"""
flask.ctx
~~~~~~~~~
Implements the objects required to keep the context.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import sys
from functools import update_wrapper
from werkzeug.exceptions import HTTPException
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import appcontext_pushed, appcontext_popped
from ._compat import BROKEN_PYPY_CTXMGR_EXIT, reraise
# a singleton sentinel value for parameter defaults
_sentinel = object()
class _AppCtxGlobals(object):
"""A plain object."""
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __contains__(self, item):
return item in self.__dict__
def __iter__(self):
return iter(self.__dict__)
def __repr__(self):
top = _app_ctx_stack.top
if top is not None:
return '<flask.g of %r>' % top.app.name
return object.__repr__(self)
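# Example (hedged sketch, not part of the original module): ``flask.g`` is an
# instance of this class bound to the active application context::
#
#     from flask import Flask, g
#
#     app = Flask(__name__)
#     with app.app_context():
#         g.db_handle = object()
#         assert g.get('missing', default=42) == 42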
def after_this_request(f):
"""Executes a function after this request. This is useful to modify
response objects. The function is passed the response object and has
to return the same or a new one.
Example::
@app.route('/')
def index():
@after_this_request
def add_header(response):
response.headers['X-Foo'] = 'Parachute'
return response
return 'Hello World!'
This is more useful if a function other than the view function wants to
modify a response. For instance think of a decorator that wants to add
some headers without converting the return value into a response object.
.. versionadded:: 0.9
"""
_request_ctx_stack.top._after_request_functions.append(f)
return f
def copy_current_request_context(f):
"""A helper function that decorates a function to retain the current
request context. This is useful when working with greenlets. The moment
the function is decorated a copy of the request context is created and
then pushed when the function is called.
Example::
import gevent
from flask import copy_current_request_context
@app.route('/')
def index():
@copy_current_request_context
def do_some_work():
# do some work here, it can access flask.request like you
# would otherwise in the view function.
...
gevent.spawn(do_some_work)
return 'Regular response'
.. versionadded:: 0.10
"""
top = _request_ctx_stack.top
if top is None:
raise RuntimeError('This decorator can only be used at local scopes '
'when a request context is on the stack. For instance within '
'view functions.')
reqctx = top.copy()
def wrapper(*args, **kwargs):
with reqctx:
return f(*args, **kwargs)
return update_wrapper(wrapper, f)
def has_request_context():
"""If you have code that wants to test if a request context is there or
not this function can be used. For instance, you may want to take advantage
of request information if the request object is available, but fail
silently if it is unavailable.
::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and has_request_context():
remote_addr = request.remote_addr
self.remote_addr = remote_addr
Alternatively you can also just test any of the context bound objects
(such as :class:`request` or :class:`g` for truthness)::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and request:
remote_addr = request.remote_addr
self.remote_addr = remote_addr
.. versionadded:: 0.7
"""
return _request_ctx_stack.top is not None
def has_app_context():
"""Works like :func:`has_request_context` but for the application
context. You can also just do a boolean check on the
:data:`current_app` object instead.
.. versionadded:: 0.9
"""
return _app_ctx_stack.top is not None
class AppContext(object):
"""The application context binds an application object implicitly
to the current thread or greenlet, similar to how the
:class:`RequestContext` binds request information. The application
context is also implicitly created if a request context is created
but the application is not on top of the individual application
context.
"""
def __init__(self, app):
self.app = app
self.url_adapter = app.create_url_adapter(None)
self.g = app.app_ctx_globals_class()
# Like request context, app contexts can be pushed multiple times
# but for them a simple "refcount" is enough to track them.
self._refcnt = 0
def push(self):
"""Binds the app context to the current context."""
self._refcnt += 1
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
_app_ctx_stack.push(self)
appcontext_pushed.send(self.app)
def pop(self, exc=_sentinel):
"""Pops the app context."""
self._refcnt -= 1
if self._refcnt <= 0:
if exc is _sentinel:
exc = sys.exc_info()[1]
self.app.do_teardown_appcontext(exc)
rv = _app_ctx_stack.pop()
assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
% (rv, self)
appcontext_popped.send(self.app)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
self.pop(exc_value)
if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
reraise(exc_type, exc_value, tb)
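# Example (hedged sketch, not part of the original module): the usual way to
# obtain an application context is :meth:`Flask.app_context`, used as a
# context manager so push() and pop() stay balanced::
#
#     from flask import Flask, current_app
#
#     app = Flask(__name__)
#     with app.app_context():
#         assert current_app.name == app.name
#     # leaving the block calls pop(), which runs do_teardown_appcontext()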
class RequestContext(object):
"""The request context contains all request relevant information. It is
created at the beginning of the request and pushed to the
`_request_ctx_stack` and removed at the end of it. It will create the
URL adapter and request object for the WSGI environment provided.
Do not attempt to use this class directly, instead use
:meth:`~flask.Flask.test_request_context` and
:meth:`~flask.Flask.request_context` to create this object.
When the request context is popped, it will evaluate all the
functions registered on the application for teardown execution
(:meth:`~flask.Flask.teardown_request`).
The request context is automatically popped at the end of the request
for you. In debug mode the request context is kept around if
exceptions happen so that interactive debuggers have a chance to
introspect the data. With 0.4 this can also be forced for requests
that did not fail and outside of ``DEBUG`` mode. By setting
``'flask._preserve_context'`` to ``True`` on the WSGI environment the
context will not pop itself at the end of the request. This is used by
the :meth:`~flask.Flask.test_client` for example to implement the
deferred cleanup functionality.
You might find this helpful for unittests where you need the
information from the context local around for a little longer. Make
sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
that situation, otherwise your unittests will leak memory.
"""
def __init__(self, app, environ, request=None):
self.app = app
if request is None:
request = app.request_class(environ)
self.request = request
self.url_adapter = app.create_url_adapter(self.request)
self.flashes = None
self.session = None
# Request contexts can be pushed multiple times and interleaved with
# other request contexts. Only when the last level is popped do we
# get rid of them. Additionally, if an application context is missing,
# one is created implicitly, so for each level we record this information.
self._implicit_app_ctx_stack = []
# indicator if the context was preserved. Next time another context
# is pushed the preserved context is popped.
self.preserved = False
# remembers the exception for pop if there is one in case the context
# preservation kicks in.
self._preserved_exc = None
# Functions that should be executed after the request on the response
# object. These will be called before the regular "after_request"
# functions.
self._after_request_functions = []
self.match_request()
def _get_g(self):
return _app_ctx_stack.top.g
def _set_g(self, value):
_app_ctx_stack.top.g = value
g = property(_get_g, _set_g)
del _get_g, _set_g
def copy(self):
"""Creates a copy of this request context with the same request object.
This can be used to move a request context to a different greenlet.
Because the actual request object is the same this cannot be used to
move a request context to a different thread unless access to the
request object is locked.
.. versionadded:: 0.10
"""
return self.__class__(self.app,
environ=self.request.environ,
request=self.request
)
def match_request(self):
"""Can be overridden by a subclass to hook into the matching
of the request.
"""
try:
url_rule, self.request.view_args = \
self.url_adapter.match(return_rule=True)
self.request.url_rule = url_rule
except HTTPException as e:
self.request.routing_exception = e
def push(self):
"""Binds the request context to the current context."""
# If an exception occurs in debug mode or if context preservation is
# activated under exception situations exactly one context stays
# on the stack. The rationale is that you want to access that
# information under debug situations. However if someone forgets to
# pop that context again we want to make sure that on the next push
# it's invalidated, otherwise we run the risk that something leaks
# memory. This is usually only a problem in the test suite since this
# functionality is not active in production environments.
top = _request_ctx_stack.top
if top is not None and top.preserved:
top.pop(top._preserved_exc)
# Before we push the request context we have to ensure that there
# is an application context.
app_ctx = _app_ctx_stack.top
if app_ctx is None or app_ctx.app != self.app:
app_ctx = self.app.app_context()
app_ctx.push()
self._implicit_app_ctx_stack.append(app_ctx)
else:
self._implicit_app_ctx_stack.append(None)
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
_request_ctx_stack.push(self)
# Open the session at the moment that the request context is
# available. This allows a custom open_session method to use the
# request context (e.g. code that accesses database information
# stored on `g` instead of the appcontext).
self.session = self.app.open_session(self.request)
if self.session is None:
self.session = self.app.make_null_session()
def pop(self, exc=_sentinel):
"""Pops the request context and unbinds it by doing that. This will
also trigger the execution of functions registered by the
:meth:`~flask.Flask.teardown_request` decorator.
.. versionchanged:: 0.9
Added the `exc` argument.
"""
app_ctx = self._implicit_app_ctx_stack.pop()
clear_request = False
if not self._implicit_app_ctx_stack:
self.preserved = False
self._preserved_exc = None
if exc is _sentinel:
exc = sys.exc_info()[1]
self.app.do_teardown_request(exc)
# If this interpreter supports clearing the exception information
# we do that now. This will only go into effect on Python 2.x,
# on 3.x it disappears automatically at the end of the exception
# stack.
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
request_close = getattr(self.request, 'close', None)
if request_close is not None:
request_close()
clear_request = True
rv = _request_ctx_stack.pop()
assert rv is self, 'Popped wrong request context. (%r instead of %r)' \
% (rv, self)
# get rid of circular dependencies at the end of the request
# so that we don't require the GC to be active.
if clear_request:
rv.request.environ['werkzeug.request'] = None
# Get rid of the app as well if necessary.
if app_ctx is not None:
app_ctx.pop(exc)
def auto_pop(self, exc):
if self.request.environ.get('flask._preserve_context') or \
(exc is not None and self.app.preserve_context_on_exception):
self.preserved = True
self._preserved_exc = exc
else:
self.pop(exc)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
# do not pop the request stack if we are in debug mode and an
# exception happened. This will allow the debugger to still
# access the request object in the interactive shell. Furthermore
# the context can be force kept alive for the test client.
# See flask.testing for how this works.
self.auto_pop(exc_value)
if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
reraise(exc_type, exc_value, tb)
def __repr__(self):
return '<%s \'%s\' [%s] of %s>' % (
self.__class__.__name__,
self.request.url,
self.request.method,
self.app.name,
)
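# Example (hedged sketch, not part of the original module): request contexts
# are normally created through the Flask helpers rather than by instantiating
# this class directly::
#
#     from flask import Flask, request
#
#     app = Flask(__name__)
#     with app.test_request_context('/hello', method='POST'):
#         assert request.path == '/hello'
#         assert request.method == 'POST'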
|
|
"""
deviantart.api
^^^^^^^^^^^^^^
A Python wrapper for the DeviantArt API
:copyright: (c) 2015 by Kevin Eichhorn
"""
from __future__ import absolute_import
try:
from urllib import urlencode
from urllib2 import HTTPError
except ImportError:
from urllib.parse import urlencode
from urllib.error import HTTPError
from sanction import Client
from .deviation import Deviation
from .user import User
from .comment import Comment
from .status import Status
from .message import Message
class DeviantartError(Exception):
"""Representing API Errors"""
@property
def message(self):
return self.args[0]
class Api(object):
"""The API Interface (handles requests to the DeviantArt API)
:param client_id: client_id provided by DeviantArt
:param client_secret: client_secret provided by DeviantArt
:param standard_grant_type: The authorization grant type to use: client_credentials (read-only) or authorization_code
:param scope: The scope of data the application can access
"""
def __init__(
self,
client_id,
client_secret,
redirect_uri="",
standard_grant_type="client_credentials",
scope="browse feed message note stash user user.manage comment.post collection"
):
"""Instantiate Class and create OAuth Client"""
self.client_id = client_id
self.client_secret = client_secret
self.auth_endpoint = "https://www.deviantart.com/oauth2/authorize"
self.token_endpoint = "https://www.deviantart.com/oauth2/token"
self.resource_endpoint = "https://www.deviantart.com/api/v1/oauth2"
self.redirect_uri = redirect_uri
self.standard_grant_type = standard_grant_type
self.scope = scope
self.access_token = None
self.refresh_token = None
self.oauth = Client(
auth_endpoint=self.auth_endpoint,
token_endpoint=self.token_endpoint,
resource_endpoint=self.resource_endpoint,
client_id=self.client_id,
client_secret=self.client_secret,
)
if self.standard_grant_type == "client_credentials":
self.auth()
def auth(self, code="", refresh_token=""):
"""Authenticates user and retrieves (and refreshes) access token
:param code: code provided after redirect (authorization_code only)
:param refresh_token: the refresh_token to update access_token without authorization
"""
if refresh_token:
try:
self.oauth.request_token(grant_type="refresh_token", refresh_token=refresh_token)
self.refresh_token = self.oauth.refresh_token
except HTTPError as e:
if e.code == 401:
raise DeviantartError("Unauthorized: Please check your credentials (client_id and client_secret).")
else:
raise DeviantartError(e)
elif self.standard_grant_type == "authorization_code":
try:
self.oauth.request_token(grant_type=self.standard_grant_type, redirect_uri=self.redirect_uri, code=code)
self.refresh_token = self.oauth.refresh_token
except HTTPError as e:
if e.code == 401:
raise DeviantartError("Unauthorized: Please check your credentials (client_id and client_secret).")
else:
raise DeviantartError(e)
elif self.standard_grant_type == "client_credentials":
try:
self.oauth.request_token(grant_type=self.standard_grant_type)
except HTTPError as e:
if e.code == 401:
raise DeviantartError("Unauthorized: Please check your credentials (client_id and client_secret).")
else:
raise DeviantartError(e)
else:
raise DeviantartError('Unknown grant type.')
self.access_token = self.oauth.access_token
@property
def auth_uri(self):
"""The authorzation URL that should be provided to the user"""
return self.oauth.auth_uri(redirect_uri=self.redirect_uri, scope=self.scope)
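# Example (hedged sketch; CLIENT_ID, CLIENT_SECRET, REDIRECT_URI and CODE are
# placeholders obtained from DeviantArt, not values defined in this module):
#
#     # read-only access with the default client_credentials grant
#     da = Api(CLIENT_ID, CLIENT_SECRET)
#
#     # full access with the authorization_code grant: send the user to
#     # ``auth_uri``, then exchange the code they come back with
#     da = Api(CLIENT_ID, CLIENT_SECRET, redirect_uri=REDIRECT_URI,
#              standard_grant_type="authorization_code")
#     print(da.auth_uri)
#     da.auth(code=CODE)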
def browse_dailydeviations(self):
"""Retrieves Daily Deviations"""
response = self._req('/browse/dailydeviations')
deviations = []
for item in response['results']:
d = Deviation()
d.from_dict(item)
deviations.append(d)
return deviations
def browse_userjournals(self, username, featured=False, offset=0, limit=10):
"""Fetch user journals from user
:param username: name of user to retrieve journals from
:param featured: fetch only featured or not
:param offset: the pagination offset
:param limit: the pagination limit
"""
response = self._req('/browse/user/journals', {
"username":username,
"featured":featured,
"offset":offset,
"limit":limit
})
deviations = []
for item in response['results']:
d = Deviation()
d.from_dict(item)
deviations.append(d)
return {
"results" : deviations,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def browse_morelikethis_preview(self, seed):
"""Fetch More Like This preview result for a seed deviation
:param seed: The deviationid to fetch more like
"""
response = self._req('/browse/morelikethis/preview', {
"seed":seed
})
returned_seed = response['seed']
author = User()
author.from_dict(response['author'])
more_from_artist = []
for item in response['more_from_artist']:
d = Deviation()
d.from_dict(item)
more_from_artist.append(d)
more_from_da = []
for item in response['more_from_da']:
d = Deviation()
d.from_dict(item)
more_from_da.append(d)
return {
"seed" : returned_seed,
"author" : author,
"more_from_artist" : more_from_artist,
"more_from_da" : more_from_da
}
def browse(self, endpoint="hot", category_path="", seed="", q="", timerange="24hr", tag="", offset=0, limit=10):
"""Fetch deviations from public endpoints
:param endpoint: The endpoint from which the deviations will be fetched (hot/morelikethis/newest/undiscovered/popular/tags)
:param category_path: category path to fetch from
:param q: Search query term
:param timerange: The timerange
:param tag: The tag to browse
:param offset: the pagination offset
:param limit: the pagination limit
"""
if endpoint == "hot":
response = self._req('/browse/hot', {
"category_path":category_path,
"offset":offset,
"limit":limit
})
elif endpoint == "morelikethis":
if seed:
response = self._req('/browse/morelikethis', {
"seed":seed,
"category_path":category_path,
"offset":offset,
"limit":limit
})
else:
raise DeviantartError("No seed defined.")
elif endpoint == "newest":
response = self._req('/browse/newest', {
"category_path":category_path,
"q":q,
"offset":offset,
"limit":limit
})
elif endpoint == "undiscovered":
response = self._req('/browse/undiscovered', {
"category_path":category_path,
"offset":offset,
"limit":limit
})
elif endpoint == "popular":
response = self._req('/browse/popular', {
"category_path":category_path,
"q":q,
"timerange":timerange,
"offset":offset,
"limit":limit
})
elif endpoint == "tags":
if tag:
response = self._req('/browse/tags', {
"tag":tag,
"offset":offset,
"limit":limit
})
else:
raise DeviantartError("No tag defined.")
else:
raise DeviantartError("Unknown endpoint.")
deviations = []
for item in response['results']:
d = Deviation()
d.from_dict(item)
deviations.append(d)
return {
"results" : deviations,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
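# Example (hedged sketch, assuming an authenticated ``da = Api(...)`` as in
# the earlier example):
#
#     page = da.browse(endpoint="popular", q="landscape",
#                      timerange="24hr", limit=5)
#     for deviation in page['results']:
#         print(deviation)
#     if page['has_more']:
#         page = da.browse(endpoint="popular", q="landscape",
#                          offset=page['next_offset'], limit=5)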
def get_categories(self, catpath="/"):
"""Fetch the categorytree
:param catpath: The category to list children of
"""
response = self._req('/browse/categorytree', {
"catpath":catpath
})
categories = response['categories']
return categories
def search_tags(self, tag_name):
"""Searches for tags
:param tag_name: Partial tag name to get autocomplete suggestions for
"""
response = self._req('/browse/tags/search', {
"tag_name":tag_name
})
tags = list()
for item in response['results']:
tags.append(item['tag_name'])
return tags
def get_deviation(self, deviationid):
"""Fetch a single deviation
:param deviationid: The deviationid you want to fetch
"""
response = self._req('/deviation/{}'.format(deviationid))
d = Deviation()
d.from_dict(response)
return d
def whofaved_deviation(self, deviationid, offset=0, limit=10):
"""Fetch a list of users who faved the deviation
:param deviationid: The deviationid you want to fetch
:param offset: the pagination offset
:param limit: the pagination limit
"""
response = self._req('/deviation/whofaved', get_data={
'deviationid' : deviationid,
'offset' : offset,
'limit' : limit
})
users = []
for item in response['results']:
u = {}
u['user'] = User()
u['user'].from_dict(item['user'])
u['time'] = item['time']
users.append(u)
return {
"results" : users,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def get_deviation_metadata(self, deviationids, ext_submission=False, ext_camera=False, ext_stats=False, ext_collection=False):
"""Fetch deviation metadata for a set of deviations
:param deviationids: The deviationids you want metadata for
:param ext_submission: Return extended information - submission information
:param ext_camera: Return extended information - EXIF information (if available)
:param ext_stats: Return extended information - deviation statistics
:param ext_collection: Return extended information - favourited folder information
"""
response = self._req('/deviation/metadata', {
'ext_submission' : ext_submission,
'ext_camera' : ext_camera,
'ext_stats' : ext_stats,
'ext_collection' : ext_collection
},
post_data={
'deviationids[]' : deviationids
})
metadata = []
for item in response['metadata']:
m = {}
m['deviationid'] = item['deviationid']
m['printid'] = item['printid']
m['author'] = User()
m['author'].from_dict(item['author'])
m['is_watching'] = item['is_watching']
m['title'] = item['title']
m['description'] = item['description']
m['license'] = item['license']
m['allows_comments'] = item['allows_comments']
m['tags'] = item['tags']
m['is_favourited'] = item['is_favourited']
m['is_mature'] = item['is_mature']
if "submission" in item:
m['submission'] = item['submission']
if "camera" in item:
m['camera'] = item['camera']
if "collections" in item:
m['collections'] = item['collections']
metadata.append(m)
return metadata
def get_deviation_embeddedcontent(self, deviationid, offset_deviationid="", offset=0, limit=10):
"""Fetch content embedded in a deviation
:param deviationid: The deviationid of container deviation
:param offset_deviationid: UUID of embedded deviation to use as an offset
:param offset: the pagination offset
:param limit: the pagination limit
"""
response = self._req('/deviation/embeddedcontent', {
'deviationid' : deviationid,
'offset_deviationid' : offset_deviationid,
'offset' : offset,
'limit' : limit
})
deviations = []
for item in response['results']:
d = Deviation()
d.from_dict(item)
deviations.append(d)
return {
"results" : deviations,
"has_less" : response['has_less'],
"has_more" : response['has_more'],
"prev_offset" : response['prev_offset'],
"next_offset" : response['next_offset']
}
def get_deviation_content(self, deviationid):
"""Fetch full data that is not included in the main devaition object
The endpoint works with journals and literatures. Deviation objects returned from API contain only excerpt of a journal, use this endpoint to load full content.
Any custom CSS rules and fonts applied to journal are also returned.
:param deviationid: UUID of the deviation to fetch full data for
"""
response = self._req('/deviation/content', {
'deviationid':deviationid
})
content = {}
if "html" in response:
content['html'] = response['html']
if "css" in response:
content['css'] = response['css']
if "css_fonts" in response:
content['css_fonts'] = response['css_fonts']
return content
def download_deviation(self, deviationid):
"""Get the original file download (if allowed)
:param deviationid: The deviationid you want download info for
"""
response = self._req('/deviation/download/{}'.format(deviationid))
return {
'src' : response['src'],
'width' : response['width'],
'height' : response['height'],
'filesize' : response['filesize']
}
def get_collections(self, username="", calculate_size=False, ext_preload=False, offset=0, limit=10):
"""Fetch collection folders
:param username: The user to list folders for, if omitted the authenticated user is used
:param calculate_size: The option to include the content count for each collection folder
:param ext_preload: Include first 5 deviations from the folder
:param offset: the pagination offset
:param limit: the pagination limit
"""
if not username and self.standard_grant_type == "authorization_code":
response = self._req('/collections/folders', {
"calculate_size":calculate_size,
"ext_preload":ext_preload,
"offset":offset,
"limit":limit
})
else:
if not username:
raise DeviantartError("No username defined.")
else:
response = self._req('/collections/folders', {
"username":username,
"calculate_size":calculate_size,
"ext_preload":ext_preload,
"offset":offset,
"limit":limit
})
folders = []
for item in response['results']:
f = {}
f['folderid'] = item['folderid']
f['name'] = item['name']
if "size" in item:
f['size'] = item['size']
if "deviations" in item:
f['deviations'] = []
for deviation_item in item['deviations']:
d = Deviation()
d.from_dict(deviation_item)
f['deviations'].append(d)
folders.append(f)
return {
"results" : folders,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def get_collection(self, folderid, username="", offset=0, limit=10):
"""Fetch collection folder contents
:param folderid: UUID of the folder to list
:param username: The user to list folders for, if omitted the authenticated user is used
:param offset: the pagination offset
:param limit: the pagination limit
"""
if not username and self.standard_grant_type == "authorization_code":
response = self._req('/collections/{}'.format(folderid), {
"offset":offset,
"limit":limit
})
else:
if not username:
raise DeviantartError("No username defined.")
else:
response = self._req('/collections/{}'.format(folderid), {
"username":username,
"offset":offset,
"limit":limit
})
deviations = []
for item in response['results']:
d = Deviation()
d.from_dict(item)
deviations.append(d)
if "name" in response:
name = response['name']
else:
name = None
return {
"results" : deviations,
"name" : name,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def fave(self, deviationid, folderid=""):
"""Add deviation to favourites
:param deviationid: Id of the Deviation to favourite
:param folderid: Optional UUID of the Collection folder to add the favourite into
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
post_data = {}
post_data['deviationid'] = deviationid
if folderid:
post_data['folderid'] = folderid
response = self._req('/collections/fave', post_data = post_data)
return response
def unfave(self, deviationid, folderid=""):
"""Remove deviation from favourites
:param deviationid: Id of the Deviation to unfavourite
:param folderid: Optional UUID of the single collection folder to remove the favourite from
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
post_data = {}
post_data['deviationid'] = deviationid
if folderid:
post_data['folderid'] = folderid
response = self._req('/collections/unfave', post_data = post_data)
return response
def get_gallery_folders(self, username="", calculate_size=False, ext_preload=False, offset=0, limit=10):
"""Fetch gallery folders
:param username: The user to list folders for, if omitted the authenticated user is used
:param calculate_size: The option to include the content count for each gallery folder
:param ext_preload: Include first 5 deviations from the folder
:param offset: the pagination offset
:param limit: the pagination limit
"""
if not username and self.standard_grant_type == "authorization_code":
response = self._req('/gallery/folders', {
"calculate_size":calculate_size,
"ext_preload":ext_preload,
"offset":offset,
"limit":limit
})
else:
if not username:
raise DeviantartError("No username defined.")
else:
response = self._req('/gallery/folders', {
"username":username,
"calculate_size":calculate_size,
"ext_preload":ext_preload,
"offset":offset,
"limit":limit
})
folders = []
for item in response['results']:
f = {}
f['folderid'] = item['folderid']
f['name'] = item['name']
if "parent" in item:
f['parent'] = item['parent']
if "deviations" in item:
f['deviations'] = []
for deviation_item in item['deviations']:
d = Deviation()
d.from_dict(deviation_item)
f['deviations'].append(d)
folders.append(f)
return {
"results" : folders,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def get_gallery_all(self, username='', offset=0, limit=10):
"""
Get all of a user's deviations
:param username: The user to query (required; this endpoint always needs a username)
:param offset: the pagination offset
:param limit: the pagination limit
"""
if not username:
raise DeviantartError('No username defined.')
response = self._req('/gallery/all', {'username': username,
'offset': offset,
'limit': limit})
deviations = []
for item in response['results']:
d = Deviation()
d.from_dict(item)
deviations.append(d)
if "name" in response:
name = response['name']
else:
name = None
return {
"results": deviations,
"name": name,
"has_more": response['has_more'],
"next_offset": response['next_offset']
}
def get_gallery_folder(self, username="", folderid="", mode="popular", offset=0, limit=10):
"""Fetch gallery folder contents
:param username: The user to query, defaults to current user
:param folderid: UUID of the folder to list, if omitted query ALL folders
:param mode: Sort results by either newest or popular
:param offset: the pagination offset
:param limit: the pagination limit
"""
if not username and self.standard_grant_type == "authorization_code":
response = self._req('/gallery/{}'.format(folderid), {
"mode":mode,
"offset":offset,
"limit":limit
})
else:
if not username:
raise DeviantartError("No username defined.")
else:
response = self._req('/gallery/{}'.format(folderid), {
"username":username,
"mode":mode,
"offset":offset,
"limit":limit
})
deviations = []
for item in response['results']:
d = Deviation()
d.from_dict(item)
deviations.append(d)
if "name" in response:
name = response['name']
else:
name = None
return {
"results" : deviations,
"name" : name,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def get_user(self, username="", ext_collections=False, ext_galleries=False):
"""Get user profile information
:param username: username to lookup profile of
:param ext_collections: Include collection folder info
:param ext_galleries: Include gallery folder info
"""
if not username and self.standard_grant_type == "authorization_code":
response = self._req('/user/whoami')
u = User()
u.from_dict(response)
else:
if not username:
raise DeviantartError("No username defined.")
else:
response = self._req('/user/profile/{}'.format(username), {
'ext_collections' : ext_collections,
'ext_galleries' : ext_galleries
})
u = User()
u.from_dict(response['user'])
return u
# def search_friends(self, q, username=""):
#
# if not username and self.standard_grant_type == "authorization_code":
# response = self._req('/user/friends/search', {
# "q":q
# })
# else:
# if not username:
# raise DeviantartError("No username defined.")
# else:
# response = self._req('/user/friends/search', {
# "username":username,
# "q":q
# })
#
# friends = []
#
# for item in response['results']:
# f = User()
# f.from_dict(item)
#
# return friends
def get_users(self, usernames):
"""Fetch user info for given usernames
:param usernames: The usernames you want metadata for (max. 50)
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/whois', post_data={
"usernames":usernames
})
users = []
for item in response['results']:
u = User()
u.from_dict(item)
users.append(u)
return users
def watch(
self,
username,
watch={
"friend":True,
"deviations":True,
"journals":True,
"forum_threads":True,
"critiques":True,
"scraps":True,
"activity":True,
"collections":True
}
):
"""Watch a user
:param username: The username you want to watch
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/friends/watch/{}'.format(username), post_data={
"watch[friend]": watch['friend'],
"watch[deviations]": watch['deviations'],
"watch[journals]": watch['journals'],
"watch[forum_threads]": watch['forum_threads'],
"watch[critiques]": watch['critiques'],
"watch[scraps]": watch['scraps'],
"watch[activity]": watch['activity'],
"watch[collections]": watch['collections'],
})
return response['success']
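# Example (hedged sketch, requires the authorization_code grant; note that a
# custom ``watch`` dict must contain every key shown in the default above):
#
#     da.watch("some-artist")                 # watch everything (defaults)
#     da.watch("some-artist", watch={
#         "friend": True, "deviations": True, "journals": False,
#         "forum_threads": False, "critiques": False, "scraps": False,
#         "activity": False, "collections": False,
#     })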
def unwatch(self, username):
"""Unwatch a user
:param username: The username you want to unwatch
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/friends/unwatch/{}'.format(username))
return response['success']
def is_watching(self, username):
"""Check if user is being watched by the given user
:param username: Check if username is watching you
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/friends/watching/{}'.format(username))
return response['watching']
def update_user(self, user_is_artist="", artist_level="", artist_specialty="", real_name="", tagline="", countryid="", website="", bio=""):
"""Update the users profile information
:param user_is_artist: Is the user an artist?
:param artist_level: If the user is an artist, what level are they
:param artist_specialty: If the user is an artist, what is their specialty
:param real_name: The user's real name
:param tagline: The user's tagline
:param countryid: The user's location
:param website: The user's personal website
:param bio: The user's bio
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
post_data = {}
if user_is_artist:
post_data["user_is_artist"] = user_is_artist
if artist_level:
post_data["artist_level"] = artist_level
if artist_specialty:
post_data["artist_specialty"] = artist_specialty
if real_name:
post_data["real_name"] = real_name
if tagline:
post_data["tagline"] = tagline
if countryid:
post_data["countryid"] = countryid
if website:
post_data["website"] = website
if bio:
post_data["bio"] = bio
response = self._req('/user/profile/update', post_data=post_data)
return response['success']
def get_damntoken(self):
"""Retrieve the dAmn auth token required to connect to the dAmn servers"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/damntoken')
return response['damntoken']
def get_watchers(self, username, offset=0, limit=10):
"""Get the user's list of watchers
:param username: The username you want to get a list of watchers of
:param offset: the pagination offset
:param limit: the pagination limit
"""
response = self._req('/user/watchers/{}'.format(username), {
'offset' : offset,
'limit' : limit
})
watchers = []
for item in response['results']:
w = {}
w['user'] = User()
w['user'].from_dict(item['user'])
w['is_watching'] = item['is_watching']
w['lastvisit'] = item['lastvisit']
w['watch'] = {
"friend" : item['watch']['friend'],
"deviations" : item['watch']['deviations'],
"journals" : item['watch']['journals'],
"forum_threads" : item['watch']['forum_threads'],
"critiques" : item['watch']['critiques'],
"scraps" : item['watch']['scraps'],
"activity" : item['watch']['activity'],
"collections" : item['watch']['collections']
}
watchers.append(w)
return {
"results" : watchers,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def get_friends(self, username, offset=0, limit=10):
"""Get the users list of friends
:param username: The username you want to get a list of friends of
:param offset: the pagination offset
:param limit: the pagination limit
"""
response = self._req('/user/friends/{}'.format(username), {
'offset' : offset,
'limit' : limit
})
friends = []
for item in response['results']:
f = {}
f['user'] = User()
f['user'].from_dict(item['user'])
f['is_watching'] = item['is_watching']
f['lastvisit'] = item['lastvisit']
f['watch'] = {
"friend" : item['watch']['friend'],
"deviations" : item['watch']['deviations'],
"journals" : item['watch']['journals'],
"forum_threads" : item['watch']['forum_threads'],
"critiques" : item['watch']['critiques'],
"scraps" : item['watch']['scraps'],
"activity" : item['watch']['activity'],
"collections" : item['watch']['collections']
}
friends.append(f)
return {
"results" : friends,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def get_statuses(self, username, offset=0, limit=10):
"""Fetch status updates of a user
:param username: The username you want to get a list of status updates from
:param offset: the pagination offset
:param limit: the pagination limit
"""
response = self._req('/user/statuses/', {
"username" : username,
'offset' : offset,
'limit' : limit
})
statuses = []
for item in response['results']:
s = Status()
s.from_dict(item)
statuses.append(s)
return {
"results" : statuses,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def get_status(self, statusid):
"""Fetch the status
:param statusid: Status uuid
"""
response = self._req('/user/statuses/{}'.format(statusid))
s = Status()
s.from_dict(response)
return s
def post_status(self, body="", id="", parentid="", stashid=""):
"""Post a status
:param body: The body of the status
:param id: The id of the object you wish to share
:param parentid: The parentid of the object you wish to share
:param stashid: The stashid of the object you wish to add to the status
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/statuses/post', post_data={
"body":body,
"id":id,
"parentid":parentid,
"stashid":stashid
})
return response['statusid']
def get_countries(self):
"""Get a list of countries"""
response = self._req('/data/countries')
countries = response['results']
return countries
def get_data(self, endpoint="privacy"):
"""Returns policies of DeviantArt"""
if endpoint == "privacy":
response = self._req('/data/privacy')
elif endpoint == "submission":
response = self._req('/data/submission')
elif endpoint == "tos":
response = self._req('/data/tos')
else:
raise DeviantartError("Unknown endpoint.")
return response['text']
def get_comments(self, endpoint="deviation", deviationid="", commentid="", username="", statusid="", ext_item=False, offset=0, limit=10, maxdepth=0):
"""Fetch comments
:param endpoint: The source/endpoint you want to fetch comments from (deviation/profile/status/siblings)
:param deviationid: The deviationid you want to fetch
:param commentid: The commentid you want to fetch
:param username: The username you want to get a list of status updates from
:param statusid: The statusid you want to fetch
:param ext_item: Include the item the comments are attached to (siblings endpoint only)
:param offset: the pagination offset
:param limit: the pagination limit
:param maxdepth: Depth to query replies until
"""
if endpoint == "deviation":
if deviationid:
response = self._req('/comments/{}/{}'.format(endpoint, deviationid), {
"commentid" : commentid,
'offset' : offset,
'limit' : limit,
'maxdepth' : maxdepth
})
else:
raise DeviantartError("No deviationid defined.")
elif endpoint == "profile":
if username:
response = self._req('/comments/{}/{}'.format(endpoint, username), {
"commentid" : commentid,
'offset' : offset,
'limit' : limit,
'maxdepth' : maxdepth
})
else:
raise DeviantartError("No username defined.")
elif endpoint == "status":
if statusid:
response = self._req('/comments/{}/{}'.format(endpoint, statusid), {
"commentid" : commentid,
'offset' : offset,
'limit' : limit,
'maxdepth' : maxdepth
})
else:
raise DeviantartError("No statusid defined.")
elif endpoint == "siblings":
if commentid:
response = self._req('/comments/{}/{}'.format(commentid, endpoint), {
"ext_item" : ext_item,
'offset' : offset,
'limit' : limit
})
else:
raise DeviantartError("No commentid defined.")
else:
raise DeviantartError("Unknown endpoint.")
comments = []
for item in response['thread']:
c = Comment()
c.from_dict(item)
comments.append(c)
return {
"thread" : comments,
"has_less" : response['has_less'],
"has_more" : response['has_more'],
"prev_offset" : response['prev_offset'],
"next_offset" : response['next_offset']
}
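# Example (hedged sketch; DEVIATION_UUID is a placeholder and ``da`` is an
# authenticated Api instance as in the earlier example):
#
#     thread = da.get_comments(endpoint="deviation",
#                              deviationid=DEVIATION_UUID,
#                              limit=10, maxdepth=2)
#     for comment in thread['thread']:
#         print(comment)
#     if thread['has_more']:
#         more = da.get_comments(endpoint="deviation",
#                                deviationid=DEVIATION_UUID,
#                                offset=thread['next_offset'], limit=10)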
def post_comment(self, target, body, comment_type="profile", commentid=""):
"""Post comment
:param target: The target you want to post the comment to (username/deviation UUID/status UUID)
:param body: The comment text
:param comment_type: The type of entry you want to post your comment to
:param commentid: The commentid you are replying to
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
if comment_type == "profile":
response = self._req('/comments/post/profile/{}'.format(target), post_data={
"body":body,
"commentid":commentid
})
elif comment_type == "deviation":
response = self._req('/comments/post/deviation/{}'.format(target), post_data={
"body":body,
"commentid":commentid
})
elif comment_type == "status":
response = self._req('/comments/post/status/{}'.format(target), post_data={
"body":body,
"commentid":commentid
})
else:
raise DeviantartError("Unknown comment type.")
comment = Comment()
comment.from_dict(response)
return comment
def get_messages(self, folderid="", stack=1, cursor=""):
"""Feed of all messages
:param folderid: The folder to fetch messages from, defaults to inbox
:param stack: True to use stacked mode, false to use flat mode
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/messages/feed', {
'folderid' : folderid,
'stack' : stack,
'cursor' : cursor
})
messages = []
for item in response['results']:
m = Message()
m.from_dict(item)
messages.append(m)
return {
"results" : messages,
"has_more" : response['has_more'],
"cursor" : response['cursor']
}
def delete_message(self, messageid="", folderid="", stackid=""):
"""Delete a message or a message stack
:param folderid: The folder to delete the message from, defaults to inbox
:param messageid: The message to delete
:param stackid: The stack to delete
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/messages/delete', post_data={
'folderid' : folderid,
'messageid' : messageid,
'stackid' : stackid
})
return response
def get_feedback(self, feedbacktype="comments", folderid="", stack=1, offset=0, limit=10):
"""Fetch feedback messages
:param feedbacktype: Type of feedback messages to fetch (comments/replies/activity)
:param folderid: The folder to fetch messages from, defaults to inbox
:param stack: True to use stacked mode, false to use flat mode
:param offset: the pagination offset
:param limit: the pagination limit
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/messages/feedback', {
'type' : feedbacktype,
'folderid' : folderid,
'stack' : stack,
'offset' : offset,
'limit' : limit
})
messages = []
for item in response['results']:
m = Message()
m.from_dict(item)
messages.append(m)
return {
"results" : messages,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def get_feedback_in_stack(self, stackid, offset=0, limit=10):
"""Fetch feedback messages in a stack
:param stackid: Id of the stack
:param offset: the pagination offset
:param limit: the pagination limit
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/messages/feedback/{}'.format(stackid), {
'offset' : offset,
'limit' : limit
})
messages = []
for item in response['results']:
m = Message()
m.from_dict(item)
messages.append(m)
return {
"results" : messages,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def get_mentions(self, folderid="", stack=1, offset=0, limit=10):
"""Fetch mention messages
:param folderid: The folder to fetch messages from, defaults to inbox
:param stack: True to use stacked mode, false to use flat mode
:param offset: the pagination offset
:param limit: the pagination limit
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/messages/mentions', {
'folderid' : folderid,
'stack' : stack,
'offset' : offset,
'limit' : limit
})
messages = []
for item in response['results']:
m = Message()
m.from_dict(item)
messages.append(m)
return {
"results" : messages,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def get_mentions_in_stack(self, stackid, offset=0, limit=10):
"""Fetch mention messages in a stack
:param stackid: Id of the stack
:param offset: the pagination offset
:param limit: the pagination limit
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/messages/mentions/{}'.format(stackid), {
'offset' : offset,
'limit' : limit
})
messages = []
for item in response['results']:
m = Message()
m.from_dict(item)
messages.append(m)
return {
"results" : messages,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def get_notes(self, folderid="", offset=0, limit=10):
"""Fetch notes
:param folderid: The UUID of the folder to fetch notes from
:param offset: the pagination offset
:param limit: the pagination limit
"""
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes', {
'folderid' : folderid,
'offset' : offset,
'limit' : limit
})
notes = []
for item in response['results']:
n = {}
n['noteid'] = item['noteid']
n['ts'] = item['ts']
n['unread'] = item['unread']
n['starred'] = item['starred']
n['sent'] = item['sent']
n['subject'] = item['subject']
n['preview'] = item['preview']
n['body'] = item['body']
n['user'] = User()
n['user'].from_dict(item['user'])
n['recipients'] = []
for recipient_item in item['recipients']:
u = User()
u.from_dict(recipient_item)
n['recipients'].append(u)
notes.append(n)
return {
"results" : notes,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
}
def get_note(self, noteid):
"""Fetch a single note
        :param noteid: The UUID of the note
        """
        if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/{}'.format(noteid))
return response
    def send_note(self, to, subject="", body="", noteid=""):
        """Send a note
        :param to: The username(s) that this note is to
        :param subject: The subject of the note
        :param body: The body of the note
        :param noteid: The UUID of the note that is being responded to
        """
        if self.standard_grant_type != "authorization_code":
            raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
        response = self._req('/notes/send', post_data={
            'to[]' : to,
            'subject' : subject,
            'body' : body,
            'noteid' : noteid
})
sent_notes = []
for item in response['results']:
n = {}
n['success'] = item['success']
n['user'] = User()
n['user'].from_dict(item['user'])
sent_notes.append(n)
return sent_notes
def move_notes(self, noteids, folderid):
"""Move notes to a folder
:param noteids: The noteids to move
:param folderid: The folderid to move notes to
"""
        if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/move', post_data={
'noteids[]' : noteids,
'folderid' : folderid
})
return response
def delete_notes(self, noteids):
"""Delete a note or notes
:param noteids: The noteids to delete
"""
        if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/delete', post_data={
'noteids[]' : noteids
})
return response
def mark_notes(self, noteids, mark_as):
"""Mark notes
        :param noteids: The noteids to mark
        :param mark_as: Mark notes as (read/unread/starred/notstarred/spam)
        """
        if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/mark', post_data={
'noteids[]' : noteids,
'mark_as' : mark_as
})
return response
def get_notes_folders(self):
"""Fetch note folders"""
        if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/folders')
return response['results']
def create_notes_folder(self, title, parentid=""):
"""Create new folder
:param title: The title of the folder to create
:param parentid: The UUID of the parent folder
"""
        if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/folders/create', post_data={
'title' : title,
'parentid' : parentid
})
return response
def rename_notes_folder(self, title, folderid):
"""Rename a folder
:param title: New title of the folder
:param folderid: The UUID of the folder to rename
"""
        if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/folders/rename/{}'.format(folderid), post_data={
'title' : title
})
return response
def delete_notes_folder(self, folderid):
"""Delete note folder
:param folderid: The UUID of the folder to delete
"""
        if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/folders/remove/{}'.format(folderid))
return response
def _req(self, endpoint, get_data=dict(), post_data=dict()):
"""Helper method to make API calls
:param endpoint: The endpoint to make the API call to
        :param get_data: data sent via GET
        :param post_data: data sent via POST
"""
if get_data:
request_parameter = "{}?{}".format(endpoint, urlencode(get_data))
else:
request_parameter = endpoint
try:
encdata = urlencode(post_data, True).encode('utf-8')
response = self.oauth.request(request_parameter, data=encdata)
self._checkResponseForErrors(response)
except HTTPError as e:
raise DeviantartError(e)
return response
def _checkResponseForErrors(self, response):
"""Checks response for API errors"""
if 'error' in response:
raise DeviantartError(response['error_description'])
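# Illustrative usage sketch (an assumption about how the surrounding client
# class is exposed, e.g. as ``deviantart.Api``; not part of the library code).
# Every messaging/notes endpoint above requires the OAuth2 Authorization Code
# grant, i.e. ``standard_grant_type == "authorization_code"``:
#
#     da = Api(...)  # client constructed elsewhere with the Authorization Code grant
#     page = da.get_feedback(feedbacktype="comments", limit=5)
#     for message in page["results"]:  # deviantart Message objects
#         print(message)
#     if page["has_more"]:
#         page = da.get_feedback(feedbacktype="comments",
#                                offset=page["next_offset"])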
|
|
#Distributed under the MIT license.
#Copyright (c) 2014 Dave McCoy (dave.mccoy@cospandesign.com)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
""" UART
Facilitates communication with the UART core independent of communication
medium
For more details see:
http://wiki.cospandesign.com/index.php?title=Wb_uart
"""
__author__ = 'dave.mccoy@cospandesign.com (Dave McCoy)'
import sys
import os
import time
from array import array as Array
import driver
COSPAN_DESIGN_UART_MODULE = 0x01
#Register Constants
CONTROL = 0
STATUS = 1
PRESCALER = 2
CLOCK_DIVIDER = 3
WRITE_AVAILABLE = 4
WRITE_DATA = 5
READ_COUNT = 6
READ_DATA = 7
#Control Bit values
CONTROL_RESET = 0
CONTROL_RTS_CTS_FC = 1
CONTROL_DTS_DSR_FC = 2
CONTROL_INT_READ = 3
CONTROL_INT_WRITE = 4
#Status Bit values
STATUS_OVFL_TX = 0
STATUS_OVFL_RX = 1
STATUS_UFL_RX = 2
STATUS_INT_READ = 3
STATUS_INT_WRITE = 4
class UARTError (Exception):
"""UART Error:
Errors associated with UART
UART Bus Busy
Incorrect Settings
"""
pass
class UART(driver.Driver):
"""UART
"""
@staticmethod
def get_abi_class():
return 0
@staticmethod
def get_abi_major():
return driver.get_device_id_from_name("uart")
@staticmethod
def get_abi_minor():
return COSPAN_DESIGN_UART_MODULE
def __init__(self, nysa, urn, debug = False):
super(UART, self).__init__(nysa, urn, debug)
def get_control(self):
"""get_control
reads the control register
Args:
Nothing
Return:
32-bit control register value
Raises:
NysaCommError: Error in communication
"""
return self.read_register(CONTROL)
def set_control(self, control):
"""set_control
write the control register
Args:
control: 32-bit control value
Return:
Nothing
Raises:
NysaCommError: Error in communication
"""
self.write_register(CONTROL, control)
def get_status(self):
"""get_status
get the status of the UART
*** NOTE:
*** because the status is reset by the core
*** the status should only be read once
*** Read the status with get_status
*** then perform the tests on the status
Args:
Nothing
Return:
32-bit status value
Raises:
NysaCommError: Error in communication
"""
self.status = self.read_register(STATUS)
return self.status
def reset(self):
self.get_status()
self.set_control(0)
self.set_register_bit(CONTROL, CONTROL_RESET)
def is_read_overflow(self):
"""is_read_overflow
        check if a read overflow occurred
*** NOTE:
*** because the status is reset by the core
*** the status should only be read once
*** Read the status with get_status
*** then perform the tests on the status
Args:
Nothing
Return:
            True: read overflow
            False: no read overflow
Raises:
NysaCommError
"""
#status = self.get_status()
        if ((self.status & (1 << STATUS_OVFL_RX)) > 0):
return True
return False
def is_write_overflow(self):
"""is_write_overflow
        check if the write buffer overflowed
*** NOTE:
*** because the status is reset by the core
*** the status should only be read once
*** Read the status with get_status
*** then perform the tests on the status
Args:
Nothing
Return:
True: Overflow
False: No Overflow
Raises:
NysaCommError
"""
#status = self.get_status()
        if ((self.status & (1 << STATUS_OVFL_TX)) > 0):
return True
return False
def is_read_underflow(self):
"""is_read_underflow
        check if more bytes were read than were available in the read FIFO
*** NOTE:
*** because the status is reset by the core
*** the status should only be read once
*** Read the status with get_status
*** then perform the tests on the status
Args:
Nothing
Return:
True: read underflow
False: not read underflow
Raises:
NysaCommError
"""
#status = self.get_status()
        if ((self.status & (1 << STATUS_UFL_RX)) > 0):
return True
return False
def is_read_interrupt(self):
"""is_read_interrupt
        test if a read interrupt has occurred
*** NOTE:
*** because the status is reset by the core
*** the status should only be read once
*** Read the status with get_status
*** then perform the tests on the status
Args:
Nothing
Return:
True: read interrupt
            False: read interrupt did not occur
Raises:
NysaCommError
"""
#status = self.get_status()
        if ((self.status & (1 << STATUS_INT_READ)) > 0):
return True
return False
def is_write_interrupt(self):
"""is_write_interrupt
        test if a write interrupt has occurred
*** NOTE:
*** because the status is reset by the core
*** the status should only be read once
*** Read the status with get_status
*** then perform the tests on the status
Args:
Nothing
Return:
True: write interrupt
False: not write interrupt
Raises:
NysaCommError
"""
#status = self.get_status()
        if ((self.status & (1 << STATUS_INT_WRITE)) > 0):
return True
return False
def write_string(self, string = ""):
"""write_string
Writes a string of data over the UART
Args:
string: String to send
Return:
Nothing
Raises:
NysaCommError
"""
if self.debug:
print "Writing a string"
data = Array('B')
print "string to write: %s" % string
print "Length of string: %d" % len(string)
data.fromstring(string)
print "string to write (as an array): %s" % data[:len(string)]
self.write_raw(data, len(string))
def write_byte(self, data):
"""write_byte
Writes a byte of data over the UART
Args:
            data: byte of data to send (as an int)
Return:
Nothing
Raises:
NysaCommError
"""
write_data = Array('B', [data])
self.write_raw(write_data, 1)
def write_raw(self, data, length):
"""write_raw
formats the data to write to the UART device
the format of the data can be found on
http://wiki.cospandesign.com/index.php?title=Wb_uart#Write_Data
Args:
data: data (in raw byte array) to send down to the UART
Returns:
Nothing
Raises:
NysaCommError: Error in communication
"""
if self.debug:
print "Writing to the UART device"
print "Data to send down: %s" % str(data)
print "Length of data to send down: %d" % length
data_array = Array('B')
data_array.extend([((length >> 8) & 0xFF), (((length) & 0xFF))])
data_array.extend(data[:length])
print "sending: %s" % str(data_array)
print "Length: %d" % length
        pad = (4 - (len(data_array) % 4)) % 4
        for i in range(0, pad):
            data_array.extend([0])
print "Final data array: %s" % data_array
self.write(WRITE_DATA, data_array)
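    # Illustrative framing example (derived from the code above, not from
    # separate documentation): a 5-byte payload "hello" goes out as
    #   [0x00, 0x05, 'h', 'e', 'l', 'l', 'o', 0x00]
    # i.e. a 16-bit big-endian length header, the payload, then zero padding
    # up to the next 32-bit word boundary.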
def read_string(self, count = -1):
"""read_string
Read a string of characters
Args:
            count: the number of characters to read
if -1 read all characters
Returns:
string
Raises:
NysaCommError: Error in communication
"""
if self.debug:
print "read_string: Read count: %d" % count
#print "count: %d" % count
data = Array('B')
if count == -1:
data = self.read_all_data()
else:
data = self.read_raw(count)
print "read_string: returned data: %s" % data
#byte_data = Array('B')
#for i in range (len(data) / 4):
# byte_data.append(data[i * 4])
#print "\tread_string: data: %s" % byte_data
print "\tread_string: data: %s" % str(data)
#string = byte_data.tostring()
string = data.tostring()
return string
def read_raw(self, count = 1):
"""read_raw
reads the number of bytes specified by count from the UART and
extracts/returns only the raw bytes to the user
Args:
count: the number of bytes to read from the UART core, if
left blank this will read just one byte
Returns:
An array of raw bytes read from the core
Raises:
NysaCommError: Error in communication
"""
available = self.get_read_count()
if available < count:
count = available
if count <= 4:
word_count = 1
else:
#count = ((count - 2) / 4) + 1
word_count = (count / 4) + 1
#Tell the core we are going to read the specified amount of bytes
self.write_register(READ_COUNT, count)
data = self.read(READ_DATA, word_count)[0:count]
#data = self.read(READ_DATA, word_count)
print "Reading %d bytes" % count
print "Output byte count: " + str(len(data))
print "Byte Data: %s" % str(data)
return data
def get_read_count(self):
"""get_read_count
reads the number of bytes available in the read FIFO
Args:
Nothing
Returns:
Number of bytes available in the read FIFO
Raises:
NysaCommError
"""
return self.read_register(READ_COUNT)
def read_all_data(self):
"""read_all_data
reads all the data in the UART read FIFO
Uses 'get_read_count' to find the number of bytes available
then read those number of bytes and return them to the user
Args:
Nothing
Returns:
An array of raw bytes read from the core
Raises:
NysaCommError: Error in communication
"""
if self.debug:
print "read all the data in the UART input FIFO"
count = self.get_read_count()
print "read_all_data: count: %d" % count
#while count > 0:
data = self.read_raw(count)
print "read_all_data: output data: %s" % str(data)
#count = self.get_read_count()
#time.sleep(0.05)
return data
def get_write_available(self):
"""get_write_available
returns the number of bytes that can be written into the write buffer
Args:
Nothing
Returns:
Number of bytes that can be written into the write buffer
Raises:
NysaCommError: Error in communication
"""
if self.debug:
print "getting available space in the write buffer"
return self.read_register(WRITE_AVAILABLE)
def get_baudrate(self):
"""get_baudrate
returns the baudrate of the UART
This function performs the calculations required to extract the baudrate
from the value within the UART core.
For details on the calculations see:
http://wiki.cospandesign.com/index.php?title=Wb_uart#Prescaler
Args:
Nothing
Return:
            The baudrate, e.g. 115200
Raises:
NysaCommError: Error in communication
"""
if self.debug:
print "getting baurdrate"
prescaler = self.read_register(PRESCALER)
print "prescaler: %d" % prescaler
clock_divider = self.read_register(CLOCK_DIVIDER)
print "clock divide: %d" % clock_divider
if prescaler == 0:
raise UARTError("Prescaler read from UART core is 0 (That's bad)")
return prescaler / clock_divider
def set_baudrate(self, baudrate=115200):
"""set_baudrate
sets the baudrate of the UART core
This function performs the required calculations to generate the correct
clock value used by the low level core.
For details on the calculations see:
http://wiki.cospandesign.com/index.php?title=Wb_uart#Clock_Divider
Args:
baudrate: e.g.: 115200
Returns:
Nothing
Raises:
NysaCommError: Error in communication
"""
if self.debug:
print "setting baudrate"
prescaler = self.read_register(PRESCALER)
clock_divider = prescaler / baudrate
self.write_register(CLOCK_DIVIDER, clock_divider)
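    # Worked example of the two calculations above (illustrative numbers only,
    # not taken from any particular board): if the core reports PRESCALER ==
    # 921600, then set_baudrate(115200) writes CLOCK_DIVIDER = 921600 / 115200
    # = 8, and get_baudrate() recovers 921600 / 8 == 115200.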
def enable_hard_flowcontrol(self):
"""enable_hard_flowcontrol
enables the use of CTS/RTS hardware flow control
Args:
Nothing
Returns:
Nothing
Raises:
NysaCommError: Error in communication
"""
if self.debug:
print "setting cts/rts flowcontrol"
self.set_register_bit(CONTROL, CONTROL_RTS_CTS_FC)
def enable_soft_flowcontrol(self):
"""enable_soft_flowcontrol
enables the use of XON XOFF software flow control
***NOTE THIS FUNCTION IS NOT IMPLEMENTED IN THE CORE YET***
Args:
Nothing
Returns:
Nothing
Raises:
NysaCommError: Error in communication
"""
Exception("Soft flow control not implemented yet!")
def disable_flowcontrol(self):
"""disable_flowcontrol
disable flow control (this is the default setting)
Args:
Nothing
Returns:
Nothing
Raises:
NysaCommError: Error in communication
"""
if self.debug:
print "Disable flow control"
control = self.get_control()
        control = control & ~((1 << CONTROL_RTS_CTS_FC) | (1 << CONTROL_DTS_DSR_FC))
self.set_control(control)
def enable_read_interrupt(self):
"""enable_read_interrupt
enable the read interrupt for the UART
"""
if self.debug:
print "Enable the read interrupt"
self.set_register_bit(CONTROL, CONTROL_INT_READ)
def disable_read_interrupt(self):
"""disable_read_interrupt
disable the read interrupt for the UART
"""
if self.debug:
print "Disable the read interrupt"
self.clear_register_bit(CONTROL, CONTROL_INT_READ)
def enable_write_interrupt(self):
"""enable_write_interrupt
Enable the write interrupt
"""
if self.debug:
print "Enable the write interrupt"
self.set_register_bit(CONTROL, CONTROL_INT_WRITE)
def disable_write_interrupt(self):
"""disable_write_interrupt
Disable the write interrupt
"""
if self.debug:
print "Disable the write interrupt"
self.clear_register_bit(CONTROL, CONTROL_INT_WRITE)
def disable_interrupts(self):
"""disable_interrupts
Disable all interrupts
"""
if self.debug:
print "Disable interrupts"
control = self.get_control()
        control = control & ~((1 << CONTROL_INT_WRITE) | (1 << CONTROL_INT_READ))
self.set_control(control)
self.get_status()
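# Illustrative usage sketch (assumes a Nysa platform object 'nysa' and a
# discovered wb_uart 'urn', which are set up elsewhere; not part of the
# driver itself):
#
#     uart = UART(nysa, urn, debug=False)
#     uart.reset()
#     uart.set_baudrate(115200)
#     uart.disable_flowcontrol()
#     uart.write_string("hello world\r\n")
#     uart.get_status()
#     if uart.get_read_count() > 0:
#         print uart.read_string(-1)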
|
|
# coding: utf-8
# Copyright 2014 Globo.com Player authors. All rights reserved.
# Use of this source code is governed by a MIT License
# license that can be found in the LICENSE file.
# Tests M3U8 class to make sure all attributes and methods use the correct
# data returned from parser.parse()
import arrow
import datetime
import m3u8
import playlists
from m3u8.model import Segment, Key
def test_target_duration_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {'targetduration': '1234567'})
assert '1234567' == obj.target_duration
def test_media_sequence_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {'media_sequence': '1234567'})
assert '1234567' == obj.media_sequence
def test_program_date_time_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST_WITH_PROGRAM_DATE_TIME)
assert arrow.get('2014-08-13T13:36:33+00:00').datetime == obj.program_date_time
def test_program_date_time_attribute_for_each_segment():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST_WITH_PROGRAM_DATE_TIME)
first_program_date_time = arrow.get('2014-08-13T13:36:33+00:00').datetime
for idx, segment in enumerate(obj.segments):
assert segment.program_date_time == first_program_date_time + \
datetime.timedelta(seconds=idx * 3)
def test_program_date_time_attribute_with_discontinuity():
obj = m3u8.M3U8(playlists.DISCONTINUITY_PLAYLIST_WITH_PROGRAM_DATE_TIME)
first_program_date_time = arrow.get('2014-08-13T13:36:33+00:00').datetime
discontinuity_program_date_time = arrow.get('2014-08-13T13:36:55+00:00').datetime
segments = obj.segments
assert segments[0].program_date_time == first_program_date_time
assert segments[5].program_date_time == discontinuity_program_date_time
assert segments[6].program_date_time == discontinuity_program_date_time + datetime.timedelta(seconds=3)
def test_segment_discontinuity_attribute():
obj = m3u8.M3U8(playlists.DISCONTINUITY_PLAYLIST_WITH_PROGRAM_DATE_TIME)
segments = obj.segments
assert segments[0].discontinuity == False
assert segments[5].discontinuity == True
assert segments[6].discontinuity == False
def test_segment_cue_out_attribute():
obj = m3u8.M3U8(playlists.CUE_OUT_PLAYLIST)
segments = obj.segments
assert segments[1].cue_out == True
assert segments[2].cue_out == True
assert segments[3].cue_out == False
def test_segment_elemental_scte35_attribute():
obj = m3u8.M3U8(playlists.CUE_OUT_ELEMENTAL_PLAYLIST)
segments = obj.segments
assert segments[4].cue_out == True
assert segments[9].cue_out == False
assert segments[4].scte35 == '/DAlAAAAAAAAAP/wFAUAAAABf+//wpiQkv4ARKogAAEBAQAAQ6sodg=='
def test_segment_envivio_scte35_attribute():
obj = m3u8.M3U8(playlists.CUE_OUT_ENVIVIO_PLAYLIST)
segments = obj.segments
assert segments[3].cue_out == True
assert segments[4].scte35 == '/DAlAAAENOOQAP/wFAUBAABrf+//N25XDf4B9p/gAAEBAQAAxKni9A=='
assert segments[5].scte35 == '/DAlAAAENOOQAP/wFAUBAABrf+//N25XDf4B9p/gAAEBAQAAxKni9A=='
assert segments[7].cue_out == False
def test_keys_on_clear_playlist():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
assert len(obj.keys) == 1
assert obj.keys[0] == None
def test_keys_on_simple_encrypted_playlist():
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_ENCRIPTED_SEGMENTS)
assert len(obj.keys) == 1
assert obj.keys[0].uri == "https://priv.example.com/key.php?r=52"
def test_key_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
data = {'keys': [{'method': 'AES-128',
'uri': '/key',
'iv': 'foobar'}]}
mock_parser_data(obj, data)
assert 'Key' == obj.keys[0].__class__.__name__
assert 'AES-128' == obj.keys[0].method
assert '/key' == obj.keys[0].uri
assert 'foobar' == obj.keys[0].iv
def test_key_attribute_on_none():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {})
assert len(obj.keys) == 0
def test_key_attribute_without_initialization_vector():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {'keys': [{'method': 'AES-128',
'uri': '/key'}]})
assert 'AES-128' == obj.keys[0].method
assert '/key' == obj.keys[0].uri
assert None == obj.keys[0].iv
def test_segments_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {'segments': [{'uri': '/foo/bar-1.ts',
'title': 'First Segment',
'duration': 1500},
{'uri': '/foo/bar-2.ts',
'title': 'Second Segment',
'duration': 1600}]})
assert 2 == len(obj.segments)
assert '/foo/bar-1.ts' == obj.segments[0].uri
assert 'First Segment' == obj.segments[0].title
assert 1500 == obj.segments[0].duration
def test_segments_attribute_without_title():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {'segments': [{'uri': '/foo/bar-1.ts',
'duration': 1500}]})
assert 1 == len(obj.segments)
assert '/foo/bar-1.ts' == obj.segments[0].uri
assert 1500 == obj.segments[0].duration
assert None == obj.segments[0].title
def test_segments_attribute_without_duration():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {'segments': [{'uri': '/foo/bar-1.ts',
'title': 'Segment title'}]})
assert 1 == len(obj.segments)
assert '/foo/bar-1.ts' == obj.segments[0].uri
assert 'Segment title' == obj.segments[0].title
assert None == obj.segments[0].duration
def test_segments_attribute_with_byterange():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {'segments': [{'uri': '/foo/bar-1.ts',
'title': 'Segment title',
'duration': 1500,
'byterange': '76242@0'}]})
assert 1 == len(obj.segments)
assert '/foo/bar-1.ts' == obj.segments[0].uri
assert 'Segment title' == obj.segments[0].title
assert 1500 == obj.segments[0].duration
assert '76242@0' == obj.segments[0].byterange
def test_segment_attribute_with_multiple_keys():
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV_WITH_MULTIPLE_KEYS)
segments = obj.segments
assert segments[0].key.uri == '/hls-key/key.bin'
assert segments[1].key.uri == '/hls-key/key.bin'
assert segments[4].key.uri == '/hls-key/key2.bin'
assert segments[5].key.uri == '/hls-key/key2.bin'
def test_is_variant_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {'is_variant': False})
assert not obj.is_variant
mock_parser_data(obj, {'is_variant': True})
assert obj.is_variant
def test_is_endlist_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {'is_endlist': False})
assert not obj.is_endlist
obj = m3u8.M3U8(playlists.SLIDING_WINDOW_PLAYLIST)
mock_parser_data(obj, {'is_endlist': True})
assert obj.is_endlist
def test_is_i_frames_only_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {'is_i_frames_only': False})
assert not obj.is_i_frames_only
mock_parser_data(obj, {'is_i_frames_only': True})
assert obj.is_i_frames_only
def test_playlists_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
data = {'playlists': [{'uri': '/url/1.m3u8',
'stream_info': {'program_id': 1,
'bandwidth': 320000,
'video': 'high'}},
{'uri': '/url/2.m3u8',
'stream_info': {'program_id': 1,
'bandwidth': 120000,
'codecs': 'mp4a.40.5',
'video': 'low'}},
],
'media': [{'type': 'VIDEO', 'name': 'High', 'group_id': 'high'},
{'type': 'VIDEO', 'name': 'Low', 'group_id': 'low',
'default': 'YES', 'autoselect': 'YES'}
]
}
mock_parser_data(obj, data)
assert 2 == len(obj.playlists)
assert '/url/1.m3u8' == obj.playlists[0].uri
assert 1 == obj.playlists[0].stream_info.program_id
assert 320000 == obj.playlists[0].stream_info.bandwidth
assert None == obj.playlists[0].stream_info.codecs
assert None == obj.playlists[0].media[0].uri
assert 'high' == obj.playlists[0].media[0].group_id
assert 'VIDEO' == obj.playlists[0].media[0].type
assert None == obj.playlists[0].media[0].language
assert 'High' == obj.playlists[0].media[0].name
assert None == obj.playlists[0].media[0].default
assert None == obj.playlists[0].media[0].autoselect
assert None == obj.playlists[0].media[0].forced
assert None == obj.playlists[0].media[0].characteristics
assert '/url/2.m3u8' == obj.playlists[1].uri
assert 1 == obj.playlists[1].stream_info.program_id
assert 120000 == obj.playlists[1].stream_info.bandwidth
assert 'mp4a.40.5' == obj.playlists[1].stream_info.codecs
assert None == obj.playlists[1].media[0].uri
assert 'low' == obj.playlists[1].media[0].group_id
assert 'VIDEO' == obj.playlists[1].media[0].type
assert None == obj.playlists[1].media[0].language
assert 'Low' == obj.playlists[1].media[0].name
assert 'YES' == obj.playlists[1].media[0].default
assert 'YES' == obj.playlists[1].media[0].autoselect
assert None == obj.playlists[1].media[0].forced
assert None == obj.playlists[1].media[0].characteristics
assert [] == obj.iframe_playlists
def test_playlists_attribute_without_program_id():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {'playlists': [{'uri': '/url/1.m3u8',
'stream_info': {'bandwidth': 320000}}
]})
assert 1 == len(obj.playlists)
assert '/url/1.m3u8' == obj.playlists[0].uri
assert 320000 == obj.playlists[0].stream_info.bandwidth
assert None == obj.playlists[0].stream_info.codecs
assert None == obj.playlists[0].stream_info.program_id
def test_playlists_attribute_with_resolution():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST_WITH_RESOLUTION)
assert 2 == len(obj.playlists)
assert (512, 288) == obj.playlists[0].stream_info.resolution
assert None == obj.playlists[1].stream_info.resolution
def test_iframe_playlists_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
data = {
'iframe_playlists': [{'uri': '/url/1.m3u8',
'iframe_stream_info': {'program_id': 1,
'bandwidth': 320000,
'resolution': '320x180',
'codecs': 'avc1.4d001f'}},
{'uri': '/url/2.m3u8',
'iframe_stream_info': {'bandwidth': '120000',
'codecs': 'avc1.4d400d'}}]
}
mock_parser_data(obj, data)
assert 2 == len(obj.iframe_playlists)
assert '/url/1.m3u8' == obj.iframe_playlists[0].uri
assert 1 == obj.iframe_playlists[0].iframe_stream_info.program_id
assert 320000 == obj.iframe_playlists[0].iframe_stream_info.bandwidth
assert (320, 180) == obj.iframe_playlists[0].iframe_stream_info.resolution
assert 'avc1.4d001f' == obj.iframe_playlists[0].iframe_stream_info.codecs
assert '/url/2.m3u8' == obj.iframe_playlists[1].uri
assert None == obj.iframe_playlists[1].iframe_stream_info.program_id
assert '120000' == obj.iframe_playlists[1].iframe_stream_info.bandwidth
assert None == obj.iframe_playlists[1].iframe_stream_info.resolution
assert 'avc1.4d400d' == obj.iframe_playlists[1].iframe_stream_info.codecs
def test_version_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {'version': '2'})
assert '2' == obj.version
mock_parser_data(obj, {})
assert None == obj.version
def test_allow_cache_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
mock_parser_data(obj, {'allow_cache': 'no'})
assert 'no' == obj.allow_cache
mock_parser_data(obj, {})
assert None == obj.allow_cache
def test_files_attribute_should_list_all_files_including_segments_and_key():
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_ENCRIPTED_SEGMENTS)
files = [
'https://priv.example.com/key.php?r=52',
'http://media.example.com/fileSequence52-1.ts',
'http://media.example.com/fileSequence52-2.ts',
'http://media.example.com/fileSequence52-3.ts',
]
assert files == obj.files
def test_vod_playlist_type_should_be_imported_as_a_simple_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST_WITH_VOD_PLAYLIST_TYPE)
assert obj.playlist_type == 'vod'
def test_event_playlist_type_should_be_imported_as_a_simple_attribute():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST_WITH_EVENT_PLAYLIST_TYPE)
assert obj.playlist_type == 'event'
def test_independent_segments_should_be_true():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST_WITH_INDEPENDENT_SEGMENTS)
assert obj.is_independent_segments
def test_independent_segments_should_be_false():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST_WITH_EVENT_PLAYLIST_TYPE)
assert not obj.is_independent_segments
def test_no_playlist_type_leaves_attribute_empty():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
assert obj.playlist_type is None
def test_dump_playlists_with_resolution():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST_WITH_RESOLUTION)
expected = playlists.SIMPLE_PLAYLIST_WITH_RESOLUTION.strip().splitlines()
assert expected == obj.dumps().strip().splitlines()
def test_dump_should_build_file_with_same_content(tmpdir):
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV)
expected = playlists.PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV_SORTED.replace(', IV', ',IV').strip()
filename = str(tmpdir.join('playlist.m3u8'))
obj.dump(filename)
assert_file_content(filename, expected)
def test_dump_should_create_sub_directories(tmpdir):
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV)
expected = playlists.PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV_SORTED.replace(', IV', ',IV').strip()
filename = str(tmpdir.join('subdir1', 'subdir2', 'playlist.m3u8'))
obj.dump(filename)
assert_file_content(filename, expected)
def test_dump_should_work_for_variant_streams():
obj = m3u8.M3U8(playlists.VARIANT_PLAYLIST)
expected = playlists.VARIANT_PLAYLIST.replace(', BANDWIDTH', ',BANDWIDTH').strip()
assert expected == obj.dumps().strip()
def test_dump_should_work_for_variant_playlists_with_iframe_playlists():
obj = m3u8.M3U8(playlists.VARIANT_PLAYLIST_WITH_IFRAME_PLAYLISTS)
expected = playlists.VARIANT_PLAYLIST_WITH_IFRAME_PLAYLISTS.strip()
assert expected == obj.dumps().strip()
def test_dump_should_work_for_iframe_playlists():
obj = m3u8.M3U8(playlists.IFRAME_PLAYLIST)
expected = playlists.IFRAME_PLAYLIST.strip()
assert expected == obj.dumps().strip()
obj = m3u8.M3U8(playlists.IFRAME_PLAYLIST2)
expected = playlists.IFRAME_PLAYLIST.strip()
# expected that dump will reverse EXTINF and EXT-X-BYTERANGE,
# hence IFRAME_PLAYLIST dump from IFRAME_PLAYLIST2 parse.
assert expected == obj.dumps().strip()
def test_dump_should_include_program_date_time():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST_WITH_PROGRAM_DATE_TIME)
assert "EXT-X-PROGRAM-DATE-TIME:2014-08-13T13:36:33+00:00" in obj.dumps().strip()
def test_dump_should_work_for_playlists_using_byteranges():
obj = m3u8.M3U8(playlists.PLAYLIST_USING_BYTERANGES)
expected = playlists.PLAYLIST_USING_BYTERANGES.strip()
assert expected == obj.dumps().strip()
def test_should_dump_with_endlist_tag():
obj = m3u8.M3U8(playlists.SLIDING_WINDOW_PLAYLIST)
obj.is_endlist = True
assert '#EXT-X-ENDLIST' in obj.dumps().splitlines()
def test_should_dump_without_endlist_tag():
obj = m3u8.M3U8(playlists.SIMPLE_PLAYLIST)
obj.is_endlist = False
expected = playlists.SIMPLE_PLAYLIST.strip().splitlines()
expected.remove('#EXT-X-ENDLIST')
assert expected == obj.dumps().strip().splitlines()
def test_should_dump_multiple_keys():
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV_WITH_MULTIPLE_KEYS)
expected = playlists.PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV_WITH_MULTIPLE_KEYS_SORTED.strip()
assert expected == obj.dumps().strip()
def test_should_dump_unencrypted_encrypted_keys_together():
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_MULTIPLE_KEYS_UNENCRYPTED_AND_ENCRYPTED)
expected = playlists.PLAYLIST_WITH_MULTIPLE_KEYS_UNENCRYPTED_AND_ENCRYPTED.strip()
assert expected == obj.dumps().strip()
def test_should_dump_complex_unencrypted_encrypted_keys():
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_MULTIPLE_KEYS_UNENCRYPTED_AND_ENCRYPTED_NONE)
expected = playlists.PLAYLIST_WITH_MULTIPLE_KEYS_UNENCRYPTED_AND_ENCRYPTED_NONE \
.replace('METHOD=NONE,URI=""', 'METHOD=NONE') \
.strip()
assert expected == obj.dumps().strip()
def test_should_dump_complex_unencrypted_encrypted_keys_no_uri_attr():
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_MULTIPLE_KEYS_UNENCRYPTED_AND_ENCRYPTED_NONE_AND_NO_URI_ATTR)
expected = playlists.PLAYLIST_WITH_MULTIPLE_KEYS_UNENCRYPTED_AND_ENCRYPTED_NONE_AND_NO_URI_ATTR \
.strip()
assert expected == obj.dumps().strip()
def test_length_segments_by_key():
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_MULTIPLE_KEYS_UNENCRYPTED_AND_ENCRYPTED)
assert len(obj.segments.by_key(obj.keys[0])) == 2
assert len(obj.segments.by_key(obj.keys[1])) == 4
assert len(obj.segments.by_key(obj.keys[2])) == 2
def test_list_segments_by_key():
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_MULTIPLE_KEYS_UNENCRYPTED_AND_ENCRYPTED)
# unencrypted segments
segments = obj.segments.by_key(None)
expected = "../../../../hls/streamNum82400.ts\n../../../../hls/streamNum82401.ts"
output = [ segment.uri for segment in segments ]
assert "\n".join(output).strip() == expected.strip()
# segments for last key
segments = obj.segments.by_key(obj.keys[2])
expected = "../../../../hls/streamNum82404.ts\n../../../../hls/streamNum82405.ts"
output = [ segment.uri for segment in segments ]
assert "\n".join(output).strip() == expected.strip()
def test_replace_segment_key():
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_MULTIPLE_KEYS_UNENCRYPTED_AND_ENCRYPTED)
# Replace unencrypted segments with new key
new_key = Key("AES-128", "/hls-key/key0.bin", None, iv="0Xcafe8f758ca555115584bb5b3c687f52")
for segment in obj.segments.by_key(None):
segment.key = new_key
# Check dump
expected = playlists.PLAYLIST_WITH_MULTIPLE_KEYS_UNENCRYPTED_AND_ENCRYPTED_UPDATED.strip()
assert obj.dumps().strip() == expected
def test_should_dump_program_datetime_and_discontinuity():
obj = m3u8.M3U8(playlists.DISCONTINUITY_PLAYLIST_WITH_PROGRAM_DATE_TIME)
expected = playlists.DISCONTINUITY_PLAYLIST_WITH_PROGRAM_DATE_TIME.strip()
assert expected == obj.dumps().strip()
def test_should_normalize_segments_and_key_urls_if_base_path_passed_to_constructor():
base_path = 'http://videoserver.com/hls/live'
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV, base_path)
expected = playlists.PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV_SORTED \
.replace(', IV', ',IV') \
.replace('../../../../hls', base_path) \
.replace('/hls-key', base_path) \
.strip()
assert obj.dumps().strip() == expected
def test_should_normalize_variant_streams_urls_if_base_path_passed_to_constructor():
base_path = 'http://videoserver.com/hls/live'
obj = m3u8.M3U8(playlists.VARIANT_PLAYLIST, base_path)
expected = playlists.VARIANT_PLAYLIST \
.replace(', BANDWIDTH', ',BANDWIDTH') \
.replace('http://example.com', base_path) \
.strip()
assert obj.dumps().strip() == expected
def test_should_normalize_segments_and_key_urls_if_base_path_attribute_updated():
base_path = 'http://videoserver.com/hls/live'
obj = m3u8.M3U8(playlists.PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV)
obj.base_path = base_path
expected = playlists.PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV_SORTED \
.replace(', IV', ',IV') \
.replace('../../../../hls', base_path) \
.replace('/hls-key', base_path) \
.strip()
assert obj.dumps().strip() == expected
def test_playlist_type_dumped_to_appropriate_m3u8_field():
obj = m3u8.M3U8()
obj.playlist_type = 'vod'
result = obj.dumps()
expected = '#EXTM3U\n#EXT-X-PLAYLIST-TYPE:VOD\n'
assert result == expected
def test_empty_playlist_type_is_gracefully_ignored():
obj = m3u8.M3U8()
obj.playlist_type = ''
result = obj.dumps()
expected = '#EXTM3U\n'
assert result == expected
def test_none_playlist_type_is_gracefully_ignored():
obj = m3u8.M3U8()
obj.playlist_type = None
result = obj.dumps()
expected = '#EXTM3U\n'
assert result == expected
def test_0_media_sequence_added_to_file():
obj = m3u8.M3U8()
obj.media_sequence = 0
result = obj.dumps()
expected = '#EXTM3U\n'
assert result == expected
def test_none_media_sequence_gracefully_ignored():
obj = m3u8.M3U8()
obj.media_sequence = None
result = obj.dumps()
expected = '#EXTM3U\n'
assert result == expected
def test_should_correctly_update_base_path_if_its_blank():
segment = Segment('entire.ts', 'http://1.2/')
assert not segment.base_path
segment.base_path = "base_path"
assert "http://1.2/base_path/entire.ts" == segment.absolute_uri
def test_m3u8_should_propagate_base_uri_to_segments():
with open(playlists.RELATIVE_PLAYLIST_FILENAME) as f:
content = f.read()
obj = m3u8.M3U8(content, base_uri='/any/path')
assert '/entire1.ts' == obj.segments[0].uri
assert '/any/path/entire1.ts' == obj.segments[0].absolute_uri
assert 'entire4.ts' == obj.segments[3].uri
assert '/any/path/entire4.ts' == obj.segments[3].absolute_uri
obj.base_uri = '/any/where/'
assert '/entire1.ts' == obj.segments[0].uri
assert '/any/where/entire1.ts' == obj.segments[0].absolute_uri
assert 'entire4.ts' == obj.segments[3].uri
assert '/any/where/entire4.ts' == obj.segments[3].absolute_uri
def test_m3u8_should_propagate_base_uri_to_key():
with open(playlists.RELATIVE_PLAYLIST_FILENAME) as f:
content = f.read()
obj = m3u8.M3U8(content, base_uri='/any/path')
assert '../key.bin' == obj.keys[0].uri
assert '/any/key.bin' == obj.keys[0].absolute_uri
obj.base_uri = '/any/where/'
assert '../key.bin' == obj.keys[0].uri
assert '/any/key.bin' == obj.keys[0].absolute_uri
# custom asserts
def assert_file_content(filename, expected):
with open(filename) as fileobj:
content = fileobj.read().strip()
assert content == expected
# helpers
def mock_parser_data(m3u8_obj, data):
data.setdefault('segments', [])
m3u8_obj.data = data
m3u8_obj._initialize_attributes()
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import logging
import sys
import traceback
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
import oslo_messaging
from oslo_messaging._i18n import _
from oslo_messaging._i18n import _LE
from oslo_messaging import _utils as utils
LOG = logging.getLogger(__name__)
_EXCEPTIONS_MODULE = 'exceptions' if six.PY2 else 'builtins'
'''RPC Envelope Version.
This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently. For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc. This version number is handled
in the rpc proxy and dispatcher modules.
This version number applies to the message envelope that is used in the
serialization done inside the rpc layer. See serialize_msg() and
deserialize_msg().
The current message format (version 2.0) is very simple. It is:
{
'oslo.version': <RPC Envelope Version as a String>,
'oslo.message': <Application Message Payload, JSON encoded>
}
Message format version '1.0' is just considered to be the messages we sent
without a message envelope.
So, the current message envelope just includes the envelope version. It may
eventually contain additional information, such as a signature for the message
payload.
We will JSON encode the application message payload. The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
_RPC_ENVELOPE_VERSION = '2.0'
_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'
_REMOTE_POSTFIX = '_Remote'
class RPCException(Exception):
msg_fmt = _("An unknown RPC related exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error("%s: %s", name, value)
# at least get the core message out if something happened
message = self.msg_fmt
super(RPCException, self).__init__(message)
class Timeout(RPCException):
"""Signifies that a timeout has occurred.
This exception is raised if the rpc_response_timeout is reached while
waiting for a response from the remote side.
"""
msg_fmt = _('Timeout while waiting on RPC response - '
'topic: "%(topic)s", RPC method: "%(method)s" '
'info: "%(info)s"')
def __init__(self, info=None, topic=None, method=None):
"""Initiates Timeout object.
:param info: Extra info to convey to the user
:param topic: The topic that the rpc call was sent to
        :param method: The name of the rpc method being called
"""
self.info = info
self.topic = topic
self.method = method
super(Timeout, self).__init__(
None,
info=info or _('<unknown>'),
topic=topic or _('<unknown>'),
method=method or _('<unknown>'))
class DuplicateMessageError(RPCException):
msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
class InvalidRPCConnectionReuse(RPCException):
msg_fmt = _("Invalid reuse of an RPC connection.")
class UnsupportedRpcVersion(RPCException):
msg_fmt = _("Specified RPC version, %(version)s, not supported by "
"this endpoint.")
class UnsupportedRpcEnvelopeVersion(RPCException):
msg_fmt = _("Specified RPC envelope version, %(version)s, "
"not supported by this endpoint.")
class RpcVersionCapError(RPCException):
msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
class Connection(object):
"""A connection, returned by rpc.create_connection().
This class represents a connection to the message bus used for rpc.
An instance of this class should never be created by users of the rpc API.
Use rpc.create_connection() instead.
"""
def close(self):
"""Close the connection.
This method must be called when the connection will no longer be used.
It will ensure that any resources associated with the connection, such
        as a network connection, are cleaned up.
"""
raise NotImplementedError()
def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_LE("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
# NOTE(matiu): With cells, it's possible to re-raise remote, remote
# exceptions. Lets turn it back into the original exception type.
cls_name = six.text_type(failure.__class__.__name__)
mod_name = six.text_type(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = {
'class': cls_name,
'module': mod_name,
'message': six.text_type(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data
def deserialize_remote_exception(data, allowed_remote_exmods):
failure = jsonutils.loads(six.text_type(data))
trace = failure.get('tb', [])
message = failure.get('message', "") + "\n" + "\n".join(trace)
name = failure.get('class')
module = failure.get('module')
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
# order to prevent arbitrary code execution.
if module != _EXCEPTIONS_MODULE and module not in allowed_remote_exmods:
return oslo_messaging.RemoteError(name, failure.get('message'), trace)
try:
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
if not issubclass(klass, Exception):
raise TypeError("Can only deserialize Exceptions")
failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
except (AttributeError, TypeError, ImportError):
return oslo_messaging.RemoteError(name, failure.get('message'), trace)
ex_type = type(failure)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
{'__str__': str_override, '__unicode__': str_override})
new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
try:
# NOTE(ameade): Dynamically create a new exception type and swap it in
# as the new type for the exception. This only works on user defined
# Exceptions and not core Python exceptions. This is important because
# we cannot necessarily change an exception message so we must override
# the __str__ method.
failure.__class__ = new_ex_type
except TypeError:
# NOTE(ameade): If a core exception then just add the traceback to the
# first exception argument.
failure.args = (message,) + failure.args[1:]
return failure
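# Illustrative note (not part of the module): for a user-defined exception
# class such as ``myservice.exc.MyError`` (a hypothetical name, assuming its
# module is listed in allowed_remote_exmods), deserialize_remote_exception()
# rebuilds the instance and swaps its class for a dynamically created
# ``MyError_Remote`` subclass, so str() includes the remote traceback while
# isinstance(exc, MyError) still holds; core Python exceptions instead have
# their first argument replaced with the message plus remote traceback.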
class CommonRpcContext(object):
def __init__(self, **kwargs):
self.values = kwargs
def __getattr__(self, key):
try:
return self.values[key]
except KeyError:
raise AttributeError(key)
def to_dict(self):
return copy.deepcopy(self.values)
@classmethod
def from_dict(cls, values):
return cls(**values)
def deepcopy(self):
return self.from_dict(self.to_dict())
def update_store(self):
# local.store.context = self
pass
class ClientException(Exception):
"""Encapsulates actual exception expected to be hit by a RPC proxy object.
Merely instantiating it records the current exception information, which
will be passed back to the RPC client without exceptional logging.
"""
def __init__(self):
self._exc_info = sys.exc_info()
def serialize_msg(raw_msg):
# NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
# information about this format.
msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
_MESSAGE_KEY: jsonutils.dumps(raw_msg)}
return msg
def deserialize_msg(msg):
# NOTE(russellb): Hang on to your hats, this road is about to
# get a little bumpy.
#
# Robustness Principle:
# "Be strict in what you send, liberal in what you accept."
#
# At this point we have to do a bit of guessing about what it
# is we just received. Here is the set of possibilities:
#
# 1) We received a dict. This could be 2 things:
#
# a) Inspect it to see if it looks like a standard message envelope.
# If so, great!
#
# b) If it doesn't look like a standard message envelope, it could either
# be a notification, or a message from before we added a message
# envelope (referred to as version 1.0).
# Just return the message as-is.
#
# 2) It's any other non-dict type. Just return it and hope for the best.
# This case covers return values from rpc.call() from before message
# envelopes were used. (messages to call a method were always a dict)
if not isinstance(msg, dict):
# See #2 above.
return msg
base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
if not all(map(lambda key: key in msg, base_envelope_keys)):
# See #1.b above.
return msg
# At this point we think we have the message envelope
# format we were expecting. (#1.a above)
if not utils.version_is_compatible(_RPC_ENVELOPE_VERSION,
msg[_VERSION_KEY]):
raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])
raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])
return raw_msg
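# Illustrative round trip (a sketch, not part of the module): serialize_msg()
# wraps an application payload in the 2.0 envelope documented above and
# deserialize_msg() unwraps it again.
#
#     envelope = serialize_msg({'method': 'ping', 'args': {}})
#     # envelope[_VERSION_KEY] == '2.0'
#     # envelope[_MESSAGE_KEY] is the JSON-encoded payload
#     assert deserialize_msg(envelope) == {'method': 'ping', 'args': {}}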
class DecayingTimer(object):
def __init__(self, duration=None):
self._watch = timeutils.StopWatch(duration=duration)
def start(self):
self._watch.start()
def check_return(self, timeout_callback=None, *args, **kwargs):
maximum = kwargs.pop('maximum', None)
left = self._watch.leftover(return_none=True)
if left is None:
return maximum
if left <= 0 and timeout_callback is not None:
timeout_callback(*args, **kwargs)
return left if maximum is None else min(left, maximum)
# NOTE(sileht): Even if rabbit has only one Connection class,
# this connection can be used for two purposes:
# * wait and receive amqp messages (only do read stuffs on the socket)
# * send messages to the broker (only do write stuffs on the socket)
# The code inside a connection class is not concurrency safe.
# Using one Connection class instance for both will result in eventlet
# complaining about multiple greenthreads reading/writing the same fd
# concurrently, because 'send' and 'listen' run in different greenthreads.
# So a connection cannot be shared between threads/greenthreads, and these
# two variables define the purpose of the connection so drivers can add
# special handling if needed (like heartbeat).
# amqp drivers create 3 kinds of connections:
# * driver.listen*(): each call create a new 'PURPOSE_LISTEN' connection
# * driver.send*(): a pool of 'PURPOSE_SEND' connections is used
# * driver internally have another 'PURPOSE_LISTEN' connection dedicated
# to wait replies of rpc call
PURPOSE_LISTEN = 'listen'
PURPOSE_SEND = 'send'
class ConnectionContext(Connection):
"""The class that is actually returned to the create_connection() caller.
This is essentially a wrapper around Connection that supports 'with'.
It can also return a new Connection, or one from a pool.
The function will also catch when an instance of this class is to be
deleted. With that we can return Connections to the pool on exceptions
and so forth without making the caller be responsible for catching them.
If possible the function makes sure to return a connection to the pool.
"""
def __init__(self, connection_pool, purpose):
"""Create a new connection, or get one from the pool."""
self.connection = None
self.connection_pool = connection_pool
pooled = purpose == PURPOSE_SEND
if pooled:
self.connection = connection_pool.get()
else:
# a non-pooled connection is requested, so create a new connection
self.connection = connection_pool.create(purpose)
self.pooled = pooled
self.connection.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self."""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
try:
self.connection.reset()
except Exception:
LOG.exception("Fail to reset the connection, drop it")
try:
self.connection.close()
except Exception:
pass
self.connection = self.connection_pool.create()
finally:
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance."""
if self.connection:
return getattr(self.connection, key)
else:
raise InvalidRPCConnectionReuse()
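# Illustrative usage sketch (assumes a connection_pool object exposing the
# get()/create()/put() interface used above; not part of the module):
#
#     with ConnectionContext(connection_pool, PURPOSE_SEND) as conn:
#         conn.do_something()  # attribute access is proxied to the Connection
#     # on exit, a pooled (PURPOSE_SEND) connection is reset and returned to
#     # the pool; a PURPOSE_LISTEN connection is closed instead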
|
|
from corehq.apps.accounting.utils import domain_is_on_trial
from corehq.apps.reminders.models import (Message, METHOD_SMS,
METHOD_SMS_CALLBACK, METHOD_SMS_SURVEY, METHOD_IVR_SURVEY,
METHOD_EMAIL, CaseReminderHandler, EmailUsage)
from corehq.apps.groups.models import Group
from corehq.apps.hqwebapp.tasks import send_mail_async
from corehq.apps.ivr.models import Call
from corehq.apps.locations.models import Location
from corehq.apps.reminders.util import get_unverified_number_for_recipient
from corehq.apps.smsforms.app import submit_unfinished_form
from corehq.apps.smsforms.models import get_session_by_session_id, SQLXFormsSession
from touchforms.formplayer.api import current_question, TouchformsError
from corehq.apps.sms.api import (
send_sms, send_sms_to_verified_number, MessageMetadata
)
from corehq.apps.smsforms.app import start_session
from corehq.apps.smsforms.util import form_requires_input
from corehq.apps.sms.util import format_message_list, touchforms_error_is_config_error
from corehq.apps.users.cases import get_owner_id, get_wrapped_owner
from corehq.apps.users.models import CouchUser, WebUser, CommCareUser
from corehq.apps.domain.models import Domain
from corehq.apps.sms.models import (
ExpectedCallback, CALLBACK_PENDING, CALLBACK_RECEIVED,
CALLBACK_MISSED, WORKFLOW_REMINDER, WORKFLOW_KEYWORD, WORKFLOW_BROADCAST,
WORKFLOW_CALLBACK, MessagingEvent,
)
from django.conf import settings
from corehq.apps.app_manager.models import Form
from corehq.apps.ivr.tasks import initiate_outbound_call
from corehq.form_processor.utils import is_commcarecase
from dimagi.utils.couch import CriticalSection
from django.utils.translation import ugettext_noop
from dimagi.utils.modules import to_function
TRIAL_MAX_EMAILS = 50
ERROR_RENDERING_MESSAGE = ugettext_noop("Error rendering templated message for language '%s'. Please check message syntax.")
ERROR_NO_VERIFIED_NUMBER = ugettext_noop("Recipient has no phone number.")
ERROR_NO_OTHER_NUMBERS = ugettext_noop("Recipient has no phone number.")
ERROR_FORM = ugettext_noop("Can't load form. Please check configuration.")
ERROR_NO_RECIPIENTS = ugettext_noop("No recipient(s).")
ERROR_FINDING_CUSTOM_CONTENT_HANDLER = ugettext_noop("Error looking up custom content handler.")
ERROR_INVALID_CUSTOM_CONTENT_HANDLER = ugettext_noop("Invalid custom content handler.")
"""
This module defines the methods that will be called from CaseReminderHandler.fire()
when a reminder event fires.
Each method accepts the following parameters:
reminder The CaseReminder which is being fired. Use reminder.current_event
to see the specific event which is being fired.
handler The CaseReminderHandler which defines the rules / schedule for
the reminder.
recipients A list of recipients to send the content to. At the moment, this
will be list of CommCareUsers or CommCareCases.
verified_numbers A dictionary of recipient.get_id : <first non-pending verified number>
If the recipient doesn't have a verified PhoneNumber entry, None is the
corresponding value.
Any changes to the reminder object made by the event handler method will be saved
after the method returns.
"""
def get_workflow(handler):
from corehq.apps.reminders.models import REMINDER_TYPE_ONE_TIME, REMINDER_TYPE_KEYWORD_INITIATED
if handler.reminder_type == REMINDER_TYPE_ONE_TIME:
return WORKFLOW_BROADCAST
elif handler.reminder_type == REMINDER_TYPE_KEYWORD_INITIATED:
return WORKFLOW_KEYWORD
else:
return WORKFLOW_REMINDER
def get_recipient_phone_number(reminder, recipient, verified_numbers):
verified_number = verified_numbers.get(recipient.get_id, None)
unverified_number = None
if verified_number is None:
unverified_number = get_unverified_number_for_recipient(recipient)
return (verified_number, unverified_number)
def _get_case_template_info(case):
return case.to_json()
def _get_web_user_template_info(user):
return {
'name': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
}
def _get_mobile_user_template_info(user):
return {
'name': user.raw_username,
'first_name': user.first_name,
'last_name': user.last_name,
}
def _get_group_template_info(group):
return {
'name': group.name,
}
def _get_location_template_info(location):
return {
'name': location.name,
'site_code': location.site_code,
}
def _get_obj_template_info(obj):
if is_commcarecase(obj):
return _get_case_template_info(obj)
elif isinstance(obj, WebUser):
return _get_web_user_template_info(obj)
elif isinstance(obj, CommCareUser):
return _get_mobile_user_template_info(obj)
elif isinstance(obj, Group):
return _get_group_template_info(obj)
elif isinstance(obj, Location):
return _get_location_template_info(obj)
return {}
def _add_case_to_template_params(case, result):
result['case'] = _get_obj_template_info(case)
def _add_parent_case_to_template_params(case, result):
parent_case = case.parent
if parent_case:
result['case']['parent'] = _get_obj_template_info(parent_case)
def _add_owner_to_template_params(case, result):
owner = get_wrapped_owner(get_owner_id(case))
if owner:
result['case']['owner'] = _get_obj_template_info(owner)
def _add_modified_by_to_template_params(case, result):
try:
modified_by = CouchUser.get_by_user_id(case.modified_by)
except KeyError:
return
if modified_by:
result['case']['last_modified_by'] = _get_obj_template_info(modified_by)
def get_message_template_params(case=None):
"""
Data such as case properties can be referenced from reminder messages using
syntax such as {case.name}, which references the case's name. This function
builds the full set of data that can be referenced from a reminder message.
The result is a dictionary where each key is the object's name and each
value is a dictionary of attributes to be referenced. Dictionaries can
also be nested, so a result here of {"case": {"parent": {"name": "joe"}}}
allows you to reference {case.parent.name} in a reminder message.
At the moment, the result here is of this structure (note that "owner" and
"last_modified_by" are nested under "case", as built by the helpers above):
{
    "case": {
        ...key:value case properties...
        "parent": {
            ...key:value parent case properties...
        },
        "owner": ... dict with selected info for the case owner ...,
        "last_modified_by": ... dict with selected info for the user who last modified the case ...
    }
}
"""
result = {}
if case:
_add_case_to_template_params(case, result)
_add_parent_case_to_template_params(case, result)
_add_owner_to_template_params(case, result)
_add_modified_by_to_template_params(case, result)
return result
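# Hedged illustration (not part of the original module): a reminder message
# template resolves dotted placeholders against the nested dicts built above,
# e.g. "Hello {case.parent.name}" looks up
#   get_message_template_params(case)["case"]["parent"]["name"]
# The actual substitution is performed by Message.render(); the path shown
# here simply mirrors the structure documented in the docstring.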
def get_custom_content_handler(handler, logged_event):
content_handler = None
if handler.custom_content_handler:
if handler.custom_content_handler in settings.ALLOWED_CUSTOM_CONTENT_HANDLERS:
try:
content_handler = to_function(
settings.ALLOWED_CUSTOM_CONTENT_HANDLERS[handler.custom_content_handler])
except Exception:
logged_event.error(MessagingEvent.ERROR_CANNOT_LOAD_CUSTOM_CONTENT_HANDLER)
else:
logged_event.error(MessagingEvent.ERROR_INVALID_CUSTOM_CONTENT_HANDLER)
return (handler.custom_content_handler is not None, content_handler)
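# Hedged sketch (assumption, not from the original source): a custom content
# handler referenced via settings.ALLOWED_CUSTOM_CONTENT_HANDLERS is resolved
# by to_function() and is expected to be a callable of roughly this shape:
#
#   def my_custom_content_handler(reminder, handler, recipient):
#       info = _get_obj_template_info(recipient)
#       return "Hello %s" % info.get('name', '')
#
# For SMS events it returns a message string (see fire_sms_event below); for
# email events it is expected to return a (subject, message) tuple (see
# fire_email_event).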
def fire_sms_event(reminder, handler, recipients, verified_numbers, logged_event, workflow=None):
current_event = reminder.current_event
case = reminder.case
template_params = get_message_template_params(case)
uses_custom_content_handler, content_handler = get_custom_content_handler(handler, logged_event)
if uses_custom_content_handler and not content_handler:
return
domain_obj = Domain.get_by_name(reminder.domain, strict=True)
for recipient in recipients:
logged_subevent = logged_event.create_subevent(handler, reminder, recipient)
try:
lang = recipient.get_language_code()
except Exception:
lang = None
if content_handler:
message = content_handler(reminder, handler, recipient)
else:
message = current_event.message.get(lang, current_event.message[handler.default_lang])
try:
message = Message.render(message, **template_params)
except Exception:
logged_subevent.error(MessagingEvent.ERROR_CANNOT_RENDER_MESSAGE)
continue
verified_number, unverified_number = get_recipient_phone_number(
reminder, recipient, verified_numbers)
if message:
metadata = MessageMetadata(
workflow=workflow or get_workflow(handler),
reminder_id=reminder._id,
messaging_subevent_id=logged_subevent.pk,
)
if verified_number is not None:
send_sms_to_verified_number(verified_number,
message, metadata, logged_subevent=logged_subevent)
elif isinstance(recipient, CouchUser) and unverified_number:
send_sms(reminder.domain, recipient, unverified_number,
message, metadata)
elif (is_commcarecase(recipient) and unverified_number and
domain_obj.send_to_duplicated_case_numbers):
send_sms(reminder.domain, recipient, unverified_number,
message, metadata)
else:
logged_subevent.error(MessagingEvent.ERROR_NO_PHONE_NUMBER)
continue
logged_subevent.completed()
def fire_sms_callback_event(reminder, handler, recipients, verified_numbers, logged_event):
current_event = reminder.current_event
for recipient in recipients:
send_message = False
if reminder.callback_try_count > 0:
if reminder.event_initiation_timestamp:
event = ExpectedCallback.by_domain_recipient_date(
reminder.domain,
recipient.get_id,
reminder.event_initiation_timestamp
)
if not event:
continue
if event.status == CALLBACK_RECEIVED:
continue
if Call.inbound_entry_exists(
recipient.doc_type,
recipient.get_id,
reminder.event_initiation_timestamp
):
event.status = CALLBACK_RECEIVED
event.save()
continue
else:
continue
if (reminder.callback_try_count >=
len(current_event.callback_timeout_intervals)):
# On the last callback timeout, instead of sending the SMS
# again, log the missed callback
if event:
event.status = CALLBACK_MISSED
event.save()
else:
send_message = True
else:
# It's the first time sending the sms, so create an expected
# callback event
send_message = True
event = ExpectedCallback.objects.create(
domain=reminder.domain,
date=reminder.event_initiation_timestamp,
couch_recipient_doc_type=recipient.doc_type,
couch_recipient=recipient.get_id,
status=CALLBACK_PENDING,
)
if send_message:
fire_sms_event(reminder, handler, [recipient], verified_numbers,
logged_event, workflow=WORKFLOW_CALLBACK)
def fire_sms_survey_event(reminder, handler, recipients, verified_numbers, logged_event):
if reminder.callback_try_count > 0:
# Handle timeouts
if handler.submit_partial_forms and (reminder.callback_try_count == len(reminder.current_event.callback_timeout_intervals)):
# Submit partial form completions
for session_id in reminder.xforms_session_ids:
submit_unfinished_form(session_id, handler.include_case_side_effects)
else:
# Resend current question
for session_id in reminder.xforms_session_ids:
session = get_session_by_session_id(session_id)
if session.end_time is None:
vn = verified_numbers.get(session.connection_id)
if vn is not None:
metadata = MessageMetadata(
workflow=get_workflow(handler),
reminder_id=reminder._id,
xforms_session_couch_id=session._id,
)
resp = current_question(session_id)
send_sms_to_verified_number(vn, resp.event.text_prompt, metadata,
logged_subevent=session.related_subevent)
else:
reminder.xforms_session_ids = []
domain_obj = Domain.get_by_name(reminder.domain, strict=True)
# Get the app, module, and form
try:
form_unique_id = reminder.current_event.form_unique_id
form = Form.get_form(form_unique_id)
app = form.get_app()
module = form.get_module()
except Exception:
logged_event.error(MessagingEvent.ERROR_CANNOT_FIND_FORM)
return
# Start a touchforms session for each recipient
for recipient in recipients:
logged_subevent = logged_event.create_subevent(handler, reminder, recipient)
verified_number, unverified_number = get_recipient_phone_number(
reminder, recipient, verified_numbers)
no_verified_number = verified_number is None
cant_use_unverified_number = (unverified_number is None or
not domain_obj.send_to_duplicated_case_numbers or
form_requires_input(form))
if no_verified_number and cant_use_unverified_number:
logged_subevent.error(MessagingEvent.ERROR_NO_TWO_WAY_PHONE_NUMBER)
continue
key = "start-sms-survey-for-contact-%s" % recipient.get_id
with CriticalSection([key], timeout=60):
# Get the case to submit the form against, if any
if (is_commcarecase(recipient) and
not handler.force_surveys_to_use_triggered_case):
case_id = recipient.case_id
else:
case_id = reminder.case_id
if form.requires_case() and not case_id:
logged_subevent.error(MessagingEvent.ERROR_NO_CASE_GIVEN)
continue
# Close all currently open sessions
SQLXFormsSession.close_all_open_sms_sessions(reminder.domain, recipient.get_id)
# Start the new session
try:
session, responses = start_session(reminder.domain, recipient,
app, module, form, case_id,
case_for_case_submission=handler.force_surveys_to_use_triggered_case)
except TouchformsError as e:
human_readable_message = e.response_data.get('human_readable_message', None)
logged_subevent.error(MessagingEvent.ERROR_TOUCHFORMS_ERROR,
additional_error_text=human_readable_message)
if touchforms_error_is_config_error(e):
# Don't reraise the exception because this means there are configuration
# issues with the form that need to be fixed
continue
else:
# Reraise the exception so that the framework retries it again later
raise
except Exception as e:
logged_subevent.error(MessagingEvent.ERROR_TOUCHFORMS_ERROR)
# Reraise the exception so that the framework retries it again later
raise
session.survey_incentive = handler.survey_incentive
session.workflow = get_workflow(handler)
session.reminder_id = reminder._id
session.save()
reminder.xforms_session_ids.append(session.session_id)
logged_subevent.xforms_session = session
logged_subevent.save()
# Send out first message
if len(responses) > 0:
message = format_message_list(responses)
metadata = MessageMetadata(
workflow=get_workflow(handler),
reminder_id=reminder._id,
xforms_session_couch_id=session._id,
)
if verified_number:
send_sms_to_verified_number(verified_number, message, metadata,
logged_subevent=logged_subevent)
else:
send_sms(reminder.domain, recipient, unverified_number,
message, metadata)
logged_subevent.completed()
def fire_ivr_survey_event(reminder, handler, recipients, verified_numbers, logged_event):
domain_obj = Domain.get_by_name(reminder.domain, strict=True)
for recipient in recipients:
initiate_call = True
if reminder.callback_try_count > 0 and reminder.event_initiation_timestamp:
initiate_call = not Call.answered_call_exists(
recipient.doc_type,
recipient.get_id,
reminder.event_initiation_timestamp
)
if initiate_call:
if (is_commcarecase(recipient) and
not handler.force_surveys_to_use_triggered_case):
case_id = recipient.case_id
else:
case_id = reminder.case_id
verified_number, unverified_number = get_recipient_phone_number(
reminder, recipient, verified_numbers)
if verified_number:
initiate_outbound_call.delay(
recipient,
reminder.current_event.form_unique_id,
handler.submit_partial_forms,
handler.include_case_side_effects,
handler.max_question_retries,
logged_event.pk,
verified_number=verified_number,
case_id=case_id,
case_for_case_submission=handler.force_surveys_to_use_triggered_case,
timestamp=CaseReminderHandler.get_now(),
)
elif domain_obj.send_to_duplicated_case_numbers and unverified_number:
initiate_outbound_call.delay(
recipient,
reminder.current_event.form_unique_id,
handler.submit_partial_forms,
handler.include_case_side_effects,
handler.max_question_retries,
logged_event.pk,
unverified_number=unverified_number,
case_id=case_id,
case_for_case_submission=handler.force_surveys_to_use_triggered_case,
timestamp=CaseReminderHandler.get_now(),
)
else:
# initiate_outbound_call will create the subevent automatically,
# so since we're not initiating the call here, we have to create
# the subevent explicitly in order to log the error.
logged_subevent = logged_event.create_subevent(handler, reminder, recipient)
logged_subevent.error(MessagingEvent.ERROR_NO_PHONE_NUMBER)
def fire_email_event(reminder, handler, recipients, verified_numbers, logged_event):
current_event = reminder.current_event
case = reminder.case
template_params = get_message_template_params(case)
email_usage = EmailUsage.get_or_create_usage_record(reminder.domain)
is_trial = domain_is_on_trial(reminder.domain)
uses_custom_content_handler, content_handler = get_custom_content_handler(handler, logged_event)
if uses_custom_content_handler and not content_handler:
return
for recipient in recipients:
logged_subevent = logged_event.create_subevent(handler, reminder, recipient)
try:
lang = recipient.get_language_code()
except Exception:
lang = None
if content_handler:
subject, message = content_handler(reminder, handler, recipient)
else:
subject = current_event.subject.get(lang, current_event.subject[handler.default_lang])
message = current_event.message.get(lang, current_event.message[handler.default_lang])
try:
subject = Message.render(subject, **template_params)
message = Message.render(message, **template_params)
except Exception:
logged_subevent.error(MessagingEvent.ERROR_CANNOT_RENDER_MESSAGE)
continue
subject = subject or '(No Subject)'
if message:
try:
email_address = recipient.get_email()
except Exception:
email_address = None
if email_address:
if is_trial and EmailUsage.get_total_count(reminder.domain) >= TRIAL_MAX_EMAILS:
logged_subevent.error(MessagingEvent.ERROR_TRIAL_EMAIL_LIMIT_REACHED)
continue
send_mail_async.delay(subject, message, settings.DEFAULT_FROM_EMAIL, [email_address])
email_usage.update_count()
else:
logged_subevent.error(MessagingEvent.ERROR_NO_EMAIL_ADDRESS)
continue
logged_subevent.completed()
# The dictionary which maps an event type to its event handling method
EVENT_HANDLER_MAP = {
METHOD_SMS: fire_sms_event,
METHOD_SMS_CALLBACK: fire_sms_callback_event,
METHOD_SMS_SURVEY: fire_sms_survey_event,
METHOD_IVR_SURVEY: fire_ivr_survey_event,
METHOD_EMAIL: fire_email_event,
}
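# Hedged dispatch sketch (assumption): CaseReminderHandler.fire() is expected
# to pick the event handling method from this map based on the handler's
# configured method, roughly:
#
#   event_handler = EVENT_HANDLER_MAP.get(handler.method)
#   if event_handler:
#       event_handler(reminder, handler, recipients, verified_numbers,
#                     logged_event)
#
# `handler.method` is assumed here; the exact attribute lives on
# CaseReminderHandler outside this module.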
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import re
import string
import six
from nova import exception
from nova.i18n import _
from nova.pci import utils
MAX_VENDOR_ID = 0xFFFF
MAX_PRODUCT_ID = 0xFFFF
MAX_FUNC = 0x7
MAX_DOMAIN = 0xFFFF
MAX_BUS = 0xFF
MAX_SLOT = 0x1F
ANY = '*'
REGEX_ANY = '.*'
def get_pci_dev_info(pci_obj, property, max, hex_value):
a = getattr(pci_obj, property)
if a == ANY:
return
try:
v = int(a, 16)
except ValueError:
raise exception.PciConfigInvalidWhitelist(
reason = "invalid %s %s" % (property, a))
if v > max:
raise exception.PciConfigInvalidWhitelist(
reason=_("invalid %(property)s %(attr)s") %
{'property': property, 'attr': a})
setattr(pci_obj, property, hex_value % v)
@six.add_metaclass(abc.ABCMeta)
class PciAddressSpec(object):
"""Abstract class for all PCI address spec styles
This class checks the address fields of the pci.passthrough_whitelist
"""
@abc.abstractmethod
def match(self, pci_addr):
pass
def is_single_address(self):
return all([
all(c in string.hexdigits for c in self.domain),
all(c in string.hexdigits for c in self.bus),
all(c in string.hexdigits for c in self.slot),
all(c in string.hexdigits for c in self.func)])
class PhysicalPciAddress(PciAddressSpec):
"""Manages the address fields for a fully-qualified PCI address.
This class validates the address fields for a single PCI device.
"""
def __init__(self, pci_addr):
try:
if isinstance(pci_addr, dict):
self.domain = pci_addr['domain']
self.bus = pci_addr['bus']
self.slot = pci_addr['slot']
self.func = pci_addr['function']
else:
self.domain, self.bus, self.slot, self.func = (
utils.get_pci_address_fields(pci_addr))
get_pci_dev_info(self, 'func', MAX_FUNC, '%1x')
get_pci_dev_info(self, 'domain', MAX_DOMAIN, '%04x')
get_pci_dev_info(self, 'bus', MAX_BUS, '%02x')
get_pci_dev_info(self, 'slot', MAX_SLOT, '%02x')
except (KeyError, ValueError):
raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
def match(self, phys_pci_addr):
conditions = [
self.domain == phys_pci_addr.domain,
self.bus == phys_pci_addr.bus,
self.slot == phys_pci_addr.slot,
self.func == phys_pci_addr.func,
]
return all(conditions)
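# Illustrative example (derived from the code above, not part of the original
# module): both constructor forms normalize to the same canonical fields,
#   PhysicalPciAddress('0000:0a:00.1')
#   PhysicalPciAddress({'domain': '0000', 'bus': '0a', 'slot': '00',
#                       'function': '1'})
# yield domain='0000', bus='0a', slot='00', func='1' after get_pci_dev_info()
# re-formats each field as fixed-width hex.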
class PciAddressGlobSpec(PciAddressSpec):
"""Manages the address fields with glob style.
This class validates glob-style address fields, checks for wildcards,
and inserts wildcards where a field is left blank.
"""
def __init__(self, pci_addr):
self.domain = ANY
self.bus = ANY
self.slot = ANY
self.func = ANY
dbs, sep, func = pci_addr.partition('.')
if func:
self.func = func.strip()
get_pci_dev_info(self, 'func', MAX_FUNC, '%01x')
if dbs:
dbs_fields = dbs.split(':')
if len(dbs_fields) > 3:
raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
# If we got a partial address like ":00.", we need to turn this
# into a domain of ANY, a bus of ANY, and a slot of 00. This code
# allows the address bus and/or domain to be left off
dbs_all = [ANY] * (3 - len(dbs_fields))
dbs_all.extend(dbs_fields)
dbs_checked = [s.strip() or ANY for s in dbs_all]
self.domain, self.bus, self.slot = dbs_checked
get_pci_dev_info(self, 'domain', MAX_DOMAIN, '%04x')
get_pci_dev_info(self, 'bus', MAX_BUS, '%02x')
get_pci_dev_info(self, 'slot', MAX_SLOT, '%02x')
def match(self, phys_pci_addr):
conditions = [
self.domain in (ANY, phys_pci_addr.domain),
self.bus in (ANY, phys_pci_addr.bus),
self.slot in (ANY, phys_pci_addr.slot),
self.func in (ANY, phys_pci_addr.func)
]
return all(conditions)
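# Illustrative example (derived from the parsing above): a glob spec such as
#   PciAddressGlobSpec('*:0a:00.*')
# parses to domain='*', bus='0a', slot='00', func='*', so it matches any
# function of device 00 on bus 0a in any domain, e.g. the physical address
# '0000:0a:00.1'.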
class PciAddressRegexSpec(PciAddressSpec):
"""Manages the address fields with regex style.
This class validates regex-style address fields. The validation checks
all PCI address attributes and validates their regexes.
"""
def __init__(self, pci_addr):
try:
self.domain = pci_addr.get('domain', REGEX_ANY)
self.bus = pci_addr.get('bus', REGEX_ANY)
self.slot = pci_addr.get('slot', REGEX_ANY)
self.func = pci_addr.get('function', REGEX_ANY)
self.domain_regex = re.compile(self.domain)
self.bus_regex = re.compile(self.bus)
self.slot_regex = re.compile(self.slot)
self.func_regex = re.compile(self.func)
except re.error:
raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
def match(self, phys_pci_addr):
conditions = [
bool(self.domain_regex.match(phys_pci_addr.domain)),
bool(self.bus_regex.match(phys_pci_addr.bus)),
bool(self.slot_regex.match(phys_pci_addr.slot)),
bool(self.func_regex.match(phys_pci_addr.func))
]
return all(conditions)
class WhitelistPciAddress(object):
"""Manages the address fields of the whitelist.
This class checks and validates the address fields of the
pci.passthrough_whitelist configuration option.
Example configs are:
| [pci]
| passthrough_whitelist = {"address":"*:0a:00.*",
| "physical_network":"physnet1"}
| passthrough_whitelist = {"address": {"domain": ".*",
"bus": "02",
"slot": "01",
"function": "[0-2]"},
"physical_network":"net1"}
| passthrough_whitelist = {"vendor_id":"1137","product_id":"0071"}
"""
def __init__(self, pci_addr, is_physical_function):
self.is_physical_function = is_physical_function
self._init_address_fields(pci_addr)
def _check_physical_function(self):
if self.pci_address_spec.is_single_address():
self.is_physical_function = (
utils.is_physical_function(
self.pci_address_spec.domain,
self.pci_address_spec.bus,
self.pci_address_spec.slot,
self.pci_address_spec.func))
def _init_address_fields(self, pci_addr):
if not self.is_physical_function:
if isinstance(pci_addr, six.string_types):
self.pci_address_spec = PciAddressGlobSpec(pci_addr)
elif isinstance(pci_addr, dict):
self.pci_address_spec = PciAddressRegexSpec(pci_addr)
else:
raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
self._check_physical_function()
else:
self.pci_address_spec = PhysicalPciAddress(pci_addr)
def match(self, pci_addr, pci_phys_addr):
"""Match a device to this PciAddress. Assume this is called given
pci_addr and pci_phys_addr reported by libvirt, no attempt is made to
verify if pci_addr is a VF of pci_phys_addr.
:param pci_addr: PCI address of the device to match.
:param pci_phys_addr: PCI address of the parent of the device to match
(or None if the device is not a VF).
"""
# Try to match on the parent PCI address if the PciDeviceSpec is a
# PF (SR-IOV is available) and the device to match is a VF. This
# makes it possible to specify the PCI address of a PF in the
# pci_passthrough_whitelist to match any of its VFs' PCI devices.
if self.is_physical_function and pci_phys_addr:
pci_phys_addr_obj = PhysicalPciAddress(pci_phys_addr)
if self.pci_address_spec.match(pci_phys_addr_obj):
return True
# Try to match on the device PCI address only.
pci_addr_obj = PhysicalPciAddress(pci_addr)
return self.pci_address_spec.match(pci_addr_obj)
class PciDeviceSpec(object):
def __init__(self, dev_spec):
self.tags = dev_spec
self._init_dev_details()
def _init_dev_details(self):
self.vendor_id = self.tags.pop("vendor_id", ANY)
self.product_id = self.tags.pop("product_id", ANY)
# Note(moshele): The address attribute can be a string or a dict.
# For glob syntax or specific pci it is a string and for regex syntax
# it is a dict. The WhitelistPciAddress class handles both types.
self.address = self.tags.pop("address", None)
self.dev_name = self.tags.pop("devname", None)
self.vendor_id = self.vendor_id.strip()
get_pci_dev_info(self, 'vendor_id', MAX_VENDOR_ID, '%04x')
get_pci_dev_info(self, 'product_id', MAX_PRODUCT_ID, '%04x')
if self.address and self.dev_name:
raise exception.PciDeviceInvalidDeviceName()
if not self.dev_name:
pci_address = self.address or "*:*:*.*"
self.address = WhitelistPciAddress(pci_address, False)
def match(self, dev_dict):
if self.dev_name:
address_str, pf = utils.get_function_by_ifname(
self.dev_name)
if not address_str:
return False
# Note(moshele): In this case we are always passing a string
# of the PF PCI address.
address_obj = WhitelistPciAddress(address_str, pf)
elif self.address:
address_obj = self.address
return all([
self.vendor_id in (ANY, dev_dict['vendor_id']),
self.product_id in (ANY, dev_dict['product_id']),
address_obj.match(dev_dict['address'],
dev_dict.get('parent_addr'))])
def match_pci_obj(self, pci_obj):
return self.match({'vendor_id': pci_obj.vendor_id,
'product_id': pci_obj.product_id,
'address': pci_obj.address,
'parent_addr': pci_obj.parent_addr})
def get_tags(self):
return self.tags
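# Hedged usage sketch (derived from the classes above, not part of the
# original module): a pci.passthrough_whitelist entry such as
#   {"vendor_id": "1137", "product_id": "0071", "address": "*:0a:00.*",
#    "physical_network": "physnet1"}
# is wrapped as
#   spec = PciDeviceSpec({"vendor_id": "1137", "product_id": "0071",
#                         "address": "*:0a:00.*",
#                         "physical_network": "physnet1"})
# after which
#   spec.match({'vendor_id': '1137', 'product_id': '0071',
#               'address': '0000:0a:00.1', 'parent_addr': None})
# returns True, and spec.get_tags() is left with the remaining tags,
# here {"physical_network": "physnet1"}.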
|
|
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""An implementation of a data store based on mysql."""
import Queue
import re
import thread
import threading
import time
import MySQLdb
from MySQLdb import cursors
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import rdfvalue
from grr.lib import utils
# pylint: disable=nonstandard-exception
class Error(data_store.Error):
"""Base class for all exceptions in this module."""
# pylint: enable=nonstandard-exception
class MySQLConnection(object):
"""A Class to manage MySQL database connections."""
def __init__(self, queue=None):
self.queue = queue
try:
self._MakeConnection(database=config_lib.CONFIG["Mysql.database_name"])
except MySQLdb.OperationalError as e:
# Database does not exist
if "Unknown database" in str(e):
dbh = self._MakeConnection()
cursor = dbh.cursor()
cursor.execute("Create database `%s`" %
config_lib.CONFIG["Mysql.database_name"])
self._MakeConnection(database=config_lib.CONFIG["Mysql.database_name"])
else:
raise
def _MakeConnection(self, database=""):
try:
connection_args = dict(
user=config_lib.CONFIG["Mysql.database_username"],
db=database, charset="utf8",
passwd=config_lib.CONFIG["Mysql.database_password"],
cursorclass=cursors.DictCursor)
if config_lib.CONFIG["Mysql.host"]:
connection_args["host"] = config_lib.CONFIG["Mysql.host"]
if config_lib.CONFIG["Mysql.port"]:
connection_args["port"] = config_lib.CONFIG["Mysql.port"]
self.dbh = MySQLdb.connect(**connection_args)
self.cursor = self.dbh.cursor()
self.cursor.connection.autocommit(True)
return self.dbh
except MySQLdb.OperationalError as e:
# This is a fatal error, we just raise the top level exception here.
if "Access denied" in str(e):
raise Error(str(e))
raise
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
if self.queue:
self.queue.put(self)
def Execute(self, *args):
"""Executes a query."""
retries = 10
for _ in range(1, retries):
try:
self.cursor.execute(*args)
return self.cursor.fetchall()
except MySQLdb.Error:
time.sleep(.2)
try:
database = config_lib.CONFIG["Mysql.database_name"]
self._MakeConnection(database=database)
except MySQLdb.OperationalError:
pass
# If something goes wrong at this point, we just let it raise.
self.cursor.execute(*args)
return self.cursor.fetchall()
class ConnectionPool(object):
"""A pool of connections to the mysql server.
Usage:
with data_store.DB.pool.GetConnection() as connection:
connection.Execute(.....)
"""
def __init__(self, pool_size=5):
self.connections = Queue.Queue()
for _ in range(pool_size):
self.connections.put(MySQLConnection(self.connections))
def GetConnection(self):
return self.connections.get(block=True)
class MySQLAdvancedDataStore(data_store.DataStore):
"""A mysql based data store."""
POOL = None
SYSTEM_TABLE = "system"
def __init__(self):
# Use the global connection pool.
if MySQLAdvancedDataStore.POOL is None:
MySQLAdvancedDataStore.POOL = ConnectionPool()
self.pool = self.POOL
self.to_insert = []
self._CalculateAttributeStorageTypes()
self.database_name = config_lib.CONFIG["Mysql.database_name"]
self.lock = threading.Lock()
super(MySQLAdvancedDataStore, self).__init__()
def Initialize(self):
try:
self._ExecuteQuery("desc `aff4`")
except MySQLdb.Error:
self.RecreateTables()
def DropTables(self):
"""Drop all existing tables."""
rows = self._ExecuteQuery(
"SELECT table_name FROM information_schema.tables "
"WHERE table_schema='%s'" % self.database_name)
for row in rows:
self._ExecuteQuery("DROP TABLE `%s`" % row["table_name"])
def RecreateTables(self):
"""Drops the tables and creates a new ones."""
self.DropTables()
self._CreateTables()
def Transaction(self, subject, lease_time=None, token=None):
return MySQLTransaction(self, subject, lease_time=lease_time, token=token)
def Size(self):
query = ("SELECT table_schema, Sum(data_length + index_length) `size` "
"FROM information_schema.tables "
"WHERE table_schema = \"%s\" GROUP by table_schema" %
self.database_name)
result = self._ExecuteQuery(query, [])
if len(result) != 1:
return -1
return int(result[0]["size"])
def DeleteAttributes(self, subject, attributes, start=None, end=None,
sync=True, token=None):
"""Remove some attributes from a subject."""
_ = sync # Unused
self.security_manager.CheckDataStoreAccess(token, [subject], "w")
if not attributes:
return
for attribute in attributes:
timestamp = self._MakeTimestamp(start, end)
attribute = utils.SmartUnicode(attribute)
transaction = self._BuildDelete(subject, attribute, timestamp)
self._ExecuteTransaction(transaction)
def DeleteSubject(self, subject, sync=False, token=None):
_ = sync
self.security_manager.CheckDataStoreAccess(token, [subject], "w")
transaction = self._BuildDelete(subject)
self._ExecuteTransaction(transaction)
def ResolveMulti(self, subject, attributes, timestamp=None, limit=None,
token=None):
"""Resolves multiple attributes at once for one subject."""
self.security_manager.CheckDataStoreAccess(
token, [subject], self.GetRequiredResolveAccess(attributes))
for attribute in attributes:
query, args = self._BuildQuery(subject, attribute, timestamp, limit)
result = self._ExecuteQuery(query, args)
for row in result:
value = self._Decode(attribute, row["value"])
yield attribute, value, rdfvalue.RDFDatetime(row["timestamp"])
if limit:
limit -= len(result)
if limit is not None and limit <= 0:
break
def MultiResolveRegex(self, subjects, attribute_regex, timestamp=None,
limit=None, token=None):
"""Result multiple subjects using one or more attribute regexps."""
result = {}
for subject in subjects:
values = self.ResolveRegex(subject, attribute_regex, token=token,
timestamp=timestamp, limit=limit)
if values:
result[subject] = values
if limit:
limit -= len(values)
if limit is not None and limit <= 0:
break
return result.iteritems()
def ResolveRegex(self, subject, attribute_regex, timestamp=None, limit=None,
token=None):
"""ResolveRegex."""
self.security_manager.CheckDataStoreAccess(
token, [subject], self.GetRequiredResolveAccess(attribute_regex))
if isinstance(attribute_regex, basestring):
attribute_regex = [attribute_regex]
results = []
for regex in attribute_regex:
query, args = self._BuildQuery(subject, regex, timestamp, limit,
is_regex=True)
rows = self._ExecuteQuery(query, args)
for row in rows:
attribute = row["attribute"]
value = self._Decode(attribute, row["value"])
results.append((attribute, value, row["timestamp"]))
return results
def MultiSet(self, subject, values, timestamp=None, replace=True, sync=True,
to_delete=None, token=None):
"""Set multiple attributes' values for this subject in one operation."""
self.security_manager.CheckDataStoreAccess(token, [subject], "w")
to_delete = set(to_delete or [])
if timestamp is None:
timestamp = time.time() * 1e6
# Prepare a bulk insert operation.
subject = utils.SmartUnicode(subject)
to_insert = []
to_replace = []
# Build a document for each unique timestamp.
for attribute, sequence in values.items():
for value in sequence:
entry_timestamp = None
if isinstance(value, tuple):
value, entry_timestamp = value
if entry_timestamp is None:
entry_timestamp = timestamp
attribute = utils.SmartUnicode(attribute)
data = self._Encode(value)
# Replacing means to delete all versions of the attribute first.
if replace or attribute in to_delete:
duplicates = self._CountDuplicateAttributes(subject, attribute)
if duplicates > 1:
to_delete.add(attribute)
to_insert.append(
[subject, attribute, data, int(entry_timestamp)])
else:
if attribute in to_delete:
to_delete.remove(attribute)
if duplicates == 0:
to_insert.append(
[subject, attribute, data, int(entry_timestamp)])
elif duplicates == 1:
to_replace.append(
[subject, attribute, data, int(entry_timestamp)])
else:
to_insert.append(
[subject, attribute, data, int(entry_timestamp)])
if to_delete:
self.DeleteAttributes(subject, to_delete, token=token)
if to_replace:
transaction = self._BuildReplaces(to_replace)
self._ExecuteTransaction(transaction)
if to_insert:
if sync:
transaction = self._BuildInserts(to_insert)
self._ExecuteTransaction(transaction)
else:
with self.lock:
self.to_insert.extend(to_insert)
def Flush(self):
with self.lock:
to_insert = self.to_insert
self.to_insert = []
if to_insert:
transaction = self._BuildInserts(to_insert)
self._ExecuteTransaction(transaction)
def _CountDuplicateAttributes(self, subject, attribute):
query = ("SELECT count(*) AS total FROM aff4 "
"WHERE subject_hash=unhex(md5(%s)) "
"AND attribute_hash=unhex(md5(%s))")
args = [subject, attribute]
result = self._ExecuteQuery(query, args)
return int(result[0]["total"])
def _BuildReplaces(self, values):
transaction = []
for (subject, attribute, value, timestamp) in values:
aff4_q = {}
aff4_q["query"] = (
"UPDATE aff4 SET value=%s, timestamp=%s "
"WHERE subject_hash=unhex(md5(%s)) "
"AND attribute_hash=unhex(md5(%s))")
aff4_q["args"] = [value, timestamp, subject, attribute]
transaction.append(aff4_q)
return transaction
def _BuildInserts(self, values):
subjects_q = {}
attributes_q = {}
aff4_q = {}
subjects_q["query"] = "INSERT IGNORE INTO subjects (hash, subject) VALUES"
attributes_q["query"] = (
"INSERT IGNORE INTO attributes (hash, attribute) VALUES")
aff4_q["query"] = (
"INSERT INTO aff4 (subject_hash, attribute_hash, "
"timestamp, value) VALUES")
subjects_q["args"] = []
attributes_q["args"] = []
aff4_q["args"] = []
seen = {}
seen["subjects"] = []
seen["attributes"] = []
for (subject, attribute, value, timestamp) in values:
if subject not in seen["subjects"]:
subjects_q["args"].extend([subject, subject])
seen["subjects"].append(subject)
if attribute not in seen["attributes"]:
attributes_q["args"].extend([attribute, attribute])
seen["attributes"].append(attribute)
aff4_q["args"].extend([subject, attribute, timestamp, value])
subjects_q["query"] += ", ".join(
["(unhex(md5(%s)), %s)"] * (len(subjects_q["args"]) / 2))
attributes_q["query"] += ", ".join(
["(unhex(md5(%s)), %s)"] * (len(attributes_q["args"]) / 2))
aff4_q["query"] += ", ".join(
["(unhex(md5(%s)), unhex(md5(%s)), %s, %s)"] * (
len(aff4_q["args"]) / 4))
return [aff4_q, subjects_q, attributes_q]
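# Illustrative note (derived from the string building above): for a single
# (subject, attribute, value, timestamp) row the generated statements look
# roughly like
#   INSERT IGNORE INTO subjects (hash, subject) VALUES(unhex(md5(%s)), %s)
#   INSERT IGNORE INTO attributes (hash, attribute) VALUES(unhex(md5(%s)), %s)
#   INSERT INTO aff4 (subject_hash, attribute_hash, timestamp, value)
#     VALUES(unhex(md5(%s)), unhex(md5(%s)), %s, %s)
# with the args lists supplying the corresponding values in order.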
def _ExecuteTransaction(self, transaction):
"""Get connection from pool and execute query."""
for query in transaction:
with self.pool.GetConnection() as cursor:
cursor.Execute(query["query"], query["args"])
def _CalculateAttributeStorageTypes(self):
"""Build a mapping between column names and types."""
self.attribute_types = {}
for attribute in aff4.Attribute.PREDICATES.values():
self.attribute_types[attribute.predicate] = (
attribute.attribute_type.data_store_type)
def _Encode(self, value):
"""Encode the value for the attribute."""
try:
return buffer(value.SerializeToString())
except AttributeError:
# Types "string" and "bytes" are stored as strings here.
return buffer(utils.SmartStr(value))
def _Decode(self, attribute, value):
required_type = self.attribute_types.get(attribute, "bytes")
if isinstance(value, buffer):
value = str(value)
if required_type in ("integer", "unsigned_integer"):
return int(value)
elif required_type == "string":
return utils.SmartUnicode(value)
else:
return value
def _BuildQuery(self, subject, attribute=None, timestamp=None,
limit=None, is_regex=False):
"""Build the SELECT query to be executed."""
args = []
fields = ""
criteria = "WHERE aff4.subject_hash=unhex(md5(%s))"
args.append(subject)
sorting = ""
tables = "FROM aff4"
subject = utils.SmartUnicode(subject)
# Set fields, tables, and criteria and append args
if attribute is not None:
if is_regex:
tables += " JOIN attributes ON aff4.attribute_hash=attributes.hash"
regex = re.match(r'(^[a-zA-Z0-9_\- /:]+)(.*)', attribute)
if not regex:
# If attribute has no prefix just rlike
criteria += " AND attributes.attribute rlike %s"
args.append(attribute)
else:
rlike = regex.groups()[1]
if rlike:
# If there is a regex component attempt to replace with like
like = regex.groups()[0] + "%"
criteria += " AND attributes.attribute like %s"
args.append(like)
# If the regex portion is not a match all regex then add rlike
if not (rlike == ".*" or rlike == ".+"):
criteria += " AND attributes.attribute rlike %s"
args.append(rlike)
else:
# If no regex component then treat as full attribute
criteria += " AND aff4.attribute_hash=unhex(md5(%s))"
args.append(attribute)
else:
criteria += " AND aff4.attribute_hash=unhex(md5(%s))"
args.append(attribute)
# Limit to time range if specified
if isinstance(timestamp, (tuple, list)):
criteria += " AND aff4.timestamp >= %s AND aff4.timestamp <= %s"
args.append(int(timestamp[0]))
args.append(int(timestamp[1]))
fields = "aff4.value, aff4.timestamp"
if is_regex:
fields += ", attributes.attribute"
# Modify fields and sorting for timestamps.
if timestamp is None or timestamp == self.NEWEST_TIMESTAMP:
tables += (" JOIN (SELECT attribute_hash, MAX(timestamp) timestamp "
"%s %s GROUP BY attribute_hash) maxtime ON "
"aff4.attribute_hash=maxtime.attribute_hash AND "
"aff4.timestamp=maxtime.timestamp") % (tables, criteria)
criteria = "WHERE aff4.subject_hash=unhex(md5(%s))"
args.append(subject)
else:
# Always order results.
sorting = "ORDER BY aff4.timestamp DESC"
# Add limit if set.
if limit:
sorting += " LIMIT %s" % int(limit)
query = " ".join(["SELECT", fields, tables, criteria, sorting])
return (query, args)
def _BuildDelete(self, subject, attribute=None, timestamp=None):
"""Build the DELETE query to be executed."""
subjects_q = {}
attributes_q = {}
aff4_q = {}
subjects_q["query"] = (
"DELETE subjects FROM subjects WHERE hash=unhex(md5(%s))")
subjects_q["args"] = [subject]
aff4_q["query"] = "DELETE aff4 FROM aff4 WHERE subject_hash=unhex(md5(%s))"
aff4_q["args"] = [subject]
attributes_q["query"] = ""
attributes_q["args"] = []
if attribute:
aff4_q["query"] += " AND attribute_hash=unhex(md5(%s))"
aff4_q["args"].append(attribute)
if isinstance(timestamp, (tuple, list)):
aff4_q["query"] += " AND aff4.timestamp >= %s AND aff4.timestamp <= %s"
aff4_q["args"].append(int(timestamp[0]))
aff4_q["args"].append(int(timestamp[1]))
subjects_q["query"] = (
"DELETE subjects FROM subjects "
"LEFT JOIN aff4 ON aff4.subject_hash=subjects.hash "
"WHERE subjects.hash=unhex(md5(%s)) "
"AND aff4.subject_hash IS NULL")
attributes_q["query"] = (
"DELETE attributes FROM attributes "
"LEFT JOIN aff4 ON aff4.attribute_hash=attributes.hash "
"WHERE attributes.hash=unhex(md5(%s)) "
"AND aff4.attribute_hash IS NULL")
attributes_q["args"].append(attribute)
return [aff4_q, subjects_q, attributes_q]
return [aff4_q, subjects_q]
def _ExecuteQuery(self, *args):
"""Get connection from pool and execute query."""
with self.pool.GetConnection() as cursor:
result = cursor.Execute(*args)
return result
def _MakeTimestamp(self, start, end):
"""Create a timestamp using a start and end time.
Args:
start: Start timestamp.
end: End timestamp.
Returns:
A tuple (start, end) of converted timestamps or None for all time.
"""
if start or end:
mysql_unsigned_bigint_max = 18446744073709551615
start = int(start or 0)
end = int(end or mysql_unsigned_bigint_max)
if start == 0 and end == mysql_unsigned_bigint_max:
return None
else:
return (start, end)
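# Illustrative examples (derived from the logic above):
#   self._MakeTimestamp(None, None) -> None      # all time
#   self._MakeTimestamp(0, 0)       -> None      # all time
#   self._MakeTimestamp(5, 10)      -> (5, 10)
#   self._MakeTimestamp(5, None)    -> (5, 18446744073709551615)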
def _CreateTables(self):
self._ExecuteQuery("""
CREATE TABLE IF NOT EXISTS `subjects` (
hash BINARY(16) PRIMARY KEY NOT NULL,
subject TEXT CHARACTER SET utf8 NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT ='Table for storing subjects';
""")
self._ExecuteQuery("""
CREATE TABLE IF NOT EXISTS `attributes` (
hash BINARY(16) PRIMARY KEY NOT NULL,
attribute VARCHAR(2048) CHARACTER SET utf8 DEFAULT NULL,
KEY `attribute` (`attribute`(32))
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT ='Table storing attributes';
""")
self._ExecuteQuery("""
CREATE TABLE IF NOT EXISTS `aff4` (
id BIGINT UNSIGNED PRIMARY KEY NOT NULL AUTO_INCREMENT,
subject_hash BINARY(16) NOT NULL,
attribute_hash BINARY(16) NOT NULL,
timestamp BIGINT UNSIGNED DEFAULT NULL,
value LONGBLOB NULL,
KEY `master` (`subject_hash`,`attribute_hash`,`timestamp`),
KEY `attribute` (`attribute_hash`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8
COMMENT ='Table representing AFF4 objects';
""")
self._ExecuteQuery("""
CREATE TABLE IF NOT EXISTS `locks` (
subject_hash BINARY(16) PRIMARY KEY NOT NULL,
lock_owner BIGINT UNSIGNED DEFAULT NULL,
lock_expiration BIGINT UNSIGNED DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8
COMMENT ='Table representing locks on subjects';
""")
class MySQLTransaction(data_store.CommonTransaction):
"""The Mysql data store transaction object.
This object does not aim to ensure ACID-like consistency. We only ensure that
two simultaneous locks cannot be held on the same AFF4 subject.
This means that the first thread which grabs the lock is considered the owner
of the transaction. Any subsequent transactions on the same subject will fail
immediately with data_store.TransactionError.
A lock is considered expired after a certain time.
"""
def __init__(self, store, subject, lease_time=None, token=None):
"""Ensure we can take a lock on this subject."""
super(MySQLTransaction, self).__init__(store, subject,
lease_time=lease_time, token=token)
if lease_time is None:
lease_time = config_lib.CONFIG["Datastore.transaction_timeout"]
self.lock_token = thread.get_ident()
self.lock_time = lease_time
self.expires_lock = int((time.time() + self.lock_time) * 1e6)
# This will take over the lock if the lock is too old.
query = (
"UPDATE locks SET lock_expiration=%s, lock_owner=%s "
"WHERE subject_hash=unhex(md5(%s)) "
"AND (lock_expiration < %s)")
args = [self.expires_lock, self.lock_token, subject, time.time() * 1e6]
self.ExecuteQuery(query, args)
self.CheckForLock()
def ExecuteQuery(self, query, args):
return self.store._ExecuteQuery(query, args) # pylint: disable=protected-access
def UpdateLease(self, lease_time):
self.expires_lock = int((time.time() + lease_time) * 1e6)
# This will take over the lock if the lock is too old.
query = (
"UPDATE locks SET lock_expiration=%s, lock_owner=%s "
"WHERE subject_hash=unhex(md5(%s))")
args = [self.expires_lock, self.lock_token, self.subject]
self.ExecuteQuery(query, args)
def CheckLease(self):
return max(0, self.expires_lock / 1e6 - time.time())
def CheckForLock(self):
"""Checks that the lock has stuck."""
query = ("SELECT lock_expiration, lock_owner FROM locks "
"WHERE subject_hash=unhex(md5(%s))")
args = [self.subject]
rows = self.ExecuteQuery(query, args)
for row in rows:
# We own this lock now.
if (row["lock_expiration"] == self.expires_lock and
row["lock_owner"] == self.lock_token):
return
else:
# Someone else owns this lock.
raise data_store.TransactionError("Subject %s is locked" % self.subject)
# If we get here the row does not exist:
query = ("INSERT IGNORE INTO locks "
"SET lock_expiration=%s, lock_owner=%s, "
"subject_hash=unhex(md5(%s))")
args = [self.expires_lock, self.lock_token, self.subject]
self.ExecuteQuery(query, args)
self.CheckForLock()
def Abort(self):
self._RemoveLock()
def Commit(self):
super(MySQLTransaction, self).Commit()
self._RemoveLock()
def _RemoveLock(self):
# Remove the lock on the document. Note that this only resets the lock if
# we actually hold it, since lock_expiration == self.expires_lock and
# lock_owner == self.lock_token.
query = ("UPDATE locks SET lock_expiration=0, lock_owner=0 "
"WHERE lock_expiration=%s "
"AND lock_owner=%s "
"AND subject_hash=unhex(md5(%s))")
args = [self.expires_lock, self.lock_token, self.subject]
self.ExecuteQuery(query, args)
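# Hedged usage sketch (derived from the class above, not part of the original
# module): callers are expected to take the lock, do their work, and then
# Commit() or Abort(), e.g.
#
#   try:
#     tx = store.Transaction(subject, lease_time=30, token=token)
#   except data_store.TransactionError:
#     ...  # someone else currently holds the lock on this subject
#   else:
#     try:
#       ...  # read / modify the subject while holding the lock
#       tx.Commit()
#     except Exception:
#       tx.Abort()
#       raise
#
# Note that TransactionError is raised from the constructor if the subject is
# already locked, per CheckForLock() above.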
|
|
#!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: cloudformation
short_description: Create or delete an AWS CloudFormation stack
description:
- Launches or updates an AWS CloudFormation stack and waits for it to complete.
notes:
- Cloudformation features change often, and this module tries to keep up. That means your botocore version should be fresh.
The version listed in the requirements is the oldest version that works with the module as a whole.
Some features may require recent versions, and we do not pinpoint a minimum version for each feature.
Instead of relying on the minimum version, keep botocore up to date. AWS is always releasing features and fixing bugs.
version_added: "1.1"
options:
stack_name:
description:
- name of the cloudformation stack
required: true
disable_rollback:
description:
- If a stack fails to form, rollback will remove the stack
type: bool
default: 'no'
on_create_failure:
description:
- Action to take upon failure of stack creation. Incompatible with the disable_rollback option.
choices:
- DO_NOTHING
- ROLLBACK
- DELETE
version_added: "2.8"
create_timeout:
description:
- The amount of time (in minutes) that can pass before the stack status becomes CREATE_FAILED
version_added: "2.6"
template_parameters:
description:
- A list of hashes of all the template variables for the stack. The value can be a string or a dict.
- Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
default: {}
state:
description:
- If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated.
If state is "absent", stack will be removed.
default: present
choices: [ present, absent ]
template:
description:
- The local path of the cloudformation template.
- This must be the full path to the file, relative to the working directory. If using roles this may look
like "roles/cloudformation/files/cloudformation-example.json".
- If 'state' is 'present' and the stack does not exist yet, either 'template', 'template_body' or 'template_url'
must be specified (but only one of them). If 'state' is 'present', the stack does exist, and neither 'template',
'template_body' nor 'template_url' are specified, the previous template will be reused.
notification_arns:
description:
- The Simple Notification Service (SNS) topic ARNs to publish stack related events.
version_added: "2.0"
stack_policy:
description:
- The path of the cloudformation stack policy. A policy cannot be removed once placed, but it can be modified.
For instance, allow all updates U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
version_added: "1.9"
tags:
description:
- Dictionary of tags to associate with stack and its resources during stack creation. Can be updated later, updating tags removes previous entries.
version_added: "1.4"
template_url:
description:
- Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region
as the stack.
- If 'state' is 'present' and the stack does not exist yet, either 'template', 'template_body' or 'template_url'
must be specified (but only one of them). If 'state' is present, the stack does exist, and neither 'template',
'template_body' nor 'template_url' are specified, the previous template will be reused.
version_added: "2.0"
create_changeset:
description:
- "If stack already exists create a changeset instead of directly applying changes.
See the AWS Change Sets docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html).
WARNING: if the stack does not exist, it will be created without changeset. If the state is absent, the stack will be deleted immediately with no
changeset."
type: bool
default: 'no'
version_added: "2.4"
changeset_name:
description:
- Name given to the changeset when creating a changeset, only used when create_changeset is true. By default a name prefixed with Ansible-STACKNAME
is generated based on input parameters.
See the AWS Change Sets docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)
version_added: "2.4"
template_format:
description:
- (deprecated) For local templates, allows specification of json or yaml format. Templates are now passed raw to CloudFormation regardless of format.
This parameter is ignored since Ansible 2.3.
default: json
choices: [ json, yaml ]
version_added: "2.0"
role_arn:
description:
- The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
version_added: "2.3"
termination_protection:
description:
- enable or disable termination protection on the stack. Only works with botocore >= 1.7.18.
type: bool
version_added: "2.5"
template_body:
description:
- Template body. Use this to pass in the actual body of the Cloudformation template.
- If 'state' is 'present' and the stack does not exist yet, either 'template', 'template_body' or 'template_url'
must be specified (but only one of them). If 'state' is present, the stack does exist, and neither 'template',
'template_body' nor 'template_url' are specified, the previous template will be reused.
version_added: "2.5"
events_limit:
description:
- Maximum number of CloudFormation events to fetch from a stack when creating or updating it.
default: 200
version_added: "2.7"
backoff_delay:
description:
- Number of seconds to wait for the next retry.
default: 3
version_added: "2.8"
type: int
required: False
backoff_max_delay:
description:
- Maximum amount of time to wait between retries.
default: 30
version_added: "2.8"
type: int
required: False
backoff_retries:
description:
- Number of times to retry operation.
- The AWS API throttling mechanism can cause the CloudFormation module to fail, so we retry a few times.
default: 10
version_added: "2.8"
type: int
required: False
capabilities:
description:
- Specify capabilities that stack template contains.
- Valid values are CAPABILITY_IAM, CAPABILITY_NAMED_IAM and CAPABILITY_AUTO_EXPAND.
type: list
version_added: "2.8"
default: [ CAPABILITY_IAM, CAPABILITY_NAMED_IAM ]
author: "James S. Martin (@jsmartin)"
extends_documentation_fragment:
- aws
- ec2
requirements: [ boto3, botocore>=1.5.45 ]
'''
EXAMPLES = '''
- name: create a cloudformation stack
cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "files/cloudformation-example.json"
template_parameters:
KeyName: "jmartin"
DiskType: "ephemeral"
InstanceType: "m1.small"
ClusterSize: 3
tags:
Stack: "ansible-cloudformation"
# Basic role example
- name: create a stack, specify role that cloudformation assumes
cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "roles/cloudformation/files/cloudformation-example.json"
role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
- name: delete a stack
cloudformation:
stack_name: "ansible-cloudformation-old"
state: "absent"
# Create a stack, pass in template from a URL, disable rollback if stack creation fails,
# pass in some parameters to the template, provide tags for resources created
- name: create a stack, pass in the template via an URL
cloudformation:
stack_name: "ansible-cloudformation"
state: present
region: us-east-1
disable_rollback: true
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
ClusterSize: 3
tags:
Stack: ansible-cloudformation
# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails,
# pass in some parameters to the template, provide tags for resources created
- name: create a stack, pass in the template body via lookup template
cloudformation:
stack_name: "ansible-cloudformation"
state: present
region: us-east-1
disable_rollback: true
template_body: "{{ lookup('template', 'cloudformation.j2') }}"
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
ClusterSize: 3
tags:
Stack: ansible-cloudformation
# Pass a template parameter which uses Cloudformation's UsePreviousValue attribute
# When use_previous_value is set to True, the given value will be ignored and
# Cloudformation will use the value from a previously submitted template.
# If use_previous_value is set to False (default) the given value is used.
- cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
template: "files/cloudformation-example.json"
template_parameters:
DBSnapshotIdentifier:
use_previous_value: True
value: arn:aws:rds:es-east-1:000000000000:snapshot:rds:my-db-snapshot
DBName:
use_previous_value: True
tags:
Stack: "ansible-cloudformation"
# Enable termination protection on a stack.
# If the stack already exists, this will update its termination protection
- name: enable termination protection during stack creation
cloudformation:
stack_name: my_stack
state: present
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
termination_protection: yes
# Configure TimeoutInMinutes before the stack status becomes CREATE_FAILED
# In this case, if disable_rollback is not set or is set to false, the stack will be rolled back.
- name: enable termination protection during stack creation
cloudformation:
stack_name: my_stack
state: present
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
create_timeout: 5
# Configure rollback behaviour on the unsuccessful creation of a stack allowing
# CloudFormation to clean up, or do nothing in the event of an unsuccessful
# deployment
# In this case, if on_create_failure is set to "DELETE", it will clean up the stack if
# it fails to create
- name: create stack which will delete on creation failure
cloudformation:
stack_name: my_stack
state: present
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
on_create_failure: DELETE
'''
RETURN = '''
events:
type: list
description: Most recent events in Cloudformation's event log. This may be from a previous run in some cases.
returned: always
sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
log:
description: Debugging logs. Useful when modifying or finding an error.
returned: always
type: list
sample: ["updating stack"]
stack_resources:
description: AWS stack resources and their status. List of dictionaries, one dict per resource.
returned: state == present
type: list
sample: [
{
"last_updated_time": "2016-10-11T19:40:14.979000+00:00",
"logical_resource_id": "CFTestSg",
"physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
"resource_type": "AWS::EC2::SecurityGroup",
"status": "UPDATE_COMPLETE",
"status_reason": null
}
]
stack_outputs:
type: dict
description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
returned: state == present
sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
''' # NOQA
import json
import time
import uuid
import traceback
from hashlib import sha1
try:
import boto3
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
import ansible.module_utils.ec2
# import a class, otherwise we'll use a fully qualified path
from ansible.module_utils.ec2 import AWSRetry, boto_exception
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
'''This event data was never correct; it worked as a side effect, so the v2.3 format is different.'''
ret = {'events': [], 'log': []}
try:
pg = cfn.get_paginator(
'describe_stack_events'
).paginate(
StackName=stack_name,
PaginationConfig={'MaxItems': events_limit}
)
if token_filter is not None:
events = list(pg.search(
"StackEvents[?ClientRequestToken == '{0}']".format(token_filter)
))
else:
events = list(pg.search("StackEvents[*]"))
except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
error_msg = boto_exception(err)
if 'does not exist' in error_msg:
# missing stack, don't bail.
ret['log'].append('Stack does not exist.')
return ret
ret['log'].append('Unknown error: ' + str(error_msg))
return ret
for e in events:
eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e)
ret['events'].append(eventline)
if e['ResourceStatus'].endswith('FAILED'):
failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e)
ret['log'].append(failline)
return ret
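# Hedged illustration (derived from the code above): the returned dict has
# the shape
#   {'events': ['StackEvent AWS::CloudFormation::Stack mystack CREATE_COMPLETE',
#               ...],
#    'log': ['AWS::EC2::Instance MyInstance CREATE_FAILED: <reason>', ...]}
# where 'log' only collects entries for FAILED resource statuses (or the
# error notes added in the except branch above).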
def create_stack(module, stack_params, cfn, events_limit):
if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.")
# 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and
# 'OnFailure' only apply on creation, not update.
#
# 'OnFailure' and 'DisableRollback' are incompatible with each other, so
# throw error if both are defined
if module.params.get('on_create_failure') is None:
stack_params['DisableRollback'] = module.params['disable_rollback']
else:
if module.params['disable_rollback']:
module.fail_json(msg="You can specify either 'on_create_failure' or 'disable_rollback', but not both.")
stack_params['OnFailure'] = module.params['on_create_failure']
if module.params.get('create_timeout') is not None:
stack_params['TimeoutInMinutes'] = module.params['create_timeout']
if module.params.get('termination_protection') is not None:
if boto_supports_termination_protection(cfn):
stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection'))
else:
module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
try:
response = cfn.create_stack(**stack_params)
# Use stack ID to follow stack state in case of on_create_failure = DELETE
result = stack_operation(cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None))
except Exception as err:
error_msg = boto_exception(err)
module.fail_json(msg="Failed to create stack {0}: {1}.".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
if not result:
module.fail_json(msg="empty result")
return result
def list_changesets(cfn, stack_name):
res = cfn.list_change_sets(StackName=stack_name)
return [cs['ChangeSetName'] for cs in res['Summaries']]
def create_changeset(module, stack_params, cfn, events_limit):
if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
module.fail_json(msg="Either 'template' or 'template_url' is required.")
if module.params['changeset_name'] is not None:
stack_params['ChangeSetName'] = module.params['changeset_name']
# changesets don't accept ClientRequestToken parameters
stack_params.pop('ClientRequestToken', None)
try:
changeset_name = build_changeset_name(stack_params)
stack_params['ChangeSetName'] = changeset_name
# Determine if this changeset already exists
pending_changesets = list_changesets(cfn, stack_params['StackName'])
if changeset_name in pending_changesets:
warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets)
result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning])
else:
cs = cfn.create_change_set(**stack_params)
# Make sure we don't enter an infinite loop
time_end = time.time() + 600
while time.time() < time_end:
try:
newcs = cfn.describe_change_set(ChangeSetName=cs['Id'])
except botocore.exceptions.BotoCoreError as err:
error_msg = boto_exception(err)
module.fail_json(msg=error_msg)
if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS':
time.sleep(1)
elif newcs['Status'] == 'FAILED' and "The submitted information didn't contain changes" in newcs['StatusReason']:
cfn.delete_change_set(ChangeSetName=cs['Id'])
result = dict(changed=False,
output='The created Change Set did not contain any changes to this stack and was deleted.')
# a failed change set does not trigger any stack events so we just want to
# skip any further processing of result and just return it directly
return result
else:
break
# Let's not hog the CPU or spam the AWS API
time.sleep(1)
result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit)
result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
'NOTE that dependencies on this stack might fail due to pending changes!']
except Exception as err:
error_msg = boto_exception(err)
if 'No updates are to be performed.' in error_msg:
result = dict(changed=False, output='Stack is already up-to-date.')
else:
module.fail_json(msg="Failed to create change set: {0}".format(error_msg), exception=traceback.format_exc())
if not result:
module.fail_json(msg="empty result")
return result
def update_stack(module, stack_params, cfn, events_limit):
if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
stack_params['UsePreviousTemplate'] = True
# if the state is present and the stack already exists, we try to update it.
# AWS will tell us if the stack template and parameters are the same and
# don't need to be updated.
try:
cfn.update_stack(**stack_params)
result = stack_operation(cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None))
except Exception as err:
error_msg = boto_exception(err)
if 'No updates are to be performed.' in error_msg:
result = dict(changed=False, output='Stack is already up-to-date.')
else:
module.fail_json(msg="Failed to update stack {0}: {1}".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
if not result:
module.fail_json(msg="empty result")
return result
def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state):
'''updates termination protection of a stack'''
if not boto_supports_termination_protection(cfn):
module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
stack = get_stack_facts(cfn, stack_name)
if stack:
if stack['EnableTerminationProtection'] is not desired_termination_protection_state:
try:
cfn.update_termination_protection(
EnableTerminationProtection=desired_termination_protection_state,
StackName=stack_name)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=boto_exception(e), exception=traceback.format_exc())
def boto_supports_termination_protection(cfn):
'''termination protection was added in botocore 1.7.18'''
return hasattr(cfn, "update_termination_protection")
def stack_operation(cfn, stack_name, operation, events_limit, op_token=None):
'''gets the status of a stack while it is created/updated/deleted'''
existed = []
while True:
try:
stack = get_stack_facts(cfn, stack_name)
existed.append('yes')
except Exception:
# If the stack previously existed, and now can't be found then it's
# been deleted successfully.
if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
ret = get_stack_events(cfn, stack_name, events_limit, op_token)
ret.update({'changed': True, 'output': 'Stack Deleted'})
return ret
else:
return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
ret = get_stack_events(cfn, stack_name, events_limit, op_token)
if not stack:
if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
ret = get_stack_events(cfn, stack_name, events_limit, op_token)
ret.update({'changed': True, 'output': 'Stack Deleted'})
return ret
else:
ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
return ret
# it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
# Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET':
ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation})
return ret
elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE':
ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'})
return ret
# note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases.
elif stack['StackStatus'].endswith('_COMPLETE'):
ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
return ret
elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
return ret
# note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases.
elif stack['StackStatus'].endswith('_FAILED'):
ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation})
return ret
else:
# this can loop forever :/
time.sleep(5)
return {'failed': True, 'output': 'Failed for unknown reasons.'}
def build_changeset_name(stack_params):
if 'ChangeSetName' in stack_params:
return stack_params['ChangeSetName']
json_params = json.dumps(stack_params, sort_keys=True)
return 'Ansible-{0}-{1}'.format(
stack_params['StackName'],
sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest()
)
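# A minimal sketch (never called; the parameter values are made up) of what
# build_changeset_name produces when no explicit changeset_name is given: a
# sha1 over the sorted, JSON-encoded stack parameters, so identical parameters
# always map to the same 'Ansible-<StackName>-<digest>' name and a re-run can
# recognise the already-pending changeset. It reuses the json/sha1/to_bytes
# imports at the top of this module.
def _example_changeset_name():
    params = {'StackName': 'demo-stack', 'TemplateBody': '{}'}
    digest = sha1(to_bytes(json.dumps(params, sort_keys=True),
                           errors='surrogate_or_strict')).hexdigest()
    return 'Ansible-{0}-{1}'.format(params['StackName'], digest)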
def check_mode_changeset(module, stack_params, cfn):
"""Create a change set, describe it and delete it before returning check mode outputs."""
stack_params['ChangeSetName'] = build_changeset_name(stack_params)
# changesets don't accept ClientRequestToken parameters
stack_params.pop('ClientRequestToken', None)
try:
change_set = cfn.create_change_set(**stack_params)
for i in range(60): # total time 5 min
description = cfn.describe_change_set(ChangeSetName=change_set['Id'])
if description['Status'] in ('CREATE_COMPLETE', 'FAILED'):
break
time.sleep(5)
else:
# if the changeset doesn't finish in 5 mins, this `else` will trigger and fail
module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName'])
cfn.delete_change_set(ChangeSetName=change_set['Id'])
reason = description.get('StatusReason')
if description['Status'] == 'FAILED' and "didn't contain changes" in description['StatusReason']:
return {'changed': False, 'msg': reason, 'meta': description['StatusReason']}
return {'changed': True, 'msg': reason, 'meta': description['Changes']}
except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
error_msg = boto_exception(err)
module.fail_json(msg=error_msg, exception=traceback.format_exc())
def get_stack_facts(cfn, stack_name):
try:
stack_response = cfn.describe_stacks(StackName=stack_name)
stack_info = stack_response['Stacks'][0]
except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
error_msg = boto_exception(err)
if 'does not exist' in error_msg:
# missing stack, don't bail.
return None
# other error, bail.
raise err
if stack_response and stack_response.get('Stacks', None):
stacks = stack_response['Stacks']
if len(stacks):
stack_info = stacks[0]
return stack_info
def main():
argument_spec = ansible.module_utils.ec2.ec2_argument_spec()
argument_spec.update(dict(
stack_name=dict(required=True),
template_parameters=dict(required=False, type='dict', default={}),
state=dict(default='present', choices=['present', 'absent']),
template=dict(default=None, required=False, type='path'),
notification_arns=dict(default=None, required=False),
stack_policy=dict(default=None, required=False),
disable_rollback=dict(default=False, type='bool'),
on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']),
create_timeout=dict(default=None, type='int'),
template_url=dict(default=None, required=False),
template_body=dict(default=None, required=False),
template_format=dict(default=None, choices=['json', 'yaml'], required=False),
create_changeset=dict(default=False, type='bool'),
changeset_name=dict(default=None, required=False),
role_arn=dict(default=None, required=False),
tags=dict(default=None, type='dict'),
termination_protection=dict(default=None, type='bool'),
events_limit=dict(default=200, type='int'),
backoff_retries=dict(type='int', default=10, required=False),
backoff_delay=dict(type='int', default=3, required=False),
backoff_max_delay=dict(type='int', default=30, required=False),
capabilities=dict(type='list', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['template_url', 'template', 'template_body']],
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 and botocore are required for this module')
invalid_capabilities = []
user_capabilities = module.params.get('capabilities')
for user_cap in user_capabilities:
if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']:
invalid_capabilities.append(user_cap)
if invalid_capabilities:
module.fail_json(msg="Specified capabilities are invalid : %r,"
" please check documentation for valid capabilities" % invalid_capabilities)
# collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
stack_params = {
'Capabilities': user_capabilities,
'ClientRequestToken': to_native(uuid.uuid4()),
}
state = module.params['state']
stack_params['StackName'] = module.params['stack_name']
if module.params['template'] is not None:
with open(module.params['template'], 'r') as template_fh:
stack_params['TemplateBody'] = template_fh.read()
elif module.params['template_body'] is not None:
stack_params['TemplateBody'] = module.params['template_body']
elif module.params['template_url'] is not None:
stack_params['TemplateURL'] = module.params['template_url']
if module.params.get('notification_arns'):
stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
else:
stack_params['NotificationARNs'] = []
# can't check the policy when verifying.
if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']:
with open(module.params['stack_policy'], 'r') as stack_policy_fh:
stack_params['StackPolicyBody'] = stack_policy_fh.read()
template_parameters = module.params['template_parameters']
stack_params['Parameters'] = []
for k, v in template_parameters.items():
if isinstance(v, dict):
# set parameter based on a dict to allow additional CFN Parameter Attributes
param = dict(ParameterKey=k)
if 'value' in v:
param['ParameterValue'] = str(v['value'])
if 'use_previous_value' in v and bool(v['use_previous_value']):
param['UsePreviousValue'] = True
param.pop('ParameterValue', None)
stack_params['Parameters'].append(param)
else:
# allow default k/v configuration to set a template parameter
stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})
if isinstance(module.params.get('tags'), dict):
stack_params['Tags'] = ansible.module_utils.ec2.ansible_dict_to_boto3_tag_list(module.params['tags'])
if module.params.get('role_arn'):
stack_params['RoleARN'] = module.params['role_arn']
result = {}
try:
region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True)
cfn = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='cloudformation', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg=boto_exception(e))
# Wrap the cloudformation client methods that this module uses with
# automatic backoff / retry for throttling error codes
backoff_wrapper = AWSRetry.jittered_backoff(
retries=module.params.get('backoff_retries'),
delay=module.params.get('backoff_delay'),
max_delay=module.params.get('backoff_max_delay')
)
cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
cfn.create_stack = backoff_wrapper(cfn.create_stack)
cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
cfn.update_stack = backoff_wrapper(cfn.update_stack)
cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
if boto_supports_termination_protection(cfn):
cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection)
stack_info = get_stack_facts(cfn, stack_params['StackName'])
if module.check_mode:
if state == 'absent' and stack_info:
module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
elif state == 'absent' and not stack_info:
module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
elif state == 'present' and not stack_info:
module.exit_json(changed=True, msg='New stack would be created', meta=[])
else:
module.exit_json(**check_mode_changeset(module, stack_params, cfn))
if state == 'present':
if not stack_info:
result = create_stack(module, stack_params, cfn, module.params.get('events_limit'))
elif module.params.get('create_changeset'):
result = create_changeset(module, stack_params, cfn, module.params.get('events_limit'))
else:
if module.params.get('termination_protection') is not None:
update_termination_protection(module, cfn, stack_params['StackName'],
bool(module.params.get('termination_protection')))
result = update_stack(module, stack_params, cfn, module.params.get('events_limit'))
# format the stack output
stack = get_stack_facts(cfn, stack_params['StackName'])
if stack is not None:
if result.get('stack_outputs') is None:
# always define stack_outputs, but it may be empty
result['stack_outputs'] = {}
for output in stack.get('Outputs', []):
result['stack_outputs'][output['OutputKey']] = output['OutputValue']
stack_resources = []
reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
for res in reslist.get('StackResourceSummaries', []):
stack_resources.append({
"logical_resource_id": res['LogicalResourceId'],
"physical_resource_id": res.get('PhysicalResourceId', ''),
"resource_type": res['ResourceType'],
"last_updated_time": res['LastUpdatedTimestamp'],
"status": res['ResourceStatus'],
"status_reason": res.get('ResourceStatusReason') # can be blank, apparently
})
result['stack_resources'] = stack_resources
elif state == 'absent':
# absent state is different because of the way delete_stack works.
# the problem is that it doesn't give an error if the stack isn't found,
# so we must describe the stack first
try:
stack = get_stack_facts(cfn, stack_params['StackName'])
if not stack:
result = {'changed': False, 'output': 'Stack not found.'}
else:
if stack_params.get('RoleARN') is None:
cfn.delete_stack(StackName=stack_params['StackName'])
else:
cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
result = stack_operation(cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'),
stack_params.get('ClientRequestToken', None))
except Exception as err:
module.fail_json(msg=boto_exception(err), exception=traceback.format_exc())
if module.params['template_format'] is not None:
result['warnings'] = [('Argument `template_format` is deprecated '
'since Ansible 2.3, JSON and YAML templates are now passed '
'directly to the CloudFormation API.')]
module.exit_json(**result)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/python
import sys
import time
import serial
from serial.serialutil import SerialException
bootloader_error_codes = {
'0' : "OK",
'1' : "Intel HEX Invalid",
'2' : "Bad Checksum",
'3' : "Bad Address",
'4' : "Bad Record Type",
'5' : "Record Too Long"
}
def download_code(ihx_file, serial_port):
for line in ihx_file.readlines():
record_type = int(line[7:9], 16)
if (record_type == 0x00):
print "Writing", line[:-1],
serial_port.write(line)
rc = serial_port.read()
print " RC =", rc,
if rc in bootloader_error_codes:
print "(%s)" % bootloader_error_codes[rc]
else:
print "(Unknown Error)"
if (rc != '0'):
print "Error downloading code!"
return False
else:
print "Skipping non data record: '%s'" % line[:-1]
return True
def verify_code(ihx_file, serial_port):
can_read_any = None
for line in ihx_file.readlines():
record_type = int(line[7:9], 16)
if (record_type == 0x00):
length = int(line[1:3], 16)
start_addr = int(line[3:7], 16)
data = line[9:9+(length*2)]
# first time around, check if we can only read 16 byte chunks
if can_read_any is None:
can_read_any = False
do_flash_read(serial_port, start_addr, 1)
for read_data in serial_port:
read_data = read_data.strip()
if not read_data:
continue
if not read_data == ":00000001FF":
can_read_any = True
else:
break
if not can_read_any:
print "*** warning! this version of CC-Bootloader can only read 16 byte blocks!"
print "*** upgrade recommended!"
if can_read_any:
block_length = length
else:
block_length = ((length / 16) + 1) * 16
print "\rVerifying %04d bytes at address: %04X" % (length, start_addr),
do_flash_read(serial_port, start_addr, block_length)
verify_data = ''
for read_data in serial_port:
read_data = read_data.strip()
if (not data or read_data == ":00000001FF"):
break
# strip header and checksum
verify_data += read_data[9:-2]
if (data == verify_data[:length*2]):
print '(OK)',
else:
print 'Failed! Expected:', data, 'Got:', verify_data[:length*2]
exit(1)
sys.stdout.flush()
else:
print "Skipping non data record: '%s'" % line[:-1]
return True
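# Small helper sketch (not called by the loader; the default record is the
# standard Intel HEX example line) unpacking the fields that download_code and
# verify_code slice out by hand: byte count, load address, record type and the
# data payload.
def _example_parse_ihx_record(line=':10010000214601360121470136007EFE09D2190140\n'):
    length = int(line[1:3], 16)
    return {'length': length,
            'addr': int(line[3:7], 16),
            'type': int(line[7:9], 16),
            'data': line[9:9 + length * 2]}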
def run_user_code(serial_port):
# User code is entered on intel HEX EOF record
serial_port.write(":00000001FF\n")
return True
def reset_bootloader(serial_port):
serial_port.write(":00000022DE\n")
rc = serial_port.read()
print "RC =", rc,
if rc in bootloader_error_codes:
print "(%s)" % bootloader_error_codes[rc]
else:
print "(Unknown Error)"
if (rc != '0'):
print "Error resetting bootloader!"
return False
return True
def erase_all_user(serial_port):
serial_port.write(":00000023DD\n")
rc = serial_port.read()
print "RC =", rc,
if rc in bootloader_error_codes:
print "(%s)" % bootloader_error_codes[rc]
else:
print "(Unknown Error)"
if (rc != '0'):
print "Error erasing all user flash!"
return False
return True
def erase_user_page(serial_port, page):
chksum = (0xDB + 0x100 - page) & 0xFF
serial_port.write(":01000024%02X%02X\n" % (page, chksum))
rc = serial_port.read()
print "RC =", rc,
if rc in bootloader_error_codes:
print "(%s)" % bootloader_error_codes[rc]
else:
print "(Unknown Error)"
if (rc != '0'):
print "Error erasing user flash page!"
return False
return True
def do_flash_read(serial_port, start_addr, length):
chksum = (0xD9 +
(0x100 - (start_addr & 0xFF)) +
(0x100 - ((start_addr>>8) & 0xFF)) +
(0x100 - (length & 0xFF)) +
(0x100 - ((length>>8) & 0xFF))
) & 0xFF
serial_port.write(":02%04X25%04X%02X\n" % (start_addr, length, chksum))
def flash_read(ihx_file, serial_port, start_addr, length):
do_flash_read(serial_port, start_addr, length)
for line in serial_port:
if not line == "\n":
if(ihx_file):
ihx_file.write(line)
else:
print line,
if (line == ":00000001FF\n"):
break
def print_usage():
import sys
print """
CC Bootloader Download Utility
Usage: ./bootload.py serial_port command
Commands:
download <hex_file>
Download hex_file to the device.
run
Run the user code.
reset
The bootloader will not erase pages that have previously been written to
before writing new data to that page. This allows for random access writes
but prevents you from overwriting downloaded code unless the device is
power cycled. This command will reset the bootloader's record of what
pages have been written to, allowing you to overwrite without power
cycling.
erase_all
Erases the entire user flash area.
erase <n>
Erases page n of the flash memory (organised into 1024 byte pages). The
bootloader occupies the first few pages and the rest are reserved for user
code. Attempting to erase a bootloader page will have no effect. To
determine which page the user code starts on please check the
USER_CODE_BASE setting in main.h.
read <start_addr> <len> [hex_file]
Reads len bytes from flash memory starting from start_addr and optionally
write to hex_file. start_addr and len should be specified in hexadecimal
(e.g. 0x1234).
verify <hex_file>
Verify hex_file matches device flash memory.
"""
if __name__ == '__main__':
import sys
if (len(sys.argv) < 3):
print_usage()
sys.exit(1)
serial_port_name = sys.argv[1]
command = sys.argv[2]
options = sys.argv[3:]
while True:
try:
serial_port = serial.Serial(serial_port_name, timeout=1)
break
except SerialException,e:
print "\nSomething is talking to the RfCat dongle (Modem Manager, most likely). Retrying again after 5 seconds. This can take a minute, please be patient."
time.sleep(6)
except KeyboardInterrupt:
print "Caught <CTRL-C>, exitting..."
exit (-2)
except Exception,e:
sys.excepthook(*sys.exc_info())
print e
exit (-1)
try:
if (command == 'download' or command == 'verify'):
if (len(options) < 1):
print_usage()
else:
ihx_filename = options[0]
ihx_file = open(ihx_filename, 'r')
if (command == 'download'):
download_code(ihx_file, serial_port)
else:
verify_code(ihx_file, serial_port)
elif (command == 'run'):
run_user_code(serial_port)
elif (command == 'reset'):
reset_bootloader(serial_port)
elif (command == 'erase_all'):
erase_all_user(serial_port)
elif (command == 'erase'):
if (len(options) < 1):
print_usage()
else:
erase_user_page(serial_port, int(options[0]))
elif (command == 'read'):
if (len(options) < 2):
print_usage()
else:
ihx_file = None
if(len(options) == 3):
try:
ihx_filename = options[2]
ihx_file = open(ihx_filename, 'w')
print 'reading to:', ihx_filename
except:
print "couldn't open output file:", ihx_filename
exit(2)
flash_read(ihx_file, serial_port, int(options[0], 16), int(options[1], 16))
else:
print_usage()
finally:
serial_port.close()
|
|
#!/usr/bin/env python
"""
Implementation of the p9sk1 authentication.
This module requires the Python Cryptography Toolkit from
http://www.amk.ca/python/writing/pycrypt/pycrypt.html
"""
import socket
import random
from Crypto.Cipher import DES
import py9p
class Error(py9p.Error):
pass
class AuthError(Error):
pass
class AuthsrvError(Error):
pass
TickReqLen = 141
TickLen = 72
AuthLen = 13
AuthTreq, AuthChal, AuthPass, AuthOK, AuthErr, AuthMod = range(1, 7)
AuthTs, AuthTc, AuthAs, AuthAc, AuthTp, AuthHr = range(64, 70)
AUTHPORT = 567
def pad(str, l, padch='\0'):
str += padch * (l - len(str))
return str[:l]
par = [0x01, 0x02, 0x04, 0x07, 0x08, 0x0b, 0x0d, 0x0e,
0x10, 0x13, 0x15, 0x16, 0x19, 0x1a, 0x1c, 0x1f,
0x20, 0x23, 0x25, 0x26, 0x29, 0x2a, 0x2c, 0x2f,
0x31, 0x32, 0x34, 0x37, 0x38, 0x3b, 0x3d, 0x3e,
0x40, 0x43, 0x45, 0x46, 0x49, 0x4a, 0x4c, 0x4f,
0x51, 0x52, 0x54, 0x57, 0x58, 0x5b, 0x5d, 0x5e,
0x61, 0x62, 0x64, 0x67, 0x68, 0x6b, 0x6d, 0x6e,
0x70, 0x73, 0x75, 0x76, 0x79, 0x7a, 0x7c, 0x7f,
0x80, 0x83, 0x85, 0x86, 0x89, 0x8a, 0x8c, 0x8f,
0x91, 0x92, 0x94, 0x97, 0x98, 0x9b, 0x9d, 0x9e,
0xa1, 0xa2, 0xa4, 0xa7, 0xa8, 0xab, 0xad, 0xae,
0xb0, 0xb3, 0xb5, 0xb6, 0xb9, 0xba, 0xbc, 0xbf,
0xc1, 0xc2, 0xc4, 0xc7, 0xc8, 0xcb, 0xcd, 0xce,
0xd0, 0xd3, 0xd5, 0xd6, 0xd9, 0xda, 0xdc, 0xdf,
0xe0, 0xe3, 0xe5, 0xe6, 0xe9, 0xea, 0xec, 0xef,
0xf1, 0xf2, 0xf4, 0xf7, 0xf8, 0xfb, 0xfd, 0xfe]
def expandKey(key):
"""Expand a 7-byte DES key into an 8-byte DES key"""
k = map(ord, key)
k64 = [k[0] >> 1,
(k[1] >> 2) | (k[0] << 6),
(k[2] >> 3) | (k[1] << 5),
(k[3] >> 4) | (k[2] << 4),
(k[4] >> 5) | (k[3] << 3),
(k[5] >> 6) | (k[4] << 2),
(k[6] >> 7) | (k[5] << 1),
k[6] << 0]
return "".join([chr(par[x & 0x7f]) for x in k64])
def newKey(key):
return DES.new(expandKey(key), DES.MODE_ECB)
def lencrypt(key, l):
"""Encrypt a list of characters, returning a list of characters"""
return list(key.encrypt("".join(l)))
def ldecrypt(key, l):
return list(key.decrypt("".join(l)))
def makeKey(password):
"""
Hash a password into a key.
"""
password = password[:28 - 1] + '\0'
n = len(password) - 1
password = pad(password, 28, ' ')
buf = list(password)
while 1:
t = map(ord, buf[:8])
k = [(((t[i]) >> i) + (t[i + 1] << (8 - (i + 1))) & 0xff) for i in xrange(7)]
key = "".join([chr(x) for x in k])
if n <= 8:
return key
n -= 8
if n < 8:
buf[:n] = []
else:
buf[:8] = []
buf[:8] = lencrypt(newKey(key), buf[:8])
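# Illustrative sketch (not used by the protocol code; the password is made up):
# makeKey folds a password into a 7-byte Plan 9 key, and expandKey/newKey turn
# it into an 8-byte DES key whose bytes all come from the odd-parity table
# `par` above, as DES key parity requires.
def _example_password_to_des_key(password='notasecret'):
    k7 = makeKey(password)    # 7-byte key derived from the password
    k8 = expandKey(k7)        # 8 bytes, each with an odd number of set bits
    assert all(bin(ord(c)).count('1') % 2 for c in k8)
    return newKey(k7)         # DES.new(expanded key, MODE_ECB) handle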
def randChars(n):
"""
XXX This is *NOT* a secure way to generate random strings!
This should be fixed if this code is ever used in a serious manner.
"""
return "".join([chr(random.randint(0, 255)) for x in xrange(n)])
class Marshal(py9p.Marshal):
def __init__(self):
self.ks = None
self.kn = None
def setKs(self, ks):
self.ks = newKey(ks)
def setKn(self, kn):
self.kn = newKey(kn)
def encrypt(self, n, key):
"""Encrypt the last n bytes of the buffer with weird chaining."""
idx = len(self.bytes) - n
n -= 1
for dummy in xrange(n / 7):
self.bytes[idx: idx + 8] = lencrypt(key, self.bytes[idx: idx + 8])
idx += 7
if n % 7:
self.bytes[-8:] = lencrypt(key, self.bytes[-8:])
def decrypt(self, n, key):
"""Decrypt the first n bytes of the buffer."""
if key is None:
return
m = n - 1
if m % 7:
self.bytes[n - 8:n] = ldecrypt(key, self.bytes[n - 8:n])
idx = m - m % 7
for dummy in xrange(m / 7):
idx -= 7
self.bytes[idx: idx + 8] = ldecrypt(key, self.bytes[idx: idx + 8])
def encPad(self, x, l):
self.encX(pad(x, l))
def decPad(self, l):
x = self.decX(l)
idx = x.find('\0')
if idx >= 0:
x = x[:idx]
return x
def encChal(self, x):
_checkLen(x, 8)
self.encX(x)
def decChal(self):
return self.decX(8)
def encTicketReq(self, x):
type, authid, authdom, chal, hostid, uid = x
self.enc1(type)
self.encPad(authid, 28)
self.encPad(authdom, 48)
self.encChal(chal)
self.encPad(hostid, 28)
self.encPad(uid, 28)
def decTicketReq(self):
return [self.dec1(),
self.decPad(28),
self.decPad(48),
self.decChal(),
self.decPad(28),
self.decPad(28)]
def encTicket(self, x):
num, chal, cuid, suid, key = x
_checkLen(key, 7)
self.enc1(num)
self.encChal(chal)
self.encPad(cuid, 28)
self.encPad(suid, 28)
self.encX(key)
self.encrypt(1 + 8 + 28 + 28 + 7, self.ks)
def decTicket(self):
self.decrypt(1 + 8 + 28 + 28 + 7, self.ks)
return [self.dec1(),
self.decChal(),
self.decPad(28),
self.decPad(28),
self.decX(7)]
def encAuth(self, x):
num, chal, id = x
self.enc1(num)
self.encChal(chal)
self.enc4(id)
self.encrypt(1 + 8 + 4, self.kn)
def decAuth(self):
self.decrypt(1 + 8 + 4, self.kn)
return [self.dec1(),
self.decChal(),
self.dec4()]
def encTattach(self, x):
tick, auth = x
_checkLen(tick, 72)
self.encX(tick)
self.encAuth(auth)
def decTattach(self):
return self.decX(72), self.decAuth()
def getTicket(con, sk1, treq):
"""
Connect to the auth server and request a set of tickets.
Con is an open handle to the auth server, sk1 is a handle
to a sk1 marshaller with Kc set and treq is a ticket request.
Return the (opaque) server ticket and the (decoded) client ticket.
Raises an AuthsrvError on failure.
"""
sk1.setBuf()
sk1.encTicketReq(treq)
con.send(sk1.getBuf())
ch = con.recv(1)
if ch == chr(5):
err = con.recv(64)
raise AuthsrvError(err)
elif ch != chr(4):
raise AuthsrvError("invalid reply type %r" % ch)
ctick = con.recv(72)
stick = con.recv(72)
if len(stick) + len(ctick) != 72 * 2:
raise AuthsrvError("short auth reply")
sk1.setBuf(ctick)
return sk1.decTicket(), stick
# this could be cleaner
def clientAuth(cl, fcall, user, Kc, authsrv, authport=567):
CHc = randChars(8)
sk1 = Marshal()
sk1.setKs(Kc)
pos = [0]
gen = 0
def rd(l):
fc = cl._read(fcall.afid, pos[0], l)
pos[0] += len(fc.data)
return fc.data
def wr(x):
fc = cl._write(fcall.afid, pos[0], x)
pos[0] += fc.count
return fc.count
# negotiate
proto = rd(128)
v2 = 0
if proto[:10] == 'v.2 p9sk1@':
v2 = 1
proto = proto[4:]
if proto[:6] != 'p9sk1@':
raise AuthError("unknown protocol %r" % proto)
wr(proto.replace("@", " ", 1))
if v2:
if rd(3) != 'OK\0':
raise AuthError("v.2 protocol botch")
# Tsession
sk1.setBuf()
sk1.encChal(CHc)
wr(sk1.getBuf())
# Rsession
sk1.setBuf(rd(TickReqLen))
treq = sk1.decTicketReq()
if v2 and treq[0] == 0: # kenfs is fast and loose with auth formats
treq[0] = AuthTreq
if treq[0] != AuthTreq:
raise AuthError("bad server")
CHs = treq[3]
# request ticket from authsrv
treq[-2], treq[-1] = user, user
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((authsrv, authport))
(num, CHs2, cuid, suid, Kn), stick = getTicket(s, sk1, treq) # XXX catch
s.close()
if num != AuthTc or CHs != CHs2:
raise AuthError("bad password for %s or bad auth server" % user)
sk1.setKn(Kn)
# Tattach
sk1.setBuf()
sk1.encTattach([stick, [AuthAc, CHs, gen]])
wr(sk1.getBuf())
sk1.setBuf(rd(AuthLen))
num, CHc2, gen2 = sk1.decAuth()
if num != AuthAs or CHc2 != CHc: # XXX check gen2 for replay
raise AuthError("bad server")
return
class AuthFs(object):
"""
A special file for performing p9sk1 authentication. On completion
of the protocol, suid is set to the authenticated username.
"""
type = 'sk1'
HaveProtos, HaveSinfo, HaveSauth, NeedProto, NeedCchal, NeedTicket, Success = range(7)
cancreate = 0
def __init__(self, user, dom, key):
self.sk1 = Marshal()
self.user = user
self.dom = dom
self.ks = key
def estab(self, fid):
fid.CHs = randChars(8)
fid.CHc = None
fid.suid = None
fid.treq = [AuthTreq, self.user, self.dom, fid.CHs, '', '']
fid.phase = self.HaveProtos
def read(self, srv, req):
pos = req.ifcall.offset
len = req.ifcall.count
self.sk1.setBuf()
if req.fid.phase == self.HaveProtos:
req.fid.phase = self.NeedProto
req.ofcall.data = "p9sk1@%s\0" % self.dom
srv.respond(req, None)
return
elif req.fid.phase == self.HaveSinfo:
req.fid.phase = self.NeedTicket
self.sk1.encTicketReq(req.fid.treq)
req.ofcall.data = self.sk1.getBuf()
srv.respond(req, None)
return
elif req.fid.phase == self.HaveSauth:
req.fid.phase = self.Success
self.sk1.encAuth([AuthAs, req.fid.CHc, 0])
req.ofcall.data = self.sk1.getBuf()
srv.respond(req, None)
return
srv.respond(req, "unexpected phase")
def write(self, srv, req):
pos = req.ifcall.offset
buf = req.ifcall.data
self.sk1.setBuf(buf)
if req.fid.phase == self.NeedProto:
l = buf.index("\0")
if l < 0:
raise AuthsrvError("missing terminator")
s = buf.split(" ")
if len(s) != 2 or s[0] != "p9sk1" or s[1] != self.dom + '\0':
raise AuthsrvError("bad protocol %r" % buf)
req.fid.phase = self.NeedCchal
req.ofcall.count = l + 1
srv.respond(req, None)
return
elif req.fid.phase == self.NeedCchal:
req.fid.CHc = self.sk1.decChal()
req.fid.phase = self.HaveSinfo
req.ofcall.count = 8
srv.respond(req, None)
return
elif req.fid.phase == self.NeedTicket:
self.sk1.setKs(self.ks)
num, chal, cuid, suid, key = self.sk1.decTicket()
if num != AuthTs or chal != req.fid.CHs:
raise AuthsrvError("bad ticket")
self.sk1.setKn(key)
num, chal, id = self.sk1.decAuth()
if num != AuthAc or chal != req.fid.CHs or id != 0:
raise AuthsrvError("bad authentication for %s" % suid)
req.fid.suid = suid
req.fid.phase = self.HaveSauth
req.ofcall.count = 72 + 13
srv.respond(req, None)
return
raise AuthsrvError("unexpected phase")
|
|
# -*- coding: utf-8 -*-
# Unfortunately, we have to fix a few App Engine bugs here because otherwise
# not all of our features will work. Still, we should keep the number of bug
# fixes to a minimum and report everything to Google, please:
# http://code.google.com/p/googleappengine/issues/list
from google.appengine.ext import db
from google.appengine.ext.db import polymodel
import logging, os, re, sys
base_path = os.path.abspath(os.path.dirname(__file__))
get_verbose_name = lambda class_name: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name).lower().strip()
DEFAULT_NAMES = ('verbose_name', 'ordering', 'permissions', 'app_label',
'abstract', 'db_table', 'db_tablespace')
# Add checkpoints to patching procedure, so we don't apply certain patches
# multiple times. This can happen if an exeception gets raised on the first
# request of an instance. In that case, main.py gets reloaded and patch_all()
# gets executed yet another time.
done_patch_all = False
def patch_all():
global done_patch_all
if done_patch_all:
return
patch_python()
patch_app_engine()
# Add signals: post_save_committed, post_delete_committed
from appenginepatcher import transactions
setup_logging()
done_patch_all = True
def patch_python():
# Remove modules that we want to override. Don't remove modules that we've
# already overridden.
for module in ('memcache',):
if module in sys.modules and \
not sys.modules[module].__file__.startswith(base_path):
del sys.modules[module]
# For some reason the imp module can't be replaced via sys.path
from appenginepatcher import have_appserver
if have_appserver:
from appenginepatcher import imp
sys.modules['imp'] = imp
if have_appserver:
def unlink(_):
raise NotImplementedError('App Engine does not support FS writes!')
os.unlink = unlink
def patch_app_engine():
# This allows for using Paginator on a Query object. We limit the number
# of results to 301, so there won't be any timeouts (301, so you can say
# "more than 300 results").
def __len__(self):
return self.count(301)
db.Query.__len__ = __len__
# Add "model" property to Query (needed by generic views)
class ModelProperty(object):
def __get__(self, query, unused):
try:
return query._Query__model_class
except:
return query._model_class
db.Query.model = ModelProperty()
db.GqlQuery.model = ModelProperty()
# Add a few Model methods that are needed for serialization and ModelForm
def _get_pk_val(self):
if self.has_key():
return unicode(self.key())
else:
return None
db.Model._get_pk_val = _get_pk_val
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._get_pk_val() == other._get_pk_val()
db.Model.__eq__ = __eq__
def __ne__(self, other):
return not self.__eq__(other)
db.Model.__ne__ = __ne__
def pk(self):
return self._get_pk_val()
db.Model.id = db.Model.pk = property(pk)
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
from django.db.models.fields import FieldDoesNotExist
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
db.Model.serializable_value = serializable_value
# Make Property more Django-like (needed for serialization and ModelForm)
db.Property.serialize = True
db.Property.editable = True
db.Property.help_text = ''
def blank(self):
return not self.required
db.Property.blank = property(blank)
def _get_verbose_name(self):
if not getattr(self, '_verbose_name', None):
self._verbose_name = self.name.replace('_', ' ')
return self._verbose_name
def _set_verbose_name(self, verbose_name):
self._verbose_name = verbose_name
db.Property.verbose_name = property(_get_verbose_name, _set_verbose_name)
def attname(self):
return self.name
db.Property.attname = property(attname)
class Rel(object):
def __init__(self, property):
self.field_name = 'key'
self.property = property
self.to = property.reference_class
self.multiple = True
self.parent_link = False
self.related_name = getattr(property, 'collection_name', None)
self.through = None
class RelProperty(object):
def __get__(self, property, cls):
if property is None:
return self
if not hasattr(property, 'reference_class'):
return None
if not hasattr(property, '_rel_cache'):
property._rel_cache = Rel(property)
return property._rel_cache
db.Property.rel = RelProperty()
def formfield(self, **kwargs):
return self.get_form_field(**kwargs)
db.Property.formfield = formfield
# Add repr to make debugging a little bit easier
def __repr__(self):
data = []
if self.has_key():
if self.key().name():
data.append('key_name='+repr(self.key().name()))
else:
data.append('key_id='+repr(self.key().id()))
for field in self._meta.fields:
try:
data.append(field.name+'='+repr(getattr(self, field.name)))
except:
data.append(field.name+'='+repr(field.get_value_for_datastore(self)))
return u'%s(%s)' % (self.__class__.__name__, ', '.join(data))
db.Model.__repr__ = __repr__
# Add default __str__ and __unicode__ methods
def __str__(self):
return unicode(self).encode('utf-8')
db.Model.__str__ = __str__
def __unicode__(self):
return unicode(repr(self))
db.Model.__unicode__ = __unicode__
# Replace save() method with one that calls put(), so a monkey-patched
# put() will also work if someone uses save()
def save(self):
self.put()
db.Model.save = save
# Add _meta to Model, so porting code becomes easier (generic views,
# xheaders, and serialization depend on it).
from django.conf import settings
from django.utils.encoding import force_unicode, smart_str
from django.utils.translation import string_concat, get_language, \
activate, deactivate_all
class _meta(object):
many_to_many = ()
class pk:
name = 'key'
attname = 'pk'
def __init__(self, model, bases):
try:
self.app_label = model.__module__.split('.')[-2]
except IndexError:
raise ValueError('Django expects models (here: %s.%s) to be defined in their own apps!' % (model.__module__, model.__name__))
self.parents = [b for b in bases if issubclass(b, db.Model)]
self.object_name = model.__name__
self.module_name = self.object_name.lower()
self.verbose_name = get_verbose_name(self.object_name)
self.ordering = ()
self.abstract = model is db.Model
self.model = model
self.unique_together = ()
self.installed = model.__module__.rsplit('.', 1)[0] in \
settings.INSTALLED_APPS
self.permissions = []
meta = model.__dict__.get('Meta')
if meta:
meta_attrs = meta.__dict__.copy()
for name in meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
elif hasattr(meta, attr_name):
setattr(self, attr_name, getattr(meta, attr_name))
# verbose_name_plural is a special case because it appends an 's'
# by default.
setattr(self, 'verbose_name_plural', meta_attrs.pop('verbose_name_plural', string_concat(self.verbose_name, 's')))
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError, "'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys())
else:
self.verbose_name_plural = self.verbose_name + 's'
if not self.abstract:
self.permissions.extend([
('add_%s' % self.object_name.lower(),
string_concat('Can add ', self.verbose_name)),
('change_%s' % self.object_name.lower(),
string_concat('Can change ', self.verbose_name)),
('delete_%s' % self.object_name.lower(),
string_concat('Can delete ', self.verbose_name)),
])
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_str(self.app_label), smart_str(self.module_name))
def _set_db_table(self, db_table):
self._db_table = db_table
def _get_db_table(self):
if getattr(settings, 'DJANGO_STYLE_MODEL_KIND', True):
if hasattr(self, '_db_table'):
return self._db_table
return '%s_%s' % (self.app_label, self.module_name)
return self.object_name
db_table = property(_get_db_table, _set_db_table)
def _set_db_tablespace(self, db_tablespace):
self._db_tablespace = db_tablespace
def _get_db_tablespace(self):
if hasattr(self, '_db_tablespace'):
return self._db_tablespace
return settings.DEFAULT_TABLESPACE
db_tablespace = property(_get_db_tablespace, _set_db_tablespace)
@property
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
lang = get_language()
deactivate_all()
raw = force_unicode(self.verbose_name)
activate(lang)
return raw
@property
def local_fields(self):
return tuple(sorted([p for p in self.model.properties().values()
if not isinstance(p, db.ListProperty)],
key=lambda prop: prop.creation_counter))
@property
def local_many_to_many(self):
return tuple(sorted([p for p in self.model.properties().values()
if isinstance(p, db.ListProperty) and
not (issubclass(self.model,
polymodel.PolyModel)
and p.name == 'class')],
key=lambda prop: prop.creation_counter))
@property
def fields(self):
return self.local_fields + self.local_many_to_many
def get_field(self, name, many_to_many=True):
"""
Returns the requested field by name. Raises FieldDoesNotExist on error.
"""
for f in self.fields:
if f.name == name:
return f
from django.db.models.fields import FieldDoesNotExist
raise FieldDoesNotExist, '%s has no field named %r' % (self.object_name, name)
def get_all_related_objects(self, local_only=False):
try:
self._related_objects_cache
except AttributeError:
self._fill_related_objects_cache()
if local_only:
return [k for k, v in self._related_objects_cache.items() if not v]
return self._related_objects_cache.keys()
def get_all_related_objects_with_model(self):
"""
Returns a list of (related-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
self._related_objects_cache
except AttributeError:
self._fill_related_objects_cache()
return self._related_objects_cache.items()
def _fill_related_objects_cache(self):
from django.db.models.loading import get_models
from django.db.models.related import RelatedObject
from django.utils.datastructures import SortedDict
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_objects_with_model():
if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models():
for f in klass._meta.local_fields:
if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta:
cache[RelatedObject(f.rel.to, klass, f)] = None
self._related_objects_cache = cache
def get_all_related_many_to_many_objects(self, local_only=False):
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
if local_only:
return [k for k, v in cache.items() if not v]
return cache.keys()
def get_all_related_m2m_objects_with_model(self):
"""
Returns a list of (related-m2m-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
return cache.items()
def _fill_related_many_to_many_cache(self):
from django.db.models.loading import get_models, app_cache_ready
from django.db.models.related import RelatedObject
from django.utils.datastructures import SortedDict
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_m2m_objects_with_model():
if obj.field.creation_counter < 0 and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models():
for f in klass._meta.local_many_to_many:
if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta:
cache[RelatedObject(f.rel.to, klass, f)] = None
if app_cache_ready():
self._related_many_to_many_cache = cache
return cache
def get_add_permission(self):
return 'add_%s' % self.object_name.lower()
def get_change_permission(self):
return 'change_%s' % self.object_name.lower()
def get_delete_permission(self):
return 'delete_%s' % self.object_name.lower()
def get_ordered_objects(self):
return []
def get_parent_list(self):
"""
Returns all the ancestors of this model as a set. Useful for
determining if something is an ancestor, regardless of lineage.
"""
result = set()
for parent in self.parents:
result.add(parent)
result.update(parent._meta.get_parent_list())
return result
# Required to support reference properties to db.Model
db.Model._meta = _meta(db.Model, ())
def _initialize_model(cls, bases):
cls._meta = _meta(cls, bases)
cls._default_manager = cls
if not cls._meta.abstract:
from django.db.models.loading import register_models
register_models(cls._meta.app_label, cls)
# Register models with Django
from django.db.models import signals
if not hasattr(db.PropertiedClass.__init__, 'patched'):
old_propertied_class_init = db.PropertiedClass.__init__
def __init__(cls, name, bases, attrs, map_kind=True):
"""Creates a combined appengine and Django model.
The resulting model will be known to both the appengine libraries
and Django.
"""
_initialize_model(cls, bases)
old_propertied_class_init(cls, name, bases, attrs,
not cls._meta.abstract)
signals.class_prepared.send(sender=cls)
__init__.patched = True
db.PropertiedClass.__init__ = __init__
if not hasattr(polymodel.PolymorphicClass.__init__, 'patched'):
old_poly_init = polymodel.PolymorphicClass.__init__
def __init__(cls, name, bases, attrs):
if polymodel.PolyModel not in bases:
_initialize_model(cls, bases)
old_poly_init(cls, name, bases, attrs)
if polymodel.PolyModel not in bases:
signals.class_prepared.send(sender=cls)
__init__.patched = True
polymodel.PolymorphicClass.__init__ = __init__
@classmethod
def kind(cls):
return cls._meta.db_table
db.Model.kind = kind
# Add model signals
if not hasattr(db.Model.__init__, 'patched'):
old_model_init = db.Model.__init__
def __init__(self, *args, **kwargs):
signals.pre_init.send(sender=self.__class__, args=args,
kwargs=kwargs)
old_model_init(self, *args, **kwargs)
signals.post_init.send(sender=self.__class__, instance=self)
__init__.patched = True
db.Model.__init__ = __init__
if not hasattr(db.Model.put, 'patched'):
old_put = db.Model.put
def put(self, *args, **kwargs):
raw = False
signals.pre_save.send(sender=self.__class__, instance=self, raw=raw)
created = not self.is_saved()
result = old_put(self, *args, **kwargs)
signals.post_save.send(sender=self.__class__, instance=self,
created=created, raw=raw)
return result
put.patched = True
db.Model.put = put
if not hasattr(db.Model.delete, 'patched'):
old_delete = db.Model.delete
def delete(self, *args, **kwargs):
signals.pre_delete.send(sender=self.__class__, instance=self)
result = old_delete(self, *args, **kwargs)
signals.post_delete.send(sender=self.__class__, instance=self)
return result
delete.patched = True
db.Model.delete = delete
# This has to come last because we load Django here
from django.db.models.fields import BLANK_CHOICE_DASH
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
first_choice = include_blank and blank_choice or []
if self.choices:
return first_choice + list(self.choices)
if self.rel:
return first_choice + [(obj.pk, unicode(obj))
for obj in self.rel.to.all().fetch(301)]
return first_choice
db.Property.get_choices = get_choices
fix_app_engine_bugs()
def fix_app_engine_bugs():
# Fix handling of verbose_name. Google resolves lazy translation objects
# immediately, which of course breaks translation support.
# http://code.google.com/p/googleappengine/issues/detail?id=583
from django import forms
from django.utils.text import capfirst
# This import is needed so that the djangoforms patch can do its work first
from google.appengine.ext.db import djangoforms
def get_form_field(self, form_class=forms.CharField, **kwargs):
defaults = {'required': self.required}
defaults['label'] = capfirst(self.verbose_name)
if self.choices:
choices = []
if not self.required or (self.default is None and
'initial' not in kwargs):
choices.append(('', '---------'))
for choice in self.choices:
choices.append((unicode(choice), unicode(choice)))
defaults['widget'] = forms.Select(choices=choices)
if self.default is not None:
defaults['initial'] = self.default
defaults.update(kwargs)
return form_class(**defaults)
db.Property.get_form_field = get_form_field
# Extend ModelForm with support for EmailProperty
# http://code.google.com/p/googleappengine/issues/detail?id=880
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for an email property."""
defaults = {'form_class': forms.EmailField}
defaults.update(kwargs)
return super(db.EmailProperty, self).get_form_field(**defaults)
db.EmailProperty.get_form_field = get_form_field
# Fix DateTimeProperty, so it returns a form field even for auto_now and
# auto_now_add.
# http://code.google.com/p/googleappengine/issues/detail?id=994
def get_form_field(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(db.DateTimeProperty, self).get_form_field(**defaults)
db.DateTimeProperty.get_form_field = get_form_field
def get_form_field(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(db.DateProperty, self).get_form_field(**defaults)
db.DateProperty.get_form_field = get_form_field
def get_form_field(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(db.TimeProperty, self).get_form_field(**defaults)
db.TimeProperty.get_form_field = get_form_field
# Improve handling of StringListProperty
def get_form_field(self, **defaults):
defaults['required'] = False
return super(db.StringListProperty, self).get_form_field(**defaults)
db.StringListProperty.get_form_field = get_form_field
# Fix file uploads via BlobProperty
def get_form_field(self, **kwargs):
defaults = {'form_class': forms.FileField}
defaults.update(kwargs)
return super(db.BlobProperty, self).get_form_field(**defaults)
db.BlobProperty.get_form_field = get_form_field
def get_value_for_form(self, instance):
return getattr(instance, self.name)
db.BlobProperty.get_value_for_form = get_value_for_form
from django.core.files.uploadedfile import UploadedFile
def make_value_from_form(self, value):
if isinstance(value, UploadedFile):
return db.Blob(value.read())
return super(db.BlobProperty, self).make_value_from_form(value)
db.BlobProperty.make_value_from_form = make_value_from_form
# Optimize ReferenceProperty, so it returns the key directly
# http://code.google.com/p/googleappengine/issues/detail?id=993
def get_value_for_form(self, instance):
return self.get_value_for_datastore(instance)
db.ReferenceProperty.get_value_for_form = get_value_for_form
# Use our ModelChoiceField instead of Google's
def get_form_field(self, **kwargs):
defaults = {'form_class': forms.ModelChoiceField,
'queryset': self.reference_class.all()}
defaults.update(kwargs)
return super(db.ReferenceProperty, self).get_form_field(**defaults)
db.ReferenceProperty.get_form_field = get_form_field
def setup_logging():
from django.conf import settings
if settings.DEBUG:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
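# Usage sketch (the entry-point module is an assumption, not taken from this
# file): apply the patches once, as early as possible, before any model modules
# are imported. patch_all() is guarded by done_patch_all above, so calling it
# again after main.py gets reloaded is a harmless no-op.
def _example_bootstrap():
    patch_all()  # idempotent; a second call returns immediately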
|
|
"""
Module containing classes for high-level network parameters and methods
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import next
from builtins import open
from builtins import range
# required to make json saving work in Python 2/3
try:
to_unicode = unicode
except NameError:
to_unicode = str
try:
basestring
except NameError:
basestring = str
from future import standard_library
standard_library.install_aliases()
from collections import OrderedDict
from .dicts import Dict, ODict
from .. import conversion
# ----------------------------------------------------------------------------
# PopParams class
# ----------------------------------------------------------------------------
class PopParams(ODict):
"""
Class to hold population parameters
"""
def setParam(self, label, param, value):
if label in self:
d = self[label]
else:
return False
dimParams = ['numCells', 'density', 'gridSpacing']
if param in dimParams:
for removeParam in dimParams: d.pop(removeParam, None) # remove other properties
d[param] = value
return True
def rename(self, old, new, label=None):
return self.__rename__(old, new, label)
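# Minimal usage sketch (the population label and values are hypothetical):
# setParam drops any previously set dimension parameter, because 'numCells',
# 'density' and 'gridSpacing' are mutually exclusive ways of sizing a
# population.
def _example_pop_params():
    pops = PopParams()
    pops['E2'] = {'cellType': 'PYR', 'numCells': 100}
    pops.setParam('E2', 'density', 80000.0)  # removes 'numCells', keeps 'cellType'
    return pops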
# ----------------------------------------------------------------------------
# CellParams class
# ----------------------------------------------------------------------------
class CellParams(ODict):
"""
Class to hold cell parameters
"""
def setParam(self, label, param, value):
if label in self:
d = self[label]
else:
return False
d[param] = value
return True
def rename(self, old, new, label=None):
success = self.__rename__(old, new, label)
try:
# special case: renaming cellParams[x]['secs'] requires updating topology
if isinstance(label, (list, tuple)) and 'secs' in self[label[0]]:
d = self[label[0]]
for sec in list(d['secs'].values()): # replace appearances in topol
if sec['topol'].get('parentSec') == old:
sec['topol']['parentSec'] = new
return success
except:
return False
# ----------------------------------------------------------------------------
# ConnParams class
# ----------------------------------------------------------------------------
class ConnParams(ODict):
"""
Class to hold connectivity parameters
"""
def setParam(self, label, param, value):
if label in self:
d = self[label]
else:
return False
d[param] = value
return True
def rename(self, old, new, label=None):
return self.__rename__(old, new, label)
# ----------------------------------------------------------------------------
# SynMechParams class
# ----------------------------------------------------------------------------
class SynMechParams(ODict):
"""
Class to hold synaptic mechanism parameters
"""
def setParam(self, label, param, value):
if label in self:
d = self[label]
else:
return False
d[param] = value
return True
def rename(self, old, new, label=None):
return self.__rename__(old, new, label)
# ----------------------------------------------------------------------------
# SubConnParams class
# ----------------------------------------------------------------------------
class SubConnParams(ODict):
"""
Class to hold subcellular connectivity parameters
"""
def setParam(self, label, param, value):
if label in self:
d = self[label]
else:
return False
d[param] = value
return True
def rename(self, old, new, label=None):
return self.__rename__(old, new, label)
# ----------------------------------------------------------------------------
# StimSourceParams class
# ----------------------------------------------------------------------------
class StimSourceParams(ODict):
"""
Class to hold stimulation source parameters
"""
def setParam(self, label, param, value):
if label in self:
d = self[label]
else:
return False
d[param] = value
return True
def rename(self, old, new, label=None):
return self.__rename__(old, new, label)
# ----------------------------------------------------------------------------
# StimTargetParams class
# ----------------------------------------------------------------------------
class StimTargetParams(ODict):
"""
Class to hold stimulation target parameters
"""
def setParam(self, label, param, value):
if label in self:
d = self[label]
else:
return False
d[param] = value
return True
def rename(self, old, new, label=None):
return self.__rename__(old, new, label)
# ----------------------------------------------------------------------------
# RxD class
# ----------------------------------------------------------------------------
class RxDParams(ODict):
"""
Class to hold reaction-diffusion (RxD) parameters
"""
def setParam(self, label, param, value):
if label in self:
d = self[label]
else:
return False
d[param] = value
return True
def rename(self, old, new, label=None):
return self.__rename__(old, new, label)
# ----------------------------------------------------------------------------
# NETWORK PARAMETERS CLASS
# ----------------------------------------------------------------------------
class NetParams(object):
"""
Class to hold all network parameters
"""
def __init__(self, netParamsDict=None):
self._labelid = 0
# General network parameters
self.scale = 1 # scale factor for number of cells
self.sizeX = 100 # x-dimension (horizontal length) size in um
self.sizeY = 100 # y-dimension (vertical height or cortical depth) size in um
self.sizeZ = 100 # z-dimension (horizontal depth) size in um
self.shape = 'cuboid' # network shape ('cuboid', 'cylinder' or 'ellipsoid')
self.rotateCellsRandomly = False # random rotation of cells around y-axis [min,max] radians, e.g. [0, 3.0]
self.defineCellShapes = False # convert stylized cell geometries to 3d points (calls h.define_shape)
self.correctBorder = False # distance (um) from which to correct connectivity border effect, [x,y,z] eg. [100,150,150]
self.cellsVisualizationSpacingMultiplier = [1, 1, 1] # x,y,z scaling factor for spacing between cells during visualization
## General connectivity parameters
self.scaleConnWeight = 1 # Connection weight scale factor (NetStims not included)
self.scaleConnWeightNetStims = 1 # Connection weight scale factor for NetStims
self.scaleConnWeightModels = False # Connection weight scale factor for each cell model eg. {'Izhi2007': 0.1, 'Friesen': 0.02}
self.defaultWeight = 1 # default connection weight
self.defaultDelay = 1 # default connection delay (ms)
self.defaultThreshold = 10 # default Netcon threshold (mV)
self.propVelocity = 500.0 # propagation velocity (um/ms)
# mapping between cfg and netParams
self.mapping = {}
# Cell params dict
self.cellParams = CellParams()
# Population params dict
self.popParams = PopParams() # create list of populations - each item will contain dict with pop params
self.popTagsCopiedToCells = ['cellModel', 'cellType']
# Synaptic mechanism params dict
self.synMechParams = SynMechParams()
# Connectivity params dict
self.connParams = ConnParams()
# Subcellular connectivity params dict
self.subConnParams = SubConnParams()
# Stimulation source and target params dicts
self.stimSourceParams = StimSourceParams()
self.stimTargetParams = StimTargetParams()
# RxD params dicts
self.rxdParams = RxDParams()
# fill in params from dict passed as argument
if netParamsDict:
netParamsComponents = ['cellParams', 'popParams', 'synMechParams', 'connParams', 'subConnParams', 'stimSourceParams', 'stimTargetParams', 'rxdParams']
for k,v in netParamsDict.items():
if k in netParamsComponents:
for k2, v2 in netParamsDict[k].items():
if isinstance(v2, OrderedDict):
getattr(self, k)[k2] = ODict(v2)
elif isinstance(v2, dict):
getattr(self, k)[k2] = ODict(v2)
else:
getattr(self, k)[k2] = v2
elif isinstance(v, OrderedDict):
setattr(self, k, ODict(v))
elif isinstance(v, dict):
setattr(self, k, Dict(v))
else:
setattr(self, k, v)
def save(self, filename):
import os
from .. import sim
basename = os.path.basename(filename)
folder = filename.split(basename)[0]
ext = basename.split('.')[1]
# make dir
try:
os.mkdir(folder)
except OSError:
if not os.path.exists(folder):
print(' Could not create', folder)
dataSave = {'net': {'params': self.todict()}}
# Save to json file
if ext == 'json':
print(('Saving netParams to %s ... ' % (filename)))
sim.saveJSON(filename, dataSave)
def addCellParams(self, label=None, params=None):
if not label:
label = int(self._labelid)
self._labelid += 1
self.cellParams[label] = Dict(params)
def addPopParams(self, label=None, params=None):
if not label:
label = int(self._labelid)
self._labelid += 1
self.popParams[label] = Dict(params)
def addSynMechParams(self, label=None, params=None):
if not label:
label = int(self._labelid)
self._labelid += 1
self.synMechParams[label] = Dict(params)
def addConnParams(self, label=None, params=None):
if not label:
label = int(self._labelid)
self._labelid += 1
self.connParams[label] = Dict(params)
def addSubConnParams(self, label=None, params=None):
if not label:
label = int(self._labelid)
self._labelid += 1
self.subConnParams[label] = Dict(params)
def addStimSourceParams(self, label=None, params=None):
if not label:
label = int(self._labelid)
self._labelid += 1
self.stimSourceParams[label] = Dict(params)
def addStimTargetParams(self, label=None, params=None):
if not label:
label = int(self._labelid)
self._labelid += 1
self.stimTargetParams[label] = Dict(params)
# def rename(self, attr, old, new):
# try:
# obj = getattr(self, attr)
# except:
# print 'Error renaming: netParams does not contain %s' % (attr)
# return False
# if old not in obj:
# print 'Error renaming: netParams.%s rule does not contain %s' % (attribute, old)
# return False
# obj[new] = obj.pop(old) # replace
# return True
def importCellParams(self, label, fileName, cellName, conds={}, cellArgs=None, importSynMechs=False, somaAtOrigin=True, cellInstance=False):
if cellArgs is None: cellArgs = {}
if not label:
label = int(self._labelid)
self._labelid += 1
secs, secLists, synMechs, globs = conversion.importCell(fileName, cellName, cellArgs, cellInstance)
cellRule = {'conds': conds, 'secs': secs, 'secLists': secLists, 'globals': globs}
# adjust cell 3d points so that soma is at location 0,0,0
if somaAtOrigin:
somaSec = next((sec for sec in cellRule['secs'] if 'soma' in sec), None)
if not somaSec or not 'pt3d' in cellRule['secs'][somaSec]['geom']:
pass
#print('Warning: cannot place soma at origin because soma does not exist or does not contain pt3d')
else:
soma3d = cellRule['secs'][somaSec]['geom']['pt3d']
midpoint = int(len(soma3d)/2)
somaX, somaY, somaZ = soma3d[midpoint][0:3]
for sec in list(cellRule['secs'].values()):
if 'pt3d' in sec['geom']:
for i,pt3d in enumerate(sec['geom']['pt3d']):
sec['geom']['pt3d'][i] = (pt3d[0] - somaX, pt3d[1] - somaY, pt3d[2] - somaZ, pt3d[3])
self.addCellParams(label, cellRule)
if importSynMechs:
for synMech in synMechs: self.addSynMechParams(cellName+'_'+synMech.pop('label'), synMech)
return self.cellParams[label]
def importCellParamsFromNet(self, labelList, condsList, fileName, cellNameList, importSynMechs=False):
conversion.importCellsFromNet(self, fileName, labelList, condsList, cellNameList, importSynMechs)
return self.cellParams
def addCellParamsSecList(self, label, secListName, somaDist=None, somaDistY=None):
import numpy as np
if label in self.cellParams:
cellRule = self.cellParams[label]
else:
print('Error adding secList: netParams.cellParams does not contain %s' % (label))
return
if somaDist is not None and (not isinstance(somaDist, list) or len(somaDist) != 2):
print('Error adding secList: somaDist should be a list with 2 elements')
return
if somaDistY is not None and (not isinstance(somaDistY, list) or len(somaDistY) != 2):
print('Error adding secList: somaDistY should be a list with 2 elements')
return
secList = []
for secName, sec in cellRule.secs.items():
if 'pt3d' in sec['geom']:
pt3d = sec['geom']['pt3d']
midpoint = int(len(pt3d)/2)
x,y,z = pt3d[midpoint][0:3]
if somaDist:
distSec = np.linalg.norm(np.array([x,y,z]))
if distSec >= somaDist[0] and distSec <= somaDist[1]:
secList.append(secName)
elif somaDistY:
if y >= somaDistY[0] and y <= somaDistY[1]:
secList.append(secName)
else:
print('Error adding secList: Sections do not contain 3d points')
return
cellRule.secLists[secListName] = list(secList)
def swapCellParamsPt3d(self, label, origIndex, targetIndex):
if label in self.cellParams:
cellRule = self.cellParams[label]
else:
print('Error swapping 3d pts: netParams.cellParams does not contain %s' % (label))
return
if origIndex not in list(range(4)) and targetIndex not in list(range(4)): # check valid indices (x,y,z,d)
print('Error swapping 3d pts: indices should be 0, 1, 2 or 3 (x,y,z,d)')
return
for sec in list(cellRule.secs.values()):
if 'pt3d' in sec['geom']:
pt3d = sec['geom']['pt3d']
for i,pt in enumerate(pt3d): pt3d[i] = list(pt)
for pt in pt3d:
tmp = float(pt[origIndex])
pt[origIndex] = float(pt[targetIndex])
pt[targetIndex] = tmp
def renameCellParamsSec(self, label, oldSec, newSec):
self.cellParams.rename(oldSec, newSec, (label, 'secs'))
def addCellParamsWeightNorm(self, label, fileName, threshold=1000):
import pickle, sys
if label in self.cellParams:
cellRule = self.cellParams[label]
else:
print('Error adding weightNorm: netParams.cellParams does not contain %s' % (label))
return
with open(fileName, 'rb') as fileObj:
if sys.version_info[0] == 2:
weightNorm = pickle.load(fileObj)
else:
weightNorm = pickle.load(fileObj, encoding='latin1')
try:
somaSec = next((k for k in list(weightNorm.keys()) if k.startswith('soma')),None)
somaWeightNorm = weightNorm[somaSec][0]
except:
print('Error setting weightNorm: no soma section available to set threshold')
return
for sec, wnorm in weightNorm.items():
if sec in cellRule['secs']:
wnorm = [min(wn,threshold*somaWeightNorm) for wn in wnorm]
cellRule['secs'][sec]['weightNorm'] = wnorm # add weight normalization factors for each section
def addCellParamsTemplate(self, label, conds={}, template=None):
if label in self.cellParams:
print('CellParams key %s already exists...' % (label))
secs = {}
if template == 'Simple_HH':
secs['soma'] = {'geom': {}, 'mechs': {}}
secs['soma']['geom'] = {'diam': 20, 'L': 20, 'Ra': 100.0, 'cm': 1}
secs['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.0003, 'el': -54.3}
elif template == 'BallStick_HH':
secs['soma'] = {'geom': {}, 'mechs': {}}
secs['soma']['geom'] = {'diam': 12, 'L': 12, 'Ra': 100.0, 'cm': 1}
secs['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.0003, 'el': -54.3}
secs['dend'] = {'geom': {}, 'mechs': {}}
secs['dend']['geom'] = {'diam': 1.0, 'L': 200.0, 'Ra': 100.0, 'cm': 1}
secs['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0}
secs['dend']['mechs']['pas'] = {'g': 0.001, 'e': -70}
self.cellParams[label] = ({'conds': conds, 'secs': secs})
def saveCellParamsRule(self, label, fileName):
import pickle, json, os
ext = os.path.basename(fileName).split('.')[1]
if label in self.cellParams:
cellRule = self.cellParams[label]
else:
print('Error saving: netParams.cellParams does not contain %s' % (label))
return
if ext == 'pkl':
with open(fileName, 'wb') as fileObj:
pickle.dump(cellRule, fileObj)
elif ext == 'json':
from .. import sim
sim.saveJSON(fileName, cellRule)
def loadCellParamsRule(self, label, fileName):
import pickle, json, os, sys
ext = os.path.basename(fileName).split('.')[1]
if ext == 'pkl':
with open(fileName, 'rb') as fileObj:
if sys.version_info[0] == 2:
cellRule = pickle.load(fileObj)
else:
cellRule = pickle.load(fileObj, encoding='latin1')
elif ext == 'json':
with open(fileName, 'rb') as fileObj:
cellRule = json.load(fileObj)
self.cellParams[label] = cellRule
def loadCellParams(self, label, fileName):
return self.loadCellParamsRule(label, fileName)
def saveCellParams(self, label, fileName):
return self.saveCellParamsRule(label, fileName)
def todict(self):
from ..sim import replaceDictODict
return replaceDictODict(self.__dict__)
def setNestedParam(self, paramLabel, paramVal):
if isinstance(paramLabel, list):
container = self
for ip in range(len(paramLabel)-1):
if hasattr(container, paramLabel[ip]):
container = getattr(container, paramLabel[ip])
else:
container = container[paramLabel[ip]]
container[paramLabel[-1]] = paramVal
elif isinstance(paramLabel, basestring):
setattr(self, paramLabel, paramVal) # set simConfig params
def setCfgMapping(self, cfg):
if hasattr(self, 'mapping'):
for k, v in self.mapping.items():
if getattr(cfg, k, None):
self.setNestedParam(v, getattr(cfg, k))
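# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; labels and values are hypothetical)
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    netParams = NetParams()
    # population, synaptic mechanism and connectivity rules are plain dicts
    # keyed by user-chosen labels
    netParams.popParams['E'] = {'cellType': 'PYR', 'numCells': 20}
    netParams.synMechParams['exc'] = {'mod': 'Exp2Syn', 'tau1': 0.1, 'tau2': 5.0, 'e': 0}
    netParams.connParams['E->E'] = {'preConds': {'pop': 'E'}, 'postConds': {'pop': 'E'},
                                    'weight': 0.01, 'delay': 5, 'synMech': 'exc'}
    # map a cfg attribute onto a nested netParams entry (used by setCfgMapping)
    netParams.mapping['excWeight'] = ['connParams', 'E->E', 'weight']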
|
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `TransformedTransitionKernel` `TransitionKernel`."""
import collections
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import test_util
FakeInnerKernelResults = collections.namedtuple(
'FakeInnerKernelResults', ['target_log_prob', 'step_size'])
def _maybe_seed(seed):
if tf.executing_eagerly():
tf.random.set_seed(seed)
return None
return seed
def make_transform_then_adapt_kernel(bijector):
trans_kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=FakeInnerKernel(target_log_prob_fn=fake_target_log_prob),
bijector=bijector)
return tfp.mcmc.SimpleStepSizeAdaptation(
inner_kernel=trans_kernel,
num_adaptation_steps=9)
def make_adapt_then_transform_kernel(bijector):
step_adaptation_kernel = tfp.mcmc.SimpleStepSizeAdaptation(
inner_kernel=FakeInnerKernel(target_log_prob_fn=fake_target_log_prob),
num_adaptation_steps=9)
return tfp.mcmc.TransformedTransitionKernel(
inner_kernel=step_adaptation_kernel,
bijector=bijector)
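# Note: the two helpers above nest the same kernels in opposite order. The
# tests further below rely on this: with transform-then-adapt the transformed
# state lives at `results.inner_results.transformed_state`, while with
# adapt-then-transform it sits at the top level as `results.transformed_state`.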
def fake_target_log_prob(x):
return -x**2 / 2.
class FakeInnerKernel(tfp.mcmc.TransitionKernel):
"""Fake Transition Kernel."""
def __init__(self, target_log_prob_fn, is_calibrated=True, step_size=10):
self._parameters = dict(
target_log_prob_fn=target_log_prob_fn, is_calibrated=is_calibrated,
step_size=step_size)
@property
def parameters(self):
return self._parameters
@property
def is_calibrated(self):
return self._parameters['is_calibrated']
def one_step(self, current_state, previous_kernel_results):
pass
def bootstrap_results(self, init_state):
return FakeInnerKernelResults(
target_log_prob=self._parameters['target_log_prob_fn'](init_state),
step_size=tf.nest.map_structure(tf.convert_to_tensor,
self.parameters['step_size']))
@test_util.test_all_tf_execution_regimes
class TransformedTransitionKernelTest(test_util.TestCase):
def setUp(self):
super(TransformedTransitionKernelTest, self).setUp()
self.dtype = np.float32
@test_util.numpy_disable_gradient_test('HMC')
def test_support_works_correctly_with_hmc(self):
num_results = 500
target = tfd.Beta(
concentration1=self.dtype(1.),
concentration0=self.dtype(10.))
transformed_hmc = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tf.function(target.log_prob, autograph=False),
step_size=1.64,
num_leapfrog_steps=2),
bijector=tfb.Sigmoid())
# Recall, tfp.mcmc.sample_chain calls
# transformed_hmc.bootstrap_results too.
states, kernel_results = tfp.mcmc.sample_chain(
num_results=num_results,
# The initial state is used by inner_kernel.bootstrap_results.
# Note the input is *after* bijector.forward.
current_state=self.dtype(0.25),
kernel=transformed_hmc,
num_burnin_steps=200,
num_steps_between_results=1,
seed=test_util.test_seed())
self.assertEqual(num_results, tf.compat.dimension_value(states.shape[0]))
sample_mean = tf.reduce_mean(states, axis=0)
sample_var = tf.reduce_mean(
tf.math.squared_difference(states, sample_mean), axis=0)
[
sample_mean_,
sample_var_,
is_accepted_,
true_mean_,
true_var_,
] = self.evaluate([
sample_mean,
sample_var,
kernel_results.inner_results.is_accepted,
target.mean(),
target.variance(),
])
self.assertAllClose(true_mean_, sample_mean_,
atol=0.15, rtol=0.)
self.assertAllClose(true_var_, sample_var_,
atol=0.03, rtol=0.2)
self.assertNear(0.6, is_accepted_.mean(), err=0.15)
@test_util.numpy_disable_gradient_test('Langevin')
def test_support_works_correctly_with_mala(self):
num_results = 500
target = tfd.Beta(
concentration1=self.dtype(1.),
concentration0=self.dtype(10.))
transformed_mala = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.MetropolisAdjustedLangevinAlgorithm(
target_log_prob_fn=tf.function(target.log_prob, autograph=False),
step_size=1.),
bijector=tfb.Sigmoid())
# Recall, tfp.mcmc.sample_chain calls
    # transformed_mala.bootstrap_results too.
states = tfp.mcmc.sample_chain(
num_results=num_results,
# The initial state is used by inner_kernel.bootstrap_results.
# Note the input is *after* bijector.forward.
current_state=self.dtype(0.25),
kernel=transformed_mala,
num_burnin_steps=200,
num_steps_between_results=1,
trace_fn=None,
seed=test_util.test_seed())
self.assertEqual(num_results, tf.compat.dimension_value(states.shape[0]))
sample_mean = tf.reduce_mean(states, axis=0)
sample_var = tf.reduce_mean(
tf.math.squared_difference(states, sample_mean), axis=0)
[
sample_mean_,
sample_var_,
true_mean_,
true_var_,
] = self.evaluate([
sample_mean,
sample_var,
target.mean(),
target.variance(),
])
self.assertAllClose(true_mean_, sample_mean_,
atol=0.15, rtol=0.)
self.assertAllClose(true_var_, sample_var_,
atol=0.03, rtol=0.2)
def test_support_works_correctly_with_rwm(self):
num_results = 500
target = tfd.Beta(
concentration1=self.dtype(1.),
concentration0=self.dtype(10.))
transformed_rwm = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.RandomWalkMetropolis(
target_log_prob_fn=tf.function(target.log_prob, autograph=False),
new_state_fn=tfp.mcmc.random_walk_normal_fn(scale=1.5)),
bijector=tfb.Sigmoid())
# Recall, tfp.mcmc.sample_chain calls
    # transformed_rwm.bootstrap_results too.
states = tfp.mcmc.sample_chain(
num_results=num_results,
# The initial state is used by inner_kernel.bootstrap_results.
# Note the input is *after* bijector.forward.
current_state=self.dtype(0.25),
kernel=transformed_rwm,
num_burnin_steps=200,
num_steps_between_results=1,
trace_fn=None,
seed=test_util.test_seed())
self.assertEqual(num_results, tf.compat.dimension_value(states.shape[0]))
sample_mean = tf.reduce_mean(states, axis=0)
sample_var = tf.reduce_mean(
tf.math.squared_difference(states, sample_mean), axis=0)
[
sample_mean_,
sample_var_,
true_mean_,
true_var_,
] = self.evaluate([
sample_mean,
sample_var,
target.mean(),
target.variance(),
])
self.assertAllClose(true_mean_, sample_mean_,
atol=0.15, rtol=0.)
self.assertAllClose(true_var_, sample_var_,
atol=0.03, rtol=0.2)
@test_util.numpy_disable_gradient_test('HMC')
def test_end_to_end_works_correctly(self):
true_mean = self.dtype([0, 0])
true_cov = self.dtype([[1, 0.5],
[0.5, 1]])
num_results = 500
def target_log_prob(x, y):
# Corresponds to unnormalized MVN.
# z = matmul(inv(chol(true_cov)), [x, y] - true_mean)
z = tf.stack([x, y], axis=-1) - true_mean
z = tf.squeeze(
tf.linalg.triangular_solve(
np.linalg.cholesky(true_cov),
z[..., tf.newaxis]),
axis=-1)
return -0.5 * tf.reduce_sum(z**2., axis=-1)
transformed_hmc = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tf.function(target_log_prob, autograph=False),
# Affine scaling means we have to change the step_size
# in order to get 60% acceptance, as was done in mcmc/hmc_test.py.
step_size=[1.23 / 0.75, 1.23 / 0.5],
num_leapfrog_steps=2),
bijector=[
tfb.Scale(scale=0.75),
tfb.Scale(scale=0.5),
])
# Recall, tfp.mcmc.sample_chain calls
# transformed_hmc.bootstrap_results too.
states, kernel_results = tfp.mcmc.sample_chain(
num_results=num_results,
# The initial state is used by inner_kernel.bootstrap_results.
# Note the input is *after* `bijector.forward`.
current_state=[self.dtype(-2), self.dtype(2)],
kernel=transformed_hmc,
num_burnin_steps=200,
num_steps_between_results=1,
seed=test_util.test_seed())
states = tf.stack(states, axis=-1)
self.assertEqual(num_results, tf.compat.dimension_value(states.shape[0]))
sample_mean = tf.reduce_mean(states, axis=0)
x = states - sample_mean
sample_cov = tf.matmul(x, x, transpose_a=True) / self.dtype(num_results)
[sample_mean_, sample_cov_, is_accepted_] = self.evaluate([
sample_mean, sample_cov, kernel_results.inner_results.is_accepted])
self.assertAllClose(0.6, is_accepted_.mean(), atol=0.15, rtol=0.)
self.assertAllClose(sample_mean_, true_mean, atol=0.2, rtol=0.)
self.assertAllClose(sample_cov_, true_cov, atol=0., rtol=0.4)
def test_bootstrap_requires_xor_args(self):
transformed_fake = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=FakeInnerKernel(target_log_prob_fn=fake_target_log_prob),
bijector=tfb.Exp())
with self.assertRaisesWithPredicateMatch(
ValueError, r'Must specify exactly one'):
transformed_fake.bootstrap_results()
with self.assertRaisesWithPredicateMatch(
ValueError, r'Must specify exactly one'):
transformed_fake.bootstrap_results(
init_state=2., transformed_init_state=np.log(2.))
def test_bootstrap_correctly_untransforms(self):
transformed_fake = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=FakeInnerKernel(target_log_prob_fn=fake_target_log_prob),
bijector=tfb.Exp())
automatic_pkr, manual_pkr = self.evaluate([
transformed_fake.bootstrap_results(2.),
transformed_fake.bootstrap_results(transformed_init_state=[4., 5.]),
])
self.assertNear(np.log(2.), automatic_pkr.transformed_state, err=1e-6)
self.assertAllClose(
[4., 5.], manual_pkr.transformed_state, atol=0., rtol=1e-6)
def test_copy_works(self):
transformed = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=FakeInnerKernel(target_log_prob_fn=fake_target_log_prob),
bijector=tfb.Scale(2.))
transformed_copy = tfp.mcmc.TransformedTransitionKernel(
**transformed.parameters)
pkr, pkr_copy = self.evaluate([
transformed.bootstrap_results(1.),
transformed_copy.bootstrap_results(1.)
])
self.assertAllClose(pkr.inner_results.target_log_prob,
pkr_copy.inner_results.target_log_prob)
def test_is_calibrated(self):
self.assertTrue(
tfp.mcmc.TransformedTransitionKernel(
FakeInnerKernel(lambda x: -x**2 / 2, True),
tfb.Identity()).is_calibrated)
self.assertFalse(
tfp.mcmc.TransformedTransitionKernel(
FakeInnerKernel(lambda x: -x**2 / 2, False),
tfb.Identity()).is_calibrated)
def test_bijector_valid_transform_then_adapt(self):
new_kernel = make_transform_then_adapt_kernel(tfb.Exp())
pkr_one, pkr_two = self.evaluate([
new_kernel.bootstrap_results(2.),
new_kernel.bootstrap_results(9.),
])
self.assertNear(np.log(2.),
pkr_one.inner_results.transformed_state,
err=1e-6)
self.assertNear(np.log(9.),
pkr_two.inner_results.transformed_state,
err=1e-6)
def test_bijector_valid_adapt_then_transform(self):
new_kernel = make_adapt_then_transform_kernel(tfb.Exp())
pkr_one, pkr_two = self.evaluate([
new_kernel.bootstrap_results(2.),
new_kernel.bootstrap_results(9.),
])
self.assertNear(np.log(2.), pkr_one.transformed_state, err=1e-6)
self.assertNear(np.log(9.), pkr_two.transformed_state, err=1e-6)
@test_util.numpy_disable_gradient_test('HMC')
def test_step_size_changed(self):
target_dist = tfd.MultivariateNormalDiag(loc=[0., 0.], scale_diag=[1., 10.])
# `hmc_kernel`'s step size is far from optimal
hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_dist.log_prob,
num_leapfrog_steps=27,
step_size=10)
step_adaptation_kernel = tfp.mcmc.SimpleStepSizeAdaptation(
inner_kernel=hmc_kernel,
adaptation_rate=0.8,
num_adaptation_steps=9)
trans_kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=step_adaptation_kernel,
bijector=tfb.Exp()
)
kernel_results = trans_kernel.inner_kernel.bootstrap_results(tf.zeros(2))
stream = test_util.test_seed_stream()
for _ in range(2):
_, kernel_results = trans_kernel.inner_kernel.one_step(tf.zeros(2),
kernel_results,
seed=stream())
adapted_step_size = self.evaluate(
kernel_results.inner_results.accepted_results.step_size)
self.assertLess(adapted_step_size, 7)
def test_deeply_nested(self):
step_kernel = tfp.mcmc.SimpleStepSizeAdaptation(
inner_kernel=FakeInnerKernel(target_log_prob_fn=fake_target_log_prob),
num_adaptation_steps=9)
double_step_kernel = tfp.mcmc.SimpleStepSizeAdaptation(
inner_kernel=step_kernel,
num_adaptation_steps=9)
trans_kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=double_step_kernel,
bijector=tfb.Exp())
pkr_one, pkr_two = self.evaluate([
trans_kernel.bootstrap_results(2.),
trans_kernel.bootstrap_results(9.),
])
self.assertNear(np.log(2.),
pkr_one.transformed_state,
err=1e-6)
self.assertNear(np.log(9.),
pkr_two.transformed_state,
err=1e-6)
@test_util.numpy_disable_gradient_test('HMC')
def test_nested_transform(self):
target_dist = tfd.Normal(loc=0., scale=1.)
b1 = tfb.Scale(0.5)
b2 = tfb.Exp()
chain = tfb.Chain([b2, b1]) # applies bijectors right to left (b1 then b2).
inner_kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_dist.log_prob,
num_leapfrog_steps=27,
step_size=10),
bijector=b1)
outer_kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=inner_kernel,
bijector=b2)
chain_kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_dist.log_prob,
num_leapfrog_steps=27,
step_size=10),
bijector=chain)
outer_pkr_one, outer_pkr_two = self.evaluate([
outer_kernel.bootstrap_results(2.),
outer_kernel.bootstrap_results(9.),
])
# the outermost kernel only applies the outermost bijector
self.assertNear(np.log(2.), outer_pkr_one.transformed_state, err=1e-6)
self.assertNear(np.log(9.), outer_pkr_two.transformed_state, err=1e-6)
chain_pkr_one, chain_pkr_two = self.evaluate([
chain_kernel.bootstrap_results(2.),
chain_kernel.bootstrap_results(9.),
])
# all bijectors are applied to the inner kernel, from innermost to outermost
# this behavior is completely analogous to a bijector Chain
self.assertNear(chain_pkr_one.transformed_state,
outer_pkr_one.inner_results.transformed_state,
err=1e-6)
self.assertEqual(chain_pkr_one.inner_results.accepted_results,
outer_pkr_one.inner_results.inner_results.accepted_results)
self.assertNear(chain_pkr_two.transformed_state,
outer_pkr_two.inner_results.transformed_state,
err=1e-6)
self.assertEqual(chain_pkr_two.inner_results.accepted_results,
outer_pkr_two.inner_results.inner_results.accepted_results)
seed = test_util.test_seed(sampler_type='stateless')
outer_results_one, outer_results_two = self.evaluate([
outer_kernel.one_step(2., outer_pkr_one, seed=seed),
outer_kernel.one_step(9., outer_pkr_two, seed=seed)
])
chain_results_one, chain_results_two = self.evaluate([
chain_kernel.one_step(2., chain_pkr_one, seed=seed),
chain_kernel.one_step(9., chain_pkr_two, seed=seed)
])
self.assertNear(chain_results_one[0],
outer_results_one[0],
err=1e-6)
self.assertNear(chain_results_two[0],
outer_results_two[0],
err=1e-6)
@test_util.numpy_disable_gradient_test('HMC')
def test_multipart_bijector(self):
seed_stream = test_util.test_seed_stream()
prior = tfd.JointDistributionSequential([
tfd.Gamma(1., 1.),
lambda scale: tfd.Uniform(0., scale),
lambda concentration: tfd.CholeskyLKJ(4, concentration),
], validate_args=True)
likelihood = lambda corr: tfd.MultivariateNormalTriL(scale_tril=corr)
obs = self.evaluate(
likelihood(
prior.sample(seed=seed_stream())[-1]).sample(seed=seed_stream()))
bij = prior.experimental_default_event_space_bijector()
def target_log_prob(scale, conc, corr):
return prior.log_prob(scale, conc, corr) + likelihood(corr).log_prob(obs)
kernel = tfp.mcmc.HamiltonianMonteCarlo(target_log_prob,
num_leapfrog_steps=3, step_size=.5)
kernel = tfp.mcmc.TransformedTransitionKernel(kernel, bij)
init = self.evaluate(
tuple(tf.random.uniform(s, -2., 2., seed=seed_stream())
for s in bij.inverse_event_shape(prior.event_shape)))
state = bij.forward(init)
kr = kernel.bootstrap_results(state)
next_state, next_kr = kernel.one_step(state, kr, seed=seed_stream())
self.evaluate((state, kr, next_state, next_kr))
expected = (target_log_prob(*state) -
bij.inverse_log_det_jacobian(state, [0, 0, 2]))
actual = kernel._inner_kernel.target_log_prob_fn(*init) # pylint: disable=protected-access
self.assertAllClose(expected, actual)
if __name__ == '__main__':
test_util.main()
|
|
# Standard imports
import logging
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
from sklearn import metrics
import sys
from numpy import cross
from numpy.linalg import norm
import emission.storage.decorations.trip_queries as esdtq
import emission.storage.decorations.section_queries as esdsq
"""
This class organizes data into bins by similarity. It then orders the bins
by largest to smallest and removes the bottom portion of the bins.
Two trips are in the same bin if both their start points and end points
are within a certain number of meters of each other.
As input, this class takes the following:
- data: the data to put into bins. The data should be a list of Trip objects that have
start and end locations.
- radius: the radius for determining how close the start points and end points of two
trips have to be for the trips to be put in the same bin
This is called by cluster_pipeline.py.
"""
class similarity:
def __init__(self, data, radius, old=True):
self.data = data
if not data:
self.data = []
self.bins = []
self.radius = float(radius)
self.old = old
if not old:
            for a in list(self.data): # iterate over a copy so the removals below are safe
# print "a is %s" % a
t = a
try:
start_lon = t.start_loc["coordinates"][0]
start_lat = t.start_loc["coordinates"][1]
end_lon = t.end_loc["coordinates"][0]
end_lat = t.end_loc["coordinates"][1]
# logging.debug("start lat = %s" % start_lat)
if self.distance(start_lat, start_lon, end_lat, end_lon):
self.data.remove(a)
except:
self.data.remove(a)
else:
for a in range(len(self.data)-1, -1, -1):
start_lat = self.data[a].trip_start_location.lat
start_lon = self.data[a].trip_start_location.lon
end_lat = self.data[a].trip_end_location.lat
end_lon = self.data[a].trip_end_location.lon
if self.distance(start_lat, start_lon, end_lat, end_lon):
self.data.pop(a)
logging.debug('After removing trips that are points, there are %s data points' % len(self.data))
self.size = len(self.data)
#create bins
def bin_data(self):
for a in range(self.size):
added = False
for bin in self.bins:
try:
if self.match(a,bin):
bin.append(a)
added = True
break
except:
added = False
if not added:
self.bins.append([a])
self.bins.sort(key=lambda bin: len(bin), reverse=True)
#delete lower portion of bins
def delete_bins(self):
if len(self.bins) <= 1:
self.newdata = self.data
return
num = self.elbow_distance()
sum = 0
for i in range(len(self.bins)):
sum += len(self.bins[i])
if len(self.bins[i]) <= len(self.bins[num]):
sum -= len(self.bins[i])
num = i
break
logging.debug('the new number of trips is %d' % sum)
logging.debug('the cutoff point is %d' % num)
self.num = num
#self.graph()
for i in range(len(self.bins) - num):
self.bins.pop()
newdata = []
for bin in self.bins:
for b in bin:
d = self.data[b]
newdata.append(self.data[b])
self.newdata = newdata if len(newdata) > 1 else self.data
#calculate the cut-off point in the histogram
#This is motivated by the need to calculate the cut-off point
#that separates the common trips from the infrequent trips.
#This works by approximating the point of maximum curvature
#from the curve formed by the points of the histogram. Since
#it is a discrete set of points, we calculate the point of maximum
#distance from the line formed by connecting the height of the
#tallest bin with that of the shortest bin, as described
#here: http://stackoverflow.com/questions/2018178/finding-the-best-trade-off-point-on-a-curve?lq=1
#We then remove all bins of lesser height than the one chosen.
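    # Worked example (illustrative): for bin sizes [100, 80, 10, 5, 2] the
    # reference line runs from (0, 100) to (4, 2); the perpendicular distances
    # of the intermediate points are roughly 0.18, 1.67 and 0.88, so the elbow
    # index returned below is 2 (the bin of size 10).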
def elbow_distance(self):
y = [0] * len(self.bins)
for i in range(len(self.bins)):
y[i] = len(self.bins[i])
N = len(y)
x = range(N)
max = 0
index = -1
a = numpy.array([x[0], y[0]])
b = numpy.array([x[-1], y[-1]])
n = norm(b-a)
new_y = []
for i in range(0, N):
p = numpy.array([x[i], y[i]])
dist = norm(numpy.cross(p-a,p-b))/n
new_y.append(dist)
if dist > max:
max = dist
index = i
return index
#check if two trips match
def match(self,a,bin):
for b in bin:
if not self.old:
if not self.distance_helper_new(a,b):
return False
else:
if not self.distance_helper(a,b):
return False
return True
#create the histogram
def graph(self):
bars = [0] * len(self.bins)
for i in range(len(self.bins)):
bars[i] = len(self.bins[i])
N = len(bars)
index = numpy.arange(N)
width = .2
plt.bar(index+width, bars, color='k')
try:
plt.bar(self.num+width, bars[self.num], color='g')
except Exception:
pass
plt.xlim([0, N])
plt.xlabel('Bins')
plt.ylabel('Number of elements')
plt.savefig('histogram.png')
#evaluate the bins as if they were a clustering on the data
def evaluate_bins(self):
self.labels = []
for bin in self.bins:
for b in bin:
self.labels.append(self.bins.index(bin))
if not self.data or not self.bins:
return
if len(self.labels) < 2:
logging.debug('Everything is in one bin.')
return
labels = numpy.array(self.labels)
points = []
for bin in self.bins:
for b in bin:
start_lat = self.data[b].trip_start_location.lat
start_lon = self.data[b].trip_start_location.lon
end_lat = self.data[b].trip_end_location.lat
end_lon = self.data[b].trip_end_location.lon
path = [start_lat, start_lon, end_lat, end_lon]
points.append(path)
a = metrics.silhouette_score(numpy.array(points), labels)
logging.debug('number of bins is %d' % len(self.bins))
        logging.debug('silhouette score is %s' % a)
return a
#calculate the distance between two trips
def distance_helper(self, a, b):
starta = self.data[a].trip_start_location
startb = self.data[b].trip_start_location
enda = self.data[a].trip_end_location
endb = self.data[b].trip_end_location
start = self.distance(starta.lat, starta.lon, startb.lat, startb.lon)
end = self.distance(enda.lat, enda.lon, endb.lat, endb.lon)
if start and end:
return True
return False
def distance_helper_new(self, a, b):
tripa = self.data[a]
tripb = self.data[b]
starta = tripa.start_loc["coordinates"]
startb = tripb.start_loc["coordinates"]
enda = tripa.end_loc["coordinates"]
endb = tripb.end_loc["coordinates"]
# Flip indices because points are in geojson (i.e. lon, lat)
start = self.distance(starta[1], starta[0], startb[1], startb[0])
end = self.distance(enda[1], enda[0], endb[1], endb[0])
return True if start and end else False
#calculate the meter distance between two trips
def distance(self, lat1, lon1, lat2, lon2):
R = 6371000
rlat1 = math.radians(lat1)
rlat2 = math.radians(lat2)
lon = math.radians(lon2 - lon1);
lat = math.radians(lat2-lat1);
a = math.sin(lat/2.0)**2 + math.cos(rlat1)*math.cos(rlat2) * math.sin(lon/2.0)**2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = R * c
if d <= self.radius:
return True
return False
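# Minimal usage sketch (illustrative): `trips` stands for a hypothetical list of
# trip objects with geojson start_loc/end_loc fields, as expected when old=False.
#
#   sim = similarity(trips, radius=300, old=False)
#   sim.bin_data()       # group trips whose endpoints fall within the radius
#   sim.delete_bins()    # drop the infrequent bins below the elbow cutoff
#   common_trips = sim.newdata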
|
|
import multiprocessing
import threading
import Queue
import signal, os
import time
"""
Runs a callable with given arguments and put the results on a queue.
@param queue The queue to put the results on
@param target The callable to run
@param args Arguments for the callable
@param kwargs Keyword arguments for the callable
"""
def resultPacker(queue, target, args=(), kwargs={}):
result = target(*args, **kwargs)
queue.put(result)
class ProcessHandler:
maxProcesses = 7
maxPrioritys = 4
debug = False
lock = None
isStopped = False
waitingProcesses = None
pausedProcesses = None
activeProcesses = None
def __init__(self, maxProcesses=7, maxPrioritys=4, debug=False):
self.maxProcesses = maxProcesses
self.maxPrioritys = maxPrioritys
self.debug = debug
self.isStopped = False
self.lock = threading.Lock()
self.waitingProcesses = [[] for i in range(self.maxPrioritys)]
self.pausedProcesses = [[] for i in range(self.maxPrioritys)]
self.activeProcesses = [[] for i in range(self.maxPrioritys)]
"""
Counts the amount of currently running processes
@return the amount of running processes
"""
def activeCount(self):
count = 0
for pros in self.activeProcesses:
count += len(pros)
return count
"""
Removes all finished processes from the list
"""
def removeFinished(self):
for active in self.activeProcesses:
removed = []
for p in active:
if p.exitcode != None:
removed.append(p)
for p in removed:
active.remove(p)
if self.debug:
print "Removed:", p.name
"""
Tries to free a process for a given priority
@param neededPriority The priority of the process that needs the resource
@return True if a process resource is free/was freed, False otherwise
"""
def freeProcess(self, neededPriority):
#self.removeFinished()
# Are there free resources?
if self.activeCount() < self.maxProcesses:
return True
        # If there is no process of this priority running, one should get freed no matter what
if len(self.activeProcesses[neededPriority]) < 1:
neededPriority = self.maxPrioritys + 1
for (priority,(paused,active)) in enumerate(zip(self.pausedProcesses, self.activeProcesses)):
# Not important enough?
if neededPriority <= priority:
return False
# Only stop a process if there is still one of the same priority running
if len(active) > 1:
toPause = active.pop(0)
try:
os.kill(toPause.pid, signal.SIGSTOP)
paused.append(toPause)
if self.debug:
print "Pause:", toPause.name
except OSError, e:
print "Tried to pause process but it's already gone?", toPause.name
return True
return False
"""
Tries to start new processes and pauses other ones if needed
"""
def update(self):
self.lock.acquire()
try:
self.removeFinished()
for (priority,(waiting,paused,active)) in reversed(list(enumerate(zip(self.waitingProcesses, self.pausedProcesses, self.activeProcesses)))):
# Try to continue processes
while len(paused) > 0:
if not self.freeProcess(priority):
break
toStart = paused.pop(0)
try:
os.kill(toStart.pid, signal.SIGCONT)
active.append(toStart)
if self.debug:
print "Continue:", toStart.name
except OSError, e:
print "Can't kill process. Process '%s' is not running." % toStart.name
# Try to start new processes
while len(waiting) > 0:
if not self.freeProcess(priority):
break
onComplete, ocArgs, ocKwargs, target, args, kwargs, name = waiting.pop(0)
results = multiprocessing.Queue()
process = multiprocessing.Process(target=resultPacker, args=(results, target, args, kwargs), name=name)
thread = threading.Thread(target=self.runProcess, args=(results, process, onComplete, ocArgs, ocKwargs))
thread.start()
active.append(process)
if self.debug:
print "Start:", process.name
# pro = self.activeProcesses
# print len(pro[0]), len(pro[1]), len(pro[2]), len(pro[3])
# tmp = "["
# for (waiting,paused,active) in zip(self.waitingProcesses, self.pausedProcesses, self.activeProcesses):
# tmp = tmp + "["
# for pro in active:
# tmp = tmp + str(pro) + ","
# tmp = tmp + "]"
# tmp = tmp + "]"
# print tmp
finally:
self.lock.release()
"""
Runs a process and waits for the process (queue)
and then let's a callable deal with the result
    @param queue Queue which will get one result put on
@param process The process to run
@param onComplete Callable that can work with this result
@param onCompleteArgs args for onComplete
@param onCompleteKwargs more args for onComplete
"""
def runProcess(self, queue, process, onComplete=None, onCompleteArgs=(), onCompleteKwargs={}):
res = False
poll = True
process.start()
while poll:
try:
res = queue.get(timeout = 1)
except Queue.Empty, e:
pass
process.join(timeout = 0)
# Check if the process ended in a not nice way
if process.exitcode != None:
poll = False
if process.exitcode != 0:
res = False
# Shoot the process and exit if the server is stopped
elif self.isStopped:
self.stopProcess(process = process)
return
# Make sure the process gets joined
process.join()
self.update()
if onComplete != None:
onComplete(res, *onCompleteArgs, **onCompleteKwargs)
"""
    Runs a task in its own process and executes another callable on the result
    in its own thread
    @param priority Priority of the task
    @param onComplete Callable that will deal with the result of target
    @param onCompleteArgs Arguments for onComplete
    @param onCompleteKwargs Keyword arguments for onComplete
@param target Callable that represents the task
@param args Arguments for target
@param kwargs Keyword arguments for target
@param name Name of the process
"""
def runTask(self, target, args=(), kwargs={}, priority=0, onComplete=None, onCompleteArgs=(), onCompleteKwargs={}, name=None):
if priority >= self.maxPrioritys or priority < 0:
raise "Fuckedup Priority"
self.lock.acquire()
try:
self.waitingProcesses[priority].append((onComplete, onCompleteArgs, onCompleteKwargs, target, args, kwargs, name))
finally:
self.lock.release()
self.update()
"""
    Runs a task in its own process and returns the result once it is done
@param priority Priority of the task
@param target Callable that represents the task
@param args Arguments for target
@param kwargs Keyword arguments for target
@param name Name of the process
    @return The result of target (False if the process failed)
"""
def runTaskWait(self, target, args=(), kwargs={}, priority=0, name=None):
res = Queue.Queue()
self.runTask(target=target, args=args, kwargs=kwargs, priority=priority, onComplete=res.put, onCompleteArgs=(), name=name)
return res.get()
"""
Shoots a process
@param name The name of the process that should get stopped
@param process The process that should get stopped
"""
def stopProcess(self, name=None, process=None):
        if process == None:
            if name == None:
                return
            self.lock.acquire()
            try:
                for (paused, waiting, active) in zip(self.pausedProcesses, self.waitingProcesses, self.activeProcesses):
                    for pro in paused:
                        if pro.name == name:
                            process = pro
                    # Entries in the waiting queue are still argument tuples
                    # (there is no Process object or pid yet), so match on the
                    # stored name and simply drop them from the queue.
                    for entry in list(waiting):
                        if entry[6] == name:
                            waiting.remove(entry)
                    for pro in active:
                        if pro.name == name:
                            process = pro
            finally:
                self.lock.release()
            if process == None:
                return
try:
os.kill(process.pid, signal.SIGKILL)
#process.join()
#self.update()
except OSError, e:
print "Tried to shoot process but it's already gone?", process.pid
"""
Waits till all processes of a priority are finished and then returns
@param priority The priority to wait for
@param waitTime Amount of seconds between checking
"""
def waitForPriority(self, priority, waitTime=1):
while True:
count = 0
self.lock.acquire()
try:
if (len(self.pausedProcesses[priority]) + len(self.waitingProcesses[priority]) + len(self.activeProcesses[priority])) == 0:
return
finally:
self.lock.release()
time.sleep(waitTime)
"""
After calling all processes will get ended (KILLED)
"""
def nukeEverything(self):
self.lock.acquire()
try:
self.isStopped = True
for i in range(self.maxPrioritys):
self.waitingProcesses[i] = []
self.pausedProcesses[i] = []
self.activeProcesses[i] = []
finally:
self.lock.release()
# Random test code
def fib(n):
if n <= 2:
return n
return fib(n-1) + fib(n-2)
def printer(toPrint):
y = toPrint
print "\tPrinter:", toPrint
if __name__ == '__main__':
ph = ProcessHandler(8)
#ph.runTask(0, printer, fib, args=tuple([38]), name=str(0)+"-FromLoop-"+str(0))
#ph.runTask(0, printer, fib, args=tuple([38]), name=str(0)+"-FromLoop-"+str(1))
#print ph.runTaskWait(priority=0, target=fib, args=tuple([38]))
for prio in range(4):
for i in range(1000):
ph.runTask(priority=prio, onComplete=printer, target=fib, args=tuple([34]), name=str(prio)+"-"+str(i))
time.sleep(5)
print "pulling the Nuke"
ph.nukeEverything()
ph.waitForPriority(2, 1)
print "Done!"
#time.sleep(100)
#print "I'm out"
#while True:
# time.sleep(1)
# ph.update()
"""
#phand.addTask(3, test1, tuple([phand]), "Test1orso")
#for i in range(100):
# time.sleep(1)
# print "Bla-", i
# phand.join()
# x = True
# while x:
# x = phand.startProcess()
for i in range(10):
phand.addTask(0, fib, tuple([38]), "0-FromLoop-"+str(i))
for i in range(10):
phand.addTask(1, fib, tuple([38]), "1-FromLoop-"+str(i))
for i in range(10):
phand.addTask(2, fib, tuple([38]), "2-FromLoop-"+str(i))
for i in range(10):
phand.addTask(3, fib, tuple([38]), "3-FromLoop-"+str(i))
for i in range(100):
time.sleep(1)
print "Bla-", i
phand.join()
x = True
while x:
x = phand.startProcess()
#p = multiprocessing.Process(target=onComplete)
#p.start()
#os.kill(p.pid, signal.SIGSTOP)
#time.sleep(1)
#print "Bla"
#os.kill(p.pid, signal.SIGCONT)
#time.sleep(1)
#print "Bla"
#p.join()
#"""
|
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import array
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4, ipv6
from ryu.lib.packet import icmp, tcp, udp, arp
from ryu.lib import snortlib
class SimpleSwitchSnort(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {'snortlib': snortlib.SnortLib}
def __init__(self, *args, **kwargs):
super(SimpleSwitchSnort, self).__init__(*args, **kwargs)
self.snort = kwargs['snortlib']
self.snort_port = 3
self.mac_to_port = {}
self.datapath = None
socket_config = {'unixsock': False}
self.snort.set_config(socket_config)
self.snort.start_socket_server()
def packet_print(self, pkt):
pkt = packet.Packet(array.array('B', pkt))
eth = pkt.get_protocol(ethernet.ethernet)
_ipv4 = pkt.get_protocol(ipv4.ipv4)
_icmp = pkt.get_protocol(icmp.icmp)
if _icmp:
self.logger.info("%r", _icmp)
if _ipv4:
self.logger.info("%r", _ipv4)
if eth:
self.logger.info("%r", eth)
# for p in pkt.protocols:
# if hasattr(p, 'protocol_name') is False:
# break
# print('p: %s' % p.protocol_name)
def send_blocking_flowrule(self, msg):
datapath = self.datapath
if datapath is None:
self.logger.info('no switch detected yet, ignoring alert...')
return
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
pkt = packet.Packet(array.array('B',msg.pkt))
pkt_eth = pkt.get_protocol(ethernet.ethernet)
mac_src = pkt_eth.src
eth_type = pkt_eth.ethertype
pkt_ipv6 = pkt.get_protocol(ipv6.ipv6)
if pkt_ipv6:
self.logger.info('received ipv6 packet, blocking ipv6 rules is not supported yet...')
return
pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)
if pkt_ipv4:
dst = pkt_ipv4.dst
src = pkt_ipv4.src
ip_proto = pkt_ipv4.proto
pkt_tcp = pkt.get_protocol(tcp.tcp)
pkt_udp = pkt.get_protocol(udp.udp)
pkt_icmp = pkt.get_protocol(icmp.icmp)
if pkt_tcp:
self.logger.info('received tcp packet')
L4_pkt = pkt_tcp
L4_src = L4_pkt.src_port
L4_dst = L4_pkt.dst_port
match = parser.OFPMatch(eth_src=mac_src,
eth_type=eth_type, ipv4_dst=pkt_ipv4.dst, ipv4_src=pkt_ipv4.src,
ip_proto=ip_proto, tcp_src=L4_src, tcp_dst=L4_dst)
elif pkt_udp:
self.logger.info('received udp packet')
L4_pkt = pkt_udp
L4_src = L4_pkt.src_port
L4_dst = L4_pkt.dst_port
match = parser.OFPMatch(eth_src=mac_src,
eth_type=eth_type, ipv4_dst=pkt_ipv4.dst, ipv4_src=pkt_ipv4.src,
ip_proto=ip_proto, udp_src=L4_src, udp_dst=L4_dst)
elif pkt_icmp:
self.logger.info('received icmp packet')
match = parser.OFPMatch(eth_src=mac_src,
eth_type=eth_type, ipv4_dst=pkt_ipv4.dst, ipv4_src=pkt_ipv4.src,
ip_proto=ip_proto)
else:
self.logger.info('received other packet')
match = parser.OFPMatch(eth_src=mac_src,
eth_type=eth_type, ipv4_dst=pkt_ipv4.dst, ipv4_src=pkt_ipv4.src,
ip_proto=ip_proto)
priority = 100
# only send out of snort port, to keep on monitoring
#actions = [parser.OFPActionOutput(self.snort_port)]
# empty actions = DROP packets
actions = []
self.add_flow(datapath, priority, match, actions)
@set_ev_cls(snortlib.EventAlert, MAIN_DISPATCHER)
def process_snort_alert(self, ev):
self._dump_alert(ev)
msg = ev.msg
alertmsg = ''.join(msg.alertmsg)
if 'ryu block' in alertmsg:
self.logger.info('Blocking this flow:{0}'.format(alertmsg))
self.send_blocking_flowrule(msg)
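    # Illustrative note (not from this repo): the blocking path above only fires
    # for Snort rules whose msg contains the literal 'ryu block', e.g.
    #   alert icmp any any -> any any (msg:"ryu block icmp flood"; sid:1000001;)
    # All other alerts are only printed by _dump_alert().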
def _dump_alert(self, ev):
msg = ev.msg
print('alertmsg: %s' % ''.join(msg.alertmsg))
self.packet_print(msg.pkt)
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
self.datapath = datapath
self.logger.info('add datapath id: {0}'.format(datapath.id))
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# install table-miss flow entry
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
# self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port),
parser.OFPActionOutput(self.snort_port)]
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
self.add_flow(datapath, 1, match, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
|
|
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Simone Benatti, Radu Serban
# =============================================================================
#
# Chrono demonstration of sensors attached to a HMMWV
# A camera, gps and an imu are attached to the HMMWV
#
# =============================================================================
import pychrono as chrono
import pychrono.vehicle as veh
import pychrono.irrlicht as irr
import pychrono.sensor as sens
import math as m
import os
import math
# =============================================================================
def main():
print("Copyright (c) 2017 projectchrono.org" + "\n\n")
# Create systems
# Create the HMMWV vehicle, set parameters, and initialize
my_hmmwv = veh.HMMWV_Full()
my_hmmwv.SetContactMethod(contact_method)
my_hmmwv.SetChassisCollisionType(chassis_collision_type)
my_hmmwv.SetChassisFixed(False)
my_hmmwv.SetInitPosition(chrono.ChCoordsysD(initLoc, initRot))
my_hmmwv.SetPowertrainType(powertrain_model)
my_hmmwv.SetDriveType(drive_type)
my_hmmwv.SetSteeringType(steering_type)
my_hmmwv.SetTireType(tire_model)
my_hmmwv.SetTireStepSize(tire_step_size)
my_hmmwv.Initialize()
my_hmmwv.SetChassisVisualizationType(chassis_vis_type)
my_hmmwv.SetSuspensionVisualizationType(suspension_vis_type)
my_hmmwv.SetSteeringVisualizationType(steering_vis_type)
my_hmmwv.SetWheelVisualizationType(wheel_vis_type)
my_hmmwv.SetTireVisualizationType(tire_vis_type)
# Create the terrain
terrain = veh.RigidTerrain(my_hmmwv.GetSystem())
if (contact_method == chrono.ChContactMethod_NSC):
patch_mat = chrono.ChMaterialSurfaceNSC()
patch_mat.SetFriction(0.9)
patch_mat.SetRestitution(0.01)
elif (contact_method == chrono.ChContactMethod_SMC):
patch_mat = chrono.ChMaterialSurfaceSMC()
patch_mat.SetFriction(0.9)
patch_mat.SetRestitution(0.01)
patch_mat.SetYoungModulus(2e7)
patch = terrain.AddPatch(patch_mat,
chrono.ChVectorD(0, 0, 0), chrono.ChVectorD(0, 0, 1),
terrainLength, terrainWidth)
patch.SetTexture(veh.GetDataFile("terrain/textures/tile4.jpg"), 200, 200)
patch.SetColor(chrono.ChColor(0.8, 0.8, 0.5))
terrain.Initialize()
# Create the vehicle Irrlicht interface
app = veh.ChWheeledVehicleIrrApp(my_hmmwv.GetVehicle(), 'HMMWV', irr.dimension2du(1000,800))
app.SetSkyBox()
app.AddTypicalLights(irr.vector3df(30, -30, 100), irr.vector3df(30, 50, 100), 250, 130)
app.AddTypicalLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
app.SetChaseCamera(trackPoint, 6.0, 0.5)
app.SetTimestep(step_size)
app.AssetBindAll()
app.AssetUpdateAll()
# Initialize output
try:
os.mkdir(out_dir)
except:
print("Error creating directory " )
# Set up vehicle output
my_hmmwv.GetVehicle().SetChassisOutput(True);
my_hmmwv.GetVehicle().SetSuspensionOutput(0, True);
my_hmmwv.GetVehicle().SetSteeringOutput(0, True);
my_hmmwv.GetVehicle().SetOutput(veh.ChVehicleOutput.ASCII , out_dir, "output", 0.1);
# Generate JSON information with available output channels
my_hmmwv.GetVehicle().ExportComponentList(out_dir + "/component_list.json");
# Create the interactive driver system
driver = veh.ChIrrGuiDriver(app)
# Set the time response for steering and throttle keyboard inputs.
steering_time = 1.0 # time to go from 0 to +1 (or from 0 to -1)
throttle_time = 1.0 # time to go from 0 to +1
braking_time = 0.3 # time to go from 0 to +1
driver.SetSteeringDelta(render_step_size / steering_time)
driver.SetThrottleDelta(render_step_size / throttle_time)
driver.SetBrakingDelta(render_step_size / braking_time)
driver.Initialize()
# Simulation loop
# Number of simulation steps between miscellaneous events
render_steps = m.ceil(render_step_size / step_size)
debug_steps = m.ceil(debug_step_size / step_size)
# Initialize simulation frame counter and simulation time
step_number = 0
render_frame = 0
if (contact_vis):
app.SetSymbolscale(1e-4)
# app.SetContactsDrawMode(irr.eCh_ContactsDrawMode::CONTACT_FORCES);
# ---------------------------------------------
# Create a sensor manager and add a point light
# ---------------------------------------------
manager = sens.ChSensorManager(my_hmmwv.GetSystem())
manager.scene.AddPointLight(chrono.ChVectorF(0, 0, 100), chrono.ChVectorF(2, 2, 2), 5000)
manager.SetKeyframeSizeFromTimeStep(.001,1/5)
# ------------------------------------------------
# Create a camera and add it to the sensor manager
# ------------------------------------------------
fov = 1.408
lag = 0
update_rate = 5
exposure_time = 1/update_rate
offset_pose = chrono.ChFrameD(chrono.ChVectorD(-5, 0, 2))
cam = sens.ChCameraSensor(
my_hmmwv.GetChassisBody(), # body camera is attached to
update_rate, # update rate in Hz
offset_pose, # offset pose
image_width, # image width
image_height, # image height
fov # camera's horizontal field of view
)
cam.SetName("Camera Sensor")
# cam.SetLag(0);
# cam.SetCollectionWindow(0);
# Visualizes the image
if vis:
cam.PushFilter(sens.ChFilterVisualize(image_width, image_height, "HMMWV Camera"))
# Save the current image to a png file at the specified path
if save:
cam.PushFilter(sens.ChFilterSave(out_dir + "cam/"))
# Add a camera to a sensor manager
manager.AddSensor(cam)
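    # Note (illustrative, not part of the original demo): filters pushed above
    # run in order on every rendered frame; e.g. sens.ChFilterRGBA8Access()
    # could be appended the same way to expose the raw image buffer on the host.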
# ----------------------------------------------
# Create an IMU sensor and add it to the manager
# ----------------------------------------------
offset_pose = chrono.ChFrameD(chrono.ChVectorD(-8, 0, 1), chrono.Q_from_AngAxis(0, chrono.ChVectorD(0, 1, 0)))
imu = sens.ChIMUSensor(my_hmmwv.GetChassisBody(), # body imu is attached to
imu_update_rate, # update rate in Hz
offset_pose, # offset pose
imu_noise_none # noise model
)
imu.SetName("IMU Sensor")
imu.SetLag(imu_lag);
imu.SetCollectionWindow(imu_collection_time);
# Provides the host access to the imu data
imu.PushFilter(sens.ChFilterIMUAccess())
# Add the imu to the sensor manager
manager.AddSensor(imu)
# ----------------------------------------------
# Create an GPS sensor and add it to the manager
# ----------------------------------------------
offset_pose = chrono.ChFrameD(chrono.ChVectorD(-8, 0, 1), chrono.Q_from_AngAxis(0, chrono.ChVectorD(0, 1, 0)))
gps = sens.ChGPSSensor(my_hmmwv.GetChassisBody(), # body imu is attached to
gps_update_rate, # update rate in Hz
offset_pose, # offset pose
gps_reference,
gps_noise_none # noise model
)
gps.SetName("GPS Sensor")
gps.SetLag(gps_lag)
gps.SetCollectionWindow(gps_collection_time)
# Provides the host access to the gps data
gps.PushFilter(sens.ChFilterGPSAccess())
# Add the gps to the sensor manager
manager.AddSensor(gps)
realtime_timer = chrono.ChRealtimeStepTimer()
while (app.GetDevice().run()):
time = my_hmmwv.GetSystem().GetChTime()
#End simulation
if (time >= t_end):
break
if(step_number%render_steps ==0):
app.BeginScene(True, True, irr.SColor(255, 140, 161, 192))
app.DrawAll()
app.EndScene()
#Debug logging
if (debug_output and step_number % debug_steps == 0):
print("\n\n============ System Information ============\n")
print("Time = ", time, "\n\n")
#my_hmmwv.DebugLog(OUT_SPRINGS | OUT_SHOCKS | OUT_CONSTRAINTS)
marker_driver = my_hmmwv.GetChassis().GetMarkers()[0].GetAbsCoord().pos
marker_com = my_hmmwv.GetChassis().GetMarkers()[1].GetAbsCoord().pos
print("Markers\n")
print(" Driver loc: ", marker_driver.x, " ", marker_driver.y, " ", marker_driver.z)
print(" Chassis COM loc: ", marker_com.x, " ", marker_com.y, " ", marker_com.z)
# Get driver inputs
driver_inputs = driver.GetInputs()
# Update modules (process inputs from other modules)
driver.Synchronize(time)
terrain.Synchronize(time)
my_hmmwv.Synchronize(time, driver_inputs, terrain)
app.Synchronize(driver.GetInputModeAsString(), driver_inputs)
# Advance simulation for one timestep for all modules
driver.Advance(step_size)
terrain.Advance(step_size)
my_hmmwv.Advance(step_size)
app.Advance(step_size)
# Update sensor manager
# Will render/save/filter automatically
manager.Update()
# Increment simulation step counter
step_number += 1
# Spin in place for real time to catch up
realtime_timer.Spin(step_size)
return 0
# -----------------
# Sensor parameters
# -----------------
# Update rate of each sensor in Hz
cam_update_rate = 5
imu_update_rate = 200
gps_update_rate = 2
# Image width and height
image_width = 1280
image_height = 720
# Lag time for each sensor
cam_lag = 0
imu_lag = 0
gps_lag = 0
# Collection window for each sensor
# Typically 1 / update rate
cam_collection_time = 0 # instant
imu_collection_time = 0 # instant
gps_collection_time = 1. / float(gps_update_rate)
# GPS reference point (longitude, latitude, altitude)
# Located in Madison, WI
gps_reference = chrono.ChVectorD(-89.400, 43.070, 260.0)
# IMU and GPS noise models
# Setting to none (does not affect the data)
imu_noise_none = sens.ChIMUNoiseNone()
gps_noise_none = sens.ChGPSNoiseNone()
# ------------------
# Vehicle parameters
# ------------------
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')
veh.SetDataPath(chrono.GetChronoDataPath() + 'vehicle/')
# Initial vehicle location and orientation
initLoc = chrono.ChVectorD(0, 0, 1.6)
initRot = chrono.ChQuaternionD(1, 0, 0, 0)
# Visualization type for vehicle parts (PRIMITIVES, MESH, or NONE)
chassis_vis_type = veh.VisualizationType_MESH
suspension_vis_type = veh.VisualizationType_PRIMITIVES
steering_vis_type = veh.VisualizationType_PRIMITIVES
wheel_vis_type = veh.VisualizationType_MESH
tire_vis_type = veh.VisualizationType_MESH
# Collision type for chassis (PRIMITIVES, MESH, or NONE)
chassis_collision_type = veh.ChassisCollisionType_NONE
# Type of powertrain model (SHAFTS, SIMPLE)
powertrain_model = veh.PowertrainModelType_SHAFTS
# Drive type (FWD, RWD, or AWD)
drive_type = veh.DrivelineType_AWD
# Steering type (PITMAN_ARM or PITMAN_ARM_SHAFTS)
steering_type = veh.SteeringType_PITMAN_ARM
# Type of tire model (RIGID, RIGID_MESH, PACEJKA, LUGRE, FIALA, PAC89)
tire_model = veh.TireModelType_TMEASY
# Rigid terrain
terrainHeight = 0  # terrain height (FLAT terrain only)
terrainLength = 100.0  # size in X direction
terrainWidth = 100.0  # size in Y direction
# Point on chassis tracked by the camera
trackPoint = chrono.ChVectorD(0.0, 0.0, 1.75)
# Contact method
contact_method = chrono.ChContactMethod_SMC
contact_vis = False
# ---------------------
# Simulation parameters
# ---------------------
# Simulation step sizes
step_size = 1e-3
tire_step_size = step_size
# Simulation end time
t_end = 1000
# Time interval between two render frames
render_step_size = 1.0 / 50  # FPS = 50
# Output directory
out_dir = "SENSOR_OUTPUT/"
# Debug logging
debug_output = False
debug_step_size = 1.0 / 1 # FPS = 1
vis = True
save = False
# POV-Ray output
povray_output = False
main()
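# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original demo): the step-count and
# driver-delta arithmetic used in main(), checked in plain Python. The helper
# name is hypothetical and it is never called by the demo; it assumes the
# interactive driver applies one delta per rendered frame, which is what the
# render_step_size / steering_time expressions above imply.
# ---------------------------------------------------------------------------
def _timing_sketch(step_size=1e-3, render_fps=50.0, steering_time=1.0):
    import math
    render_step_size = 1.0 / render_fps                     # 0.02 s between render frames
    render_steps = math.ceil(render_step_size / step_size)  # 20 simulation steps per frame
    steering_delta = render_step_size / steering_time       # increment per rendered frame
    frames_to_full = math.ceil(1.0 / steering_delta)        # 50 frames * 0.02 s = 1.0 s
    return render_steps, steering_delta, frames_to_full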
|
|
"""
Handlers for keys related to number theory: prime, even, odd, etc.
"""
from sympy.assumptions import Q, ask
from sympy.assumptions.handlers import CommonHandler
from sympy.ntheory import isprime
class AskPrimeHandler(CommonHandler):
"""
Handler for key 'prime'
Test that an expression represents a prime number
"""
@staticmethod
def _number(expr, assumptions):
# helper method
if (expr.as_real_imag()[1] == 0) and int(expr.evalf()) == expr:
return isprime(int(expr.evalf()))
return False
@staticmethod
def Basic(expr, assumptions):
# Just use int(expr) once
# http://code.google.com/p/sympy/issues/detail?id=1462
# is solved
if expr.is_number:
return AskPrimeHandler._number(expr, assumptions)
@staticmethod
def Mul(expr, assumptions):
if expr.is_number:
return AskPrimeHandler._number(expr, assumptions)
for arg in expr.args:
if ask(arg, Q.integer, assumptions):
pass
else: break
else:
# a product of integers can't be a prime
return False
@staticmethod
def Pow(expr, assumptions):
"""
Integer**Integer -> !Prime
"""
if expr.is_number:
return AskPrimeHandler._number(expr, assumptions)
if ask(expr.exp, Q.integer, assumptions) and \
ask(expr.base, Q.integer, assumptions):
return False
@staticmethod
def Integer(expr, assumptions):
return isprime(expr)
@staticmethod
def Rational(expr, assumptions):
return False
@staticmethod
def Real(expr, assumptions):
return AskPrimeHandler._number(expr, assumptions)
@staticmethod
def Infinity(expr, assumptions):
return False
@staticmethod
def NegativeInfinity(expr, assumptions):
return False
@staticmethod
def ImaginaryUnit(expr, assumptions):
return False
@staticmethod
def NumberSymbol(expr, assumptions):
return AskPrimeHandler._number(expr, assumptions)
class AskCompositeHandler(CommonHandler):
@staticmethod
def Basic(expr, assumptions):
_positive = ask(expr, Q.positive, assumptions)
if _positive:
_integer = ask(expr, Q.integer, assumptions)
if _integer:
_prime = ask(expr, Q.prime, assumptions)
if _prime is None: return
return not _prime
else: return _integer
else: return _positive
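# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the handler module): the ternary logic of
# AskCompositeHandler.Basic restated as a plain function. None stands for
# "unknown", mirroring how ask() may return None. The helper is hypothetical
# and exists only to make the cascading checks above easier to follow.
# ---------------------------------------------------------------------------
def _composite_sketch(positive, integer, prime):
    # Each argument is True, False or None (unknown).
    if not positive:      # False or None short-circuits, just like the handler
        return positive
    if not integer:
        return integer
    if prime is None:
        return None
    return not prime      # composite = positive integer that is not prime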
class AskEvenHandler(CommonHandler):
@staticmethod
def _number(expr, assumptions):
# helper method
if (expr.as_real_imag()[1] == 0) and expr.evalf(1) == expr:
return float(expr.evalf()) % 2 == 0
else: return False
@staticmethod
def Basic(expr, assumptions):
if expr.is_number:
return AskEvenHandler._number(expr, assumptions)
@staticmethod
def Mul(expr, assumptions):
"""
Even * Integer -> Even
Even * Odd -> Even
Integer * Odd -> ?
Odd * Odd -> Odd
"""
if expr.is_number:
return AskEvenHandler._number(expr, assumptions)
even, odd, irrational = False, 0, False
for arg in expr.args:
# check for all integers and at least one even
if ask(arg, Q.integer, assumptions):
if ask(arg, Q.even, assumptions):
even = True
elif ask(arg, Q.odd, assumptions):
odd += 1
elif ask(arg, Q.irrational, assumptions):
# one irrational makes the result False
# two makes it undefined
if irrational:
break
irrational = True
else: break
else:
if irrational: return False
if even: return True
if odd == len(expr.args): return False
@staticmethod
def Add(expr, assumptions):
"""
Even + Odd -> Odd
Even + Even -> Even
Odd + Odd -> Even
TODO: remove float() when issue
http://code.google.com/p/sympy/issues/detail?id=1473
is solved
"""
if expr.is_number:
return AskEvenHandler._number(expr, assumptions)
_result = True
for arg in expr.args:
if ask(arg, Q.even, assumptions):
pass
elif ask(arg, Q.odd, assumptions):
_result = not _result
else: break
else:
return _result
@staticmethod
def Integer(expr, assumptions):
return not bool(expr.p & 1)
@staticmethod
def Rational(expr, assumptions):
return False
@staticmethod
def Real(expr, assumptions):
return expr % 2 == 0
@staticmethod
def Infinity(expr, assumptions):
return False
@staticmethod
def NegativeInfinity(expr, assumptions):
return False
@staticmethod
def NumberSymbol(expr, assumptions):
return AskEvenHandler._number(expr, assumptions)
@staticmethod
def ImaginaryUnit(expr, assumptions):
return False
@staticmethod
def Abs(expr, assumptions):
if ask(expr.args[0], Q.real, assumptions):
return ask(expr.args[0], Q.even, assumptions)
@staticmethod
def re(expr, assumptions):
if ask(expr.args[0], Q.real, assumptions):
return ask(expr.args[0], Q.even, assumptions)
@staticmethod
def im(expr, assumptions):
if ask(expr.args[0], Q.real, assumptions):
return True
class AskOddHandler(CommonHandler):
"""
Handler for key 'odd'
Test that an expression represents an odd number
"""
@staticmethod
def Basic(expr, assumptions):
_integer = ask(expr, Q.integer, assumptions)
if _integer:
_even = ask(expr, Q.even, assumptions)
if _even is None: return None
return not _even
return _integer
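# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the handler module): the parity bookkeeping
# of AskEvenHandler.Add restated over plain integers. The helper is
# hypothetical and does not touch the ask() machinery; it only mirrors the
# "flip parity for every odd term" accumulation used above.
# ---------------------------------------------------------------------------
def _add_parity_sketch(terms):
    result = True                 # True means "the sum is even so far"
    for t in terms:
        if t % 2:                 # an odd term flips the parity of the sum
            result = not result
    return result

# Even + Even -> Even, Odd + Odd -> Even, Even + Odd -> Odd:
assert _add_parity_sketch([2, 4]) is True
assert _add_parity_sketch([3, 5]) is True
assert _add_parity_sketch([2, 3]) is False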
|
|
"""The tests for the Tasmota light platform."""
import copy
import json
from unittest.mock import patch
from hatasmota.const import CONF_MAC
from hatasmota.utils import (
get_topic_stat_result,
get_topic_tele_state,
get_topic_tele_will,
)
from homeassistant.components import light
from homeassistant.components.light import SUPPORT_EFFECT, SUPPORT_TRANSITION
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.const import ATTR_ASSUMED_STATE, STATE_OFF, STATE_ON
from .test_common import (
DEFAULT_CONFIG,
help_test_availability,
help_test_availability_discovery_update,
help_test_availability_poll_state,
help_test_availability_when_connection_lost,
help_test_discovery_device_remove,
help_test_discovery_removal,
help_test_discovery_update_unchanged,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
)
from tests.common import async_fire_mqtt_message
from tests.components.light import common
async def test_attributes_on_off(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
config["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.attributes.get("effect_list") is None
assert state.attributes.get("min_mireds") is None
assert state.attributes.get("max_mireds") is None
assert state.attributes.get("supported_features") == 0
assert state.attributes.get("supported_color_modes") == ["onoff"]
assert state.attributes.get("color_mode") == "onoff"
async def test_attributes_dimmer_tuya(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 1 # 1 channel light (dimmer)
config["ty"] = 1 # Tuya device
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.attributes.get("effect_list") is None
assert state.attributes.get("min_mireds") is None
assert state.attributes.get("max_mireds") is None
assert state.attributes.get("supported_features") == 0
assert state.attributes.get("supported_color_modes") == ["brightness"]
assert state.attributes.get("color_mode") == "brightness"
async def test_attributes_dimmer(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 1 # 1 channel light (dimmer)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.attributes.get("effect_list") is None
assert state.attributes.get("min_mireds") is None
assert state.attributes.get("max_mireds") is None
assert state.attributes.get("supported_features") == SUPPORT_TRANSITION
assert state.attributes.get("supported_color_modes") == ["brightness"]
assert state.attributes.get("color_mode") == "brightness"
async def test_attributes_ct(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 2 # 2 channel light (CW)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.attributes.get("effect_list") is None
assert state.attributes.get("min_mireds") == 153
assert state.attributes.get("max_mireds") == 500
assert state.attributes.get("supported_features") == SUPPORT_TRANSITION
assert state.attributes.get("supported_color_modes") == ["color_temp"]
assert state.attributes.get("color_mode") == "color_temp"
async def test_attributes_ct_reduced(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 2 # 2 channel light (CW)
config["so"]["82"] = 1 # Reduced CT range
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.attributes.get("effect_list") is None
assert state.attributes.get("min_mireds") == 200
assert state.attributes.get("max_mireds") == 380
assert state.attributes.get("supported_features") == SUPPORT_TRANSITION
assert state.attributes.get("supported_color_modes") == ["color_temp"]
assert state.attributes.get("color_mode") == "color_temp"
async def test_attributes_rgb(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 3 # 3 channel light (RGB)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.attributes.get("effect_list") == [
"None",
"Wake up",
"Cycle up",
"Cycle down",
"Random",
]
assert state.attributes.get("min_mireds") is None
assert state.attributes.get("max_mireds") is None
assert (
state.attributes.get("supported_features")
== SUPPORT_EFFECT | SUPPORT_TRANSITION
)
assert state.attributes.get("supported_color_modes") == ["hs"]
assert state.attributes.get("color_mode") == "hs"
async def test_attributes_rgbw(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 4 # 4 channel light (RGBW)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.attributes.get("effect_list") == [
"None",
"Wake up",
"Cycle up",
"Cycle down",
"Random",
]
assert state.attributes.get("min_mireds") is None
assert state.attributes.get("max_mireds") is None
assert (
state.attributes.get("supported_features")
== SUPPORT_EFFECT | SUPPORT_TRANSITION
)
assert state.attributes.get("supported_color_modes") == ["hs", "white"]
assert state.attributes.get("color_mode") == "hs"
async def test_attributes_rgbww(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 5 # 5 channel light (RGBCW)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.attributes.get("effect_list") == [
"None",
"Wake up",
"Cycle up",
"Cycle down",
"Random",
]
assert state.attributes.get("min_mireds") == 153
assert state.attributes.get("max_mireds") == 500
assert (
state.attributes.get("supported_features")
== SUPPORT_EFFECT | SUPPORT_TRANSITION
)
assert state.attributes.get("supported_color_modes") == ["color_temp", "hs"]
assert state.attributes.get("color_mode") == "color_temp"
async def test_attributes_rgbww_reduced(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 5 # 5 channel light (RGBCW)
config["so"]["82"] = 1 # Reduced CT range
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.attributes.get("effect_list") == [
"None",
"Wake up",
"Cycle up",
"Cycle down",
"Random",
]
assert state.attributes.get("min_mireds") == 200
assert state.attributes.get("max_mireds") == 380
assert (
state.attributes.get("supported_features")
== SUPPORT_EFFECT | SUPPORT_TRANSITION
)
assert state.attributes.get("supported_color_modes") == ["color_temp", "hs"]
assert state.attributes.get("color_mode") == "color_temp"
async def test_controlling_state_via_mqtt_on_off(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
config["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
assert "color_mode" not in state.attributes
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
assert "color_mode" not in state.attributes
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("color_mode") == "onoff"
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"OFF"}')
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert "color_mode" not in state.attributes
async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("color_mode") == "onoff"
async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"OFF"}')
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert "color_mode" not in state.attributes
async def test_controlling_state_via_mqtt_ct(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 2 # 2 channel light (CT)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
assert "color_mode" not in state.attributes
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
assert "color_mode" not in state.attributes
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("color_mode") == "color_temp"
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"OFF"}')
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert "color_mode" not in state.attributes
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Dimmer":50}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 128
assert state.attributes.get("color_mode") == "color_temp"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","CT":300}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("color_temp") == 300
assert state.attributes.get("color_mode") == "color_temp"
# Tasmota will send "Color" also for CT light, this should be ignored
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Color":"255,128"}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("color_temp") == 300
assert state.attributes.get("brightness") == 128
assert state.attributes.get("color_mode") == "color_temp"
async def test_controlling_state_via_mqtt_rgbw(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 4 # 4 channel light (RGBW)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
assert "color_mode" not in state.attributes
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
assert "color_mode" not in state.attributes
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("color_mode") == "hs"
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"OFF"}')
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert "color_mode" not in state.attributes
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Dimmer":50,"White":0}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 128
assert state.attributes.get("color_mode") == "hs"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Dimmer":75,"White":75}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 191
assert state.attributes.get("color_mode") == "white"
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/STATE",
'{"POWER":"ON","Dimmer":50,"HSBColor":"30,100,50","White":0}',
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 128
assert state.attributes.get("hs_color") == (30, 100)
assert state.attributes.get("color_mode") == "hs"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","White":50}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 128
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("color_mode") == "white"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Dimmer":0}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 0
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("color_mode") == "white"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Scheme":3}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("effect") == "Cycle down"
async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"OFF"}')
state = hass.states.get("light.test")
assert state.state == STATE_OFF
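# A short illustrative sketch (not part of the test suite): the effect <->
# Scheme mapping the assertions above rely on. The Scheme number appears to be
# the index into the discovered effect list, so a reported "Scheme":3 reads
# back as effect "Cycle down" and turning on with effect="Random" publishes
# "Scheme 4". The names below are hypothetical helpers, not hatasmota API.
_EFFECT_LIST_SKETCH = ["None", "Wake up", "Cycle up", "Cycle down", "Random"]


def _scheme_sketch(effect):
    """Return the Scheme number a given effect name corresponds to."""
    return _EFFECT_LIST_SKETCH.index(effect)  # "Cycle down" -> 3, "Random" -> 4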
async def test_controlling_state_via_mqtt_rgbww(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 5 # 5 channel light (RGBCW)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
assert "color_mode" not in state.attributes
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
assert "color_mode" not in state.attributes
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("color_mode") == "color_temp"
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"OFF"}')
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert "color_mode" not in state.attributes
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Dimmer":50}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 128
assert state.attributes.get("color_mode") == "color_temp"
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/STATE",
'{"POWER":"ON","Dimmer":50,"HSBColor":"30,100,50","White":0}',
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("hs_color") == (30, 100)
assert state.attributes.get("color_mode") == "hs"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","White":50}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert "white_value" not in state.attributes
# Setting white > 0 should clear the color
assert "rgb_color" not in state.attributes
assert state.attributes.get("color_mode") == "color_temp"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","CT":300}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("color_temp") == 300
assert state.attributes.get("color_mode") == "color_temp"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","White":0}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
# Setting white to 0 should clear the color_temp
assert "white_value" not in state.attributes
assert "color_temp" not in state.attributes
assert state.attributes.get("hs_color") == (30, 100)
assert state.attributes.get("color_mode") == "hs"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Scheme":3}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("effect") == "Cycle down"
async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"OFF"}')
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async def test_controlling_state_via_mqtt_rgbww_tuya(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 5 # 5 channel light (RGBCW)
config["ty"] = 1 # Tuya device
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
assert "color_mode" not in state.attributes
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
assert "color_mode" not in state.attributes
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("color_mode") == "color_temp"
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"OFF"}')
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert "color_mode" not in state.attributes
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Dimmer":50}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 128
assert state.attributes.get("color_mode") == "color_temp"
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/STATE",
'{"POWER":"ON","HSBColor":"30,100,0","White":0}',
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("hs_color") == (30, 100)
assert state.attributes.get("color_mode") == "hs"
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/STATE",
'{"POWER":"ON","Dimmer":0}',
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("hs_color") == (30, 100)
assert state.attributes.get("color_mode") == "hs"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Dimmer":50,"White":50}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert "white_value" not in state.attributes
# Setting white > 0 should clear the color
assert "rgb_color" not in state.attributes
assert state.attributes.get("color_mode") == "color_temp"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","CT":300}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("color_temp") == 300
assert state.attributes.get("color_mode") == "color_temp"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","White":0}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
# Setting white to 0 should clear the white_value and color_temp
assert not state.attributes.get("white_value")
assert not state.attributes.get("color_temp")
assert state.attributes.get("color_mode") == "hs"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Scheme":3}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("effect") == "Cycle down"
async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"ON"}')
state = hass.states.get("light.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"OFF"}')
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async def test_sending_mqtt_commands_on_off(hass, mqtt_mock, setup_tasmota):
"""Test the sending MQTT commands."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
config["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Turn the light on and verify MQTT message is sent
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Power1", "ON", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Tasmota is not optimistic, the state should still be off
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# Turn the light off and verify MQTT message is sent
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Power1", "OFF", 0, False
)
mqtt_mock.async_publish.reset_mock()
async def test_sending_mqtt_commands_rgbww_tuya(hass, mqtt_mock, setup_tasmota):
"""Test the sending MQTT commands."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 5 # 5 channel light (RGBCW)
config["ty"] = 1 # Tuya device
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Turn the light on and verify MQTT message is sent
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Power1 ON", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Tasmota is not optimistic, the state should still be off
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# Turn the light off and verify MQTT message is sent
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Power1 OFF", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Turn the light on and verify MQTT messages are sent
await common.async_turn_on(hass, "light.test", brightness=192)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Dimmer3 75", 0, False
)
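# A short illustrative sketch (not part of the test suite): the brightness
# scaling the payload assertions above rely on. Home Assistant brightness is
# 0-255 while Tasmota's Dimmer/Dimmer3 is 0-100; the real conversion lives in
# hatasmota, this hypothetical helper only shows the arithmetic the expected
# values assume.
def _dimmer_scaling_sketch():
    to_dimmer = round(192 * 100 / 255)     # brightness=192 -> "Dimmer3 75"
    to_brightness = round(50 * 255 / 100)  # "Dimmer":50 -> brightness 128
    return to_dimmer, to_brightness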
async def test_sending_mqtt_commands_rgbw_legacy(hass, mqtt_mock, setup_tasmota):
"""Test the sending MQTT commands."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["sw"] = "9.4.0.3" # RGBW support was added in 9.4.0.4
config["rl"][0] = 2
config["lt_st"] = 4 # 4 channel light (RGBW)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Turn the light on and verify MQTT message is sent
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Power1 ON", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Tasmota is not optimistic, the state should still be off
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# Turn the light off and verify MQTT message is sent
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Power1 OFF", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Turn the light on and verify MQTT messages are sent
await common.async_turn_on(hass, "light.test", brightness=192)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Dimmer 75", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set color when setting color
await common.async_turn_on(hass, "light.test", hs_color=[0, 100])
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON;NoDelay;HsbColor1 0;NoDelay;HsbColor2 100",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Set white when setting white
await common.async_turn_on(hass, "light.test", white=128)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON;NoDelay;White 50",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# rgbw_color should be ignored
await common.async_turn_on(hass, "light.test", rgbw_color=[128, 64, 32, 0])
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# rgbw_color should be ignored
await common.async_turn_on(hass, "light.test", rgbw_color=[16, 64, 32, 128])
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", white_value=128)
# white_value should be ignored
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", effect="Random")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON;NoDelay;Scheme 4",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
async def test_sending_mqtt_commands_rgbw(hass, mqtt_mock, setup_tasmota):
"""Test the sending MQTT commands."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 4 # 4 channel light (RGBW)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Turn the light on and verify MQTT message is sent
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Power1 ON", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Tasmota is not optimistic, the state should still be off
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# Turn the light off and verify MQTT message is sent
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Power1 OFF", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Turn the light on and verify MQTT messages are sent
await common.async_turn_on(hass, "light.test", brightness=192)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Dimmer 75", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set color when setting color
await common.async_turn_on(hass, "light.test", hs_color=[180, 50])
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON;NoDelay;HsbColor1 180;NoDelay;HsbColor2 50",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Set white when setting white
await common.async_turn_on(hass, "light.test", white=128)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON;NoDelay;White 50",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# rgbw_color should be ignored
await common.async_turn_on(hass, "light.test", rgbw_color=[128, 64, 32, 0])
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# rgbw_color should be ignored
await common.async_turn_on(hass, "light.test", rgbw_color=[16, 64, 32, 128])
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", white_value=128)
# white_value should be ignored
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", effect="Random")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON;NoDelay;Scheme 4",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
async def test_sending_mqtt_commands_rgbww(hass, mqtt_mock, setup_tasmota):
"""Test the sending MQTT commands."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 5 # 5 channel light (RGBCW)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Turn the light on and verify MQTT message is sent
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Power1 ON", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Tasmota is not optimistic, the state should still be off
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# Turn the light off and verify MQTT message is sent
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Power1 OFF", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Turn the light on and verify MQTT messages are sent
await common.async_turn_on(hass, "light.test", brightness=192)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Dimmer 75", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", hs_color=[240, 75])
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON;NoDelay;HsbColor1 240;NoDelay;HsbColor2 75",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", color_temp=200)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON;NoDelay;CT 200",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", white_value=128)
# white_value should be ignored
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", effect="Random")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Power1 ON;NoDelay;Scheme 4",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
async def test_sending_mqtt_commands_power_unlinked(hass, mqtt_mock, setup_tasmota):
"""Test the sending MQTT commands to a light with unlinked dimlevel and power."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 1 # 1 channel light (dimmer)
config["so"]["20"] = 1 # Update of Dimmer/Color/CT without turning power on
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Turn the light on and verify MQTT message is sent
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Power1 ON", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Tasmota is not optimistic, the state should still be off
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# Turn the light off and verify MQTT message is sent
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog", "NoDelay;Power1 OFF", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Turn the light on and verify MQTT messages are sent; POWER should be sent
await common.async_turn_on(hass, "light.test", brightness=192)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Dimmer 75;NoDelay;Power1 ON",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
async def test_transition(hass, mqtt_mock, setup_tasmota):
"""Test transition commands."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 5 # 5 channel light (RGBCW)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Dim the light from 0->100: Speed should be 4*2=8
await common.async_turn_on(hass, "light.test", brightness=255, transition=4)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 1;NoDelay;Speed2 8;NoDelay;Dimmer 100",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Dim the light from 0->100: Speed should be capped at 40
await common.async_turn_on(hass, "light.test", brightness=255, transition=100)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 1;NoDelay;Speed2 40;NoDelay;Dimmer 100",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Dim the light from 0->0: Speed should be 1
await common.async_turn_on(hass, "light.test", brightness=0, transition=100)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 1;NoDelay;Speed2 1;NoDelay;Power1 OFF",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Dim the light from 0->50: Speed should be 4*2*2=16
await common.async_turn_on(hass, "light.test", brightness=128, transition=4)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 1;NoDelay;Speed2 16;NoDelay;Dimmer 50",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Fake state update from the light
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Dimmer":50}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 128
# Dim the light from 50->0: Speed should be 6*2*2=24
await common.async_turn_off(hass, "light.test", transition=6)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 1;NoDelay;Speed2 24;NoDelay;Power1 OFF",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Fake state update from the light
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Dimmer":100}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 255
# Dim the light from 100->0: Speed should be 0
await common.async_turn_off(hass, "light.test", transition=0)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 0;NoDelay;Power1 OFF",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Fake state update from the light
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/STATE",
'{"POWER":"ON","Dimmer":50, "Color":"0,255,0","HSBColor":"120,100,50","White":0}',
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 128
assert state.attributes.get("rgb_color") == (0, 255, 0)
# Set color of the light from 0,255,0 to 255,0,0 @ 50%: Speed should be 6*2*2=24
await common.async_turn_on(hass, "light.test", rgb_color=[255, 0, 0], transition=6)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 1;NoDelay;Speed2 24;NoDelay;Power1 ON;NoDelay;HsbColor1 0;NoDelay;HsbColor2 100",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Fake state update from the light
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/STATE",
'{"POWER":"ON","Dimmer":100, "Color":"0,255,0","HSBColor":"120,100,50"}',
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 255
assert state.attributes.get("rgb_color") == (0, 255, 0)
# Set color of the light from 0,255,0 to 255,0,0 @ 100%: Speed should be 6*2=12
await common.async_turn_on(hass, "light.test", rgb_color=[255, 0, 0], transition=6)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 1;NoDelay;Speed2 12;NoDelay;Power1 ON;NoDelay;HsbColor1 0;NoDelay;HsbColor2 100",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Fake state update from the light
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/STATE",
'{"POWER":"ON","Dimmer":50, "CT":153, "White":50}',
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 128
assert state.attributes.get("color_temp") == 153
# Set color_temp of the light from 153 to 500 @ 50%: Speed should be 6*2*2=24
await common.async_turn_on(hass, "light.test", color_temp=500, transition=6)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 1;NoDelay;Speed2 24;NoDelay;Power1 ON;NoDelay;CT 500",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Fake state update from the light
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Dimmer":50, "CT":500}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 128
assert state.attributes.get("color_temp") == 500
# Set color_temp of the light from 500 to 326 @ 50%: Speed should be 6*2*2*2=48->40
await common.async_turn_on(hass, "light.test", color_temp=326, transition=6)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 1;NoDelay;Speed2 40;NoDelay;Power1 ON;NoDelay;CT 326",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
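# A rough illustrative sketch (not part of the test suite) of the Speed2
# values expected in test_transition. Tasmota's Speed counts 0.5 s units for a
# full 0->100 fade, so for a partial change the requested transition time is
# scaled by 100 / |dimmer change| and capped at 40. The real conversion lives
# in hatasmota; this hypothetical helper only mirrors the "4*2=8", "6*2*2=24"
# and "capped at 40" comments above for the dimmer-only cases.
def _slew_speed_sketch(transition, old_dimmer, new_dimmer):
    delta = abs(new_dimmer - old_dimmer)
    if transition == 0:
        return None                       # tests expect "Fade2 0" instead
    if delta == 0:
        return 1                          # tests expect Speed2 1 when nothing moves
    return min(40, round(2 * transition * 100 / delta))

# _slew_speed_sketch(4, 0, 100) -> 8, _slew_speed_sketch(4, 0, 50) -> 16,
# _slew_speed_sketch(6, 50, 0) -> 24, _slew_speed_sketch(100, 0, 100) -> 40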
async def test_transition_fixed(hass, mqtt_mock, setup_tasmota):
"""Test transition commands."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 5 # 5 channel light (RGBCW)
config["so"]["117"] = 1 # fading at fixed duration instead of fixed slew rate
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Dim the light from 0->100: Speed should be 4*2=8
await common.async_turn_on(hass, "light.test", brightness=255, transition=4)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 1;NoDelay;Speed2 8;NoDelay;Dimmer 100",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Dim the light from 0->100: Speed should be capped at 40
await common.async_turn_on(hass, "light.test", brightness=255, transition=100)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 1;NoDelay;Speed2 40;NoDelay;Dimmer 100",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Dim the light from 0->0: Speed should be 4*2=8
await common.async_turn_on(hass, "light.test", brightness=0, transition=4)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 1;NoDelay;Speed2 8;NoDelay;Power1 OFF",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Dim the light from 0->50: Speed should be 4*2=8
await common.async_turn_on(hass, "light.test", brightness=128, transition=4)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 1;NoDelay;Speed2 8;NoDelay;Dimmer 50",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Dim the light from 0->50: Speed should be 0
await common.async_turn_on(hass, "light.test", brightness=128, transition=0)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
"NoDelay;Fade2 0;NoDelay;Dimmer 50",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
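# A short illustrative sketch (not part of the test suite): with SetOption117
# ("so"["117"] = 1) the fade runs for a fixed duration rather than a fixed
# slew rate, so the expected Speed2 in test_transition_fixed no longer depends
# on how far the dimmer moves -- it is simply 2 * transition, capped at 40,
# with "Fade2 0" when no transition is requested. Hypothetical helper only.
def _fixed_speed_sketch(transition):
    if transition == 0:
        return None                        # "Fade2 0"
    return min(40, round(2 * transition))  # transition=4 -> 8, transition=100 -> 40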
async def test_relay_as_light(hass, mqtt_mock, setup_tasmota):
"""Test relay show up as light in light mode."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
config["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("switch.test")
assert state is None
state = hass.states.get("light.test")
assert state is not None
async def _test_split_light(hass, mqtt_mock, config, num_lights, num_switches):
"""Test multi-channel light split to single-channel dimmers."""
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("switch")) == num_switches
assert len(hass.states.async_entity_ids("light")) == num_lights
lights = hass.states.async_entity_ids("light")
for idx, entity in enumerate(lights):
mqtt_mock.async_publish.reset_mock()
# Turn the light on and verify MQTT message is sent
await common.async_turn_on(hass, entity)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
f"NoDelay;Power{idx+num_switches+1} ON",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Dim the light and verify MQTT message is sent
await common.async_turn_on(hass, entity, brightness=(idx + 1) * 25.5)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
f"NoDelay;Channel{idx+num_switches+1} {(idx+1)*10}",
0,
False,
)
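# A short illustrative sketch (not part of the test suite): the Channel
# payloads asserted in _test_split_light. Each split single-channel dimmer
# maps the Home Assistant brightness (0-255) to a Tasmota channel percentage
# (0-100), so brightness=(idx + 1) * 25.5 becomes "Channel<n> {(idx + 1) * 10}".
# Hypothetical helper, shown only to make the expected payloads explicit.
def _split_channel_sketch(idx):
    brightness = (idx + 1) * 25.5
    return round(brightness * 100 / 255)  # -> (idx + 1) * 10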
async def test_split_light(hass, mqtt_mock, setup_tasmota):
"""Test multi-channel light split to single-channel dimmers."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["rl"][1] = 2
config["rl"][2] = 2
config["rl"][3] = 2
config["rl"][4] = 2
config["so"][68] = 1 # Multi-channel PWM instead of a single light
config["lt_st"] = 5 # 5 channel light (RGBCW)
await _test_split_light(hass, mqtt_mock, config, 5, 0)
async def test_split_light2(hass, mqtt_mock, setup_tasmota):
"""Test multi-channel light split to single-channel dimmers."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
config["rl"][1] = 1
config["rl"][2] = 2
config["rl"][3] = 2
config["rl"][4] = 2
config["rl"][5] = 2
config["rl"][6] = 2
config["so"][68] = 1 # Multi-channel PWM instead of a single light
config["lt_st"] = 5 # 5 channel light (RGBCW)
await _test_split_light(hass, mqtt_mock, config, 5, 2)
async def _test_unlinked_light(hass, mqtt_mock, config, num_switches):
"""Test rgbww light split to rgb+ww."""
mac = config["mac"]
num_lights = 2
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("switch")) == num_switches
assert len(hass.states.async_entity_ids("light")) == num_lights
lights = hass.states.async_entity_ids("light")
for idx, entity in enumerate(lights):
mqtt_mock.async_publish.reset_mock()
# Turn the light on and verify MQTT message is sent
await common.async_turn_on(hass, entity)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
f"NoDelay;Power{idx+num_switches+1} ON",
0,
False,
)
mqtt_mock.async_publish.reset_mock()
# Dim the light and verify MQTT message is sent
await common.async_turn_on(hass, entity, brightness=(idx + 1) * 25.5)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Backlog",
f"NoDelay;Dimmer{idx+1} {(idx+1)*10}",
0,
False,
)
async def test_unlinked_light(hass, mqtt_mock, setup_tasmota):
"""Test rgbww light split to rgb+ww."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["rl"][1] = 2
config["lk"] = 0 # RGB + white channels unlinked
config["lt_st"] = 5 # 5 channel light (RGBCW)
await _test_unlinked_light(hass, mqtt_mock, config, 0)
async def test_unlinked_light2(hass, mqtt_mock, setup_tasmota):
"""Test rgbww light split to rgb+ww."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
config["rl"][1] = 1
config["rl"][2] = 2
config["rl"][3] = 2
config["lk"] = 0 # RGB + white channels unlinked
config["lt_st"] = 5 # 5 channel light (RGBCW)
await _test_unlinked_light(hass, mqtt_mock, config, 2)
async def test_discovery_update_reconfigure_light(
hass, mqtt_mock, caplog, setup_tasmota
):
"""Test reconfigure of discovered light."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 1 # 1 channel light (Dimmer)
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["rl"][0] = 2
config2["lt_st"] = 3 # 3 channel light (RGB)
data1 = json.dumps(config)
data2 = json.dumps(config2)
# Simple dimmer
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data1)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.attributes.get("supported_features") == SUPPORT_TRANSITION
assert state.attributes.get("supported_color_modes") == ["brightness"]
# Reconfigure as RGB light
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data2)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert (
state.attributes.get("supported_features")
== SUPPORT_EFFECT | SUPPORT_TRANSITION
)
assert state.attributes.get("supported_color_modes") == ["hs"]
async def test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test availability after MQTT disconnection."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 1 # 1 channel light (Dimmer)
await help_test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, light.DOMAIN, config
)
async def test_availability(hass, mqtt_mock, setup_tasmota):
"""Test availability."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 1 # 1 channel light (Dimmer)
await help_test_availability(hass, mqtt_mock, light.DOMAIN, config)
async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test availability discovery update."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 1 # 1 channel light (Dimmer)
await help_test_availability_discovery_update(hass, mqtt_mock, light.DOMAIN, config)
async def test_availability_poll_state(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test polling after MQTT connection (re)established."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 1 # 1 channel light (Dimmer)
poll_topic = "tasmota_49A3BC/cmnd/STATE"
await help_test_availability_poll_state(
hass, mqtt_client_mock, mqtt_mock, light.DOMAIN, config, poll_topic, ""
)
async def test_discovery_removal_light(hass, mqtt_mock, caplog, setup_tasmota):
"""Test removal of discovered light."""
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["rl"][0] = 2
config1["lt_st"] = 1 # 1 channel light (Dimmer)
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["rl"][0] = 0
config2["lt_st"] = 0
await help_test_discovery_removal(
hass, mqtt_mock, caplog, light.DOMAIN, config1, config2
)
async def test_discovery_removal_relay_as_light(hass, mqtt_mock, caplog, setup_tasmota):
"""Test removal of discovered relay as light."""
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["rl"][0] = 1
config1["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["rl"][0] = 1
config2["so"]["30"] = 0 # Disable Home Assistant auto-discovery as light
await help_test_discovery_removal(
hass, mqtt_mock, caplog, light.DOMAIN, config1, config2
)
async def test_discovery_removal_relay_as_light2(
hass, mqtt_mock, caplog, setup_tasmota
):
"""Test removal of discovered relay as light."""
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["rl"][0] = 1
config1["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["rl"][0] = 0
config2["so"]["30"] = 0 # Disable Home Assistant auto-discovery as light
await help_test_discovery_removal(
hass, mqtt_mock, caplog, light.DOMAIN, config1, config2
)
async def test_discovery_update_unchanged_light(hass, mqtt_mock, caplog, setup_tasmota):
"""Test update of discovered light."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 1 # 1 channel light (Dimmer)
with patch(
"homeassistant.components.tasmota.light.TasmotaLight.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, light.DOMAIN, config, discovery_update
)
async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota):
"""Test device registry remove."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 1 # 1 channel light (Dimmer)
unique_id = f"{DEFAULT_CONFIG['mac']}_light_light_0"
await help_test_discovery_device_remove(
hass, mqtt_mock, light.DOMAIN, unique_id, config
)
async def test_discovery_device_remove_relay_as_light(hass, mqtt_mock, setup_tasmota):
"""Test device registry remove."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
config["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light
unique_id = f"{DEFAULT_CONFIG['mac']}_light_relay_0"
await help_test_discovery_device_remove(
hass, mqtt_mock, light.DOMAIN, unique_id, config
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota):
"""Test MQTT subscriptions are managed when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 1 # 1 channel light (Dimmer)
topics = [
get_topic_stat_result(config),
get_topic_tele_state(config),
get_topic_tele_will(config),
]
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, light.DOMAIN, config, topics
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test MQTT discovery update when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 2
config["lt_st"] = 1 # 1 channel light (Dimmer)
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, light.DOMAIN, config
)
|
|
# encoding: utf-8
# FastCGI-to-WSGI bridge for files/pipes transport (not socket)
#
# Copyright (c) 2002, 2003, 2005, 2006 Allan Saddi <allan@saddi.com>
# Copyright (c) 2011 - 2013 Ruslan Keba <ruslan@helicontech.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__author__ = 'Allan Saddi <allan@saddi.com>, Ruslan Keba <ruslan@helicontech.com>'
import msvcrt
import struct
import os
import logging
import sys
import traceback
import datetime
import urllib
from optparse import OptionParser
# debug flag
__dbg__ = False
# Constants from the spec.
FCGI_LISTENSOCK_FILENO = 0
FCGI_HEADER_LEN = 8
FCGI_VERSION_1 = 1
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
FCGI_NULL_REQUEST_ID = 0
FCGI_KEEP_CONN = 1
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3
FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
FCGI_MAX_REQS = 'FCGI_MAX_REQS'
FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'
FCGI_Header = '!BBHHBx'
FCGI_BeginRequestBody = '!HB5x'
FCGI_EndRequestBody = '!LB3x'
FCGI_UnknownTypeBody = '!B7x'
FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
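# Record header layout ('!BBHHBx', FCGI_HEADER_LEN == 8 bytes, big-endian):
# version (B), type (B), requestId (H), contentLength (H), paddingLength (B)
# and one reserved pad byte.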
FCGI_HEADER_NAMES = (
'ERROR TYPE: 0',
'BEGIN_REQUEST',
'ABORT_REQUEST',
'END_REQUEST',
'PARAMS',
'STDIN',
'STDOUT',
'STDERR',
'DATA',
'GET_VALUES',
'GET_VALUES_RESULT',
'UNKNOWN_TYPE',
)
class InputStream(object):
"""
File-like object representing FastCGI input streams (FCGI_STDIN and
    FCGI_DATA). Supports the minimum methods required by the WSGI spec.
"""
def __init__(self, conn):
self._conn = conn
# See Server.
self._shrinkThreshold = conn.server.inputStreamShrinkThreshold
self._buf = ''
self._bufList = []
self._pos = 0 # Current read position.
self._avail = 0 # Number of bytes currently available.
self._eof = False # True when server has sent EOF notification.
def _shrinkBuffer(self):
"""Gets rid of already read data (since we can't rewind)."""
if self._pos >= self._shrinkThreshold:
self._buf = self._buf[self._pos:]
self._avail -= self._pos
self._pos = 0
assert self._avail >= 0
def _waitForData(self):
"""Waits for more data to become available."""
self._conn.process_input()
def read(self, n=-1):
if self._pos == self._avail and self._eof:
return ''
while True:
if n < 0 or (self._avail - self._pos) < n:
# Not enough data available.
if self._eof:
# And there's no more coming.
newPos = self._avail
break
else:
# Wait for more data.
self._waitForData()
continue
else:
newPos = self._pos + n
break
# Merge buffer list, if necessary.
if self._bufList:
self._buf += ''.join(self._bufList)
self._bufList = []
r = self._buf[self._pos:newPos]
self._pos = newPos
self._shrinkBuffer()
return r
def readline(self, length=None):
if self._pos == self._avail and self._eof:
return ''
while True:
# Unfortunately, we need to merge the buffer list early.
if self._bufList:
self._buf += ''.join(self._bufList)
self._bufList = []
# Find newline.
i = self._buf.find('\n', self._pos)
if i < 0:
# Not found?
if self._eof:
# No more data coming.
newPos = self._avail
break
else:
if length is not None and len(self._buf) >= length + self._pos:
newPos = self._pos + length
break
# Wait for more to come.
self._waitForData()
continue
else:
newPos = i + 1
break
r = self._buf[self._pos:newPos]
self._pos = newPos
self._shrinkBuffer()
return r
def readlines(self, sizehint=0):
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def __iter__(self):
return self
def next(self):
r = self.readline()
if not r:
raise StopIteration
return r
def add_data(self, data):
if not data:
self._eof = True
else:
self._bufList.append(data)
self._avail += len(data)
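# Note: an empty chunk passed to InputStream.add_data() marks end-of-stream, mirroring
# the zero-length FastCGI record that terminates FCGI_STDIN and FCGI_DATA.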
class OutputStream(object):
"""
FastCGI output stream (FCGI_STDOUT/FCGI_STDERR). By default, calls to
write() or writelines() immediately result in Records being sent back
    to the server. Buffering should be done at a higher level!
"""
def __init__(self, conn, req, type, buffered=False):
self._conn = conn
self._req = req
self._type = type
self._buffered = buffered
self._bufList = [] # Used if buffered is True
self.dataWritten = False
self.closed = False
def _write(self, data):
length = len(data)
while length:
to_write = min(length, self._req.server.maxwrite - FCGI_HEADER_LEN)
rec = Record(self._type, self._req.requestId)
rec.contentLength = to_write
rec.contentData = data[:to_write]
self._conn.writeRecord(rec)
data = data[to_write:]
length -= to_write
def write(self, data):
assert not self.closed
if not data:
return
self.dataWritten = True
if self._buffered:
self._bufList.append(data)
else:
self._write(data)
def writelines(self, lines):
assert not self.closed
for line in lines:
self.write(line)
def flush(self):
# Only need to flush if this OutputStream is actually buffered.
if self._buffered:
data = ''.join(self._bufList)
self._bufList = []
self._write(data)
# Though available, the following should NOT be called by WSGI apps.
def close(self):
"""Sends end-of-stream notification, if necessary."""
if not self.closed and self.dataWritten:
self.flush()
rec = Record(self._type, self._req.requestId)
self._conn.writeRecord(rec)
self.closed = True
class TeeOutputStream(object):
"""
Simple wrapper around two or more output file-like objects that copies
written data to all streams.
"""
def __init__(self, streamList):
self._streamList = streamList
def write(self, data):
for f in self._streamList:
f.write(data)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
for f in self._streamList:
f.flush()
class StdoutWrapper(object):
"""
Wrapper for sys.stdout so we know if data has actually been written.
"""
def __init__(self, stdout):
self._file = stdout
self.dataWritten = False
def write(self, data):
if data:
self.dataWritten = True
self._file.write(data)
def writelines(self, lines):
for line in lines:
self.write(line)
def __getattr__(self, name):
return getattr(self._file, name)
def decode_pair(s, pos=0):
"""
Decodes a name/value pair.
The number of bytes decoded as well as the name/value pair
are returned.
"""
nameLength = ord(s[pos])
if nameLength & 128:
nameLength = struct.unpack('!L', s[pos:pos + 4])[0] & 0x7fffffff
pos += 4
else:
pos += 1
valueLength = ord(s[pos])
if valueLength & 128:
valueLength = struct.unpack('!L', s[pos:pos + 4])[0] & 0x7fffffff
pos += 4
else:
pos += 1
name = s[pos:pos + nameLength]
pos += nameLength
value = s[pos:pos + valueLength]
pos += valueLength
return pos, (name, value)
def encode_pair(name, value):
"""
Encodes a name/value pair.
The encoded string is returned.
"""
nameLength = len(name)
if nameLength < 128:
s = chr(nameLength)
else:
s = struct.pack('!L', nameLength | 0x80000000L)
valueLength = len(value)
if valueLength < 128:
s += chr(valueLength)
else:
s += struct.pack('!L', valueLength | 0x80000000L)
return s + name + value
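# Illustrative round-trip of the two helpers above:
#   s = encode_pair('SCRIPT_NAME', '/app')
#   pos, (name, value) = decode_pair(s)   # pos == len(s), ('SCRIPT_NAME', '/app')
# Lengths below 128 are encoded in a single byte; longer values use four bytes with the
# high bit set.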
class Record(object):
"""
A FastCGI Record.
Used for encoding/decoding records.
"""
def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
self.version = FCGI_VERSION_1
self.type = type
self.requestId = requestId
self.contentLength = 0
self.paddingLength = 0
self.contentData = ''
def _recvall(stream, length):
"""
Attempts to receive length bytes from a socket, blocking if necessary.
(Socket may be blocking or non-blocking.)
"""
if __dbg__: logging.debug('_recvall (%d)' % length)
dataList = []
recvLen = 0
while length:
data = stream.read(length)
if not data: # EOF
break
dataList.append(data)
dataLen = len(data)
recvLen += dataLen
length -= dataLen
        # if __dbg__: logging.debug('received length = %d' % (recvLen))
return ''.join(dataList), recvLen
_recvall = staticmethod(_recvall)
def read(self, stream):
"""Read and decode a Record from a socket."""
try:
header, length = self._recvall(stream, FCGI_HEADER_LEN)
except:
raise
if length < FCGI_HEADER_LEN:
raise EOFError
if __dbg__:
hx = ''
for s in header:
hx += '%x|' % (ord(s))
self.version, self.type, self.requestId, self.contentLength, \
self.paddingLength = struct.unpack(FCGI_Header, header)
        if __dbg__:
            logging.debug('recv fcgi header: %s %s len: %d' % (
                FCGI_HEADER_NAMES[self.type]
                if self.type is not None and self.type < FCGI_MAXTYPE
                else FCGI_HEADER_NAMES[FCGI_MAXTYPE],
                hx, len(header)))
if self.contentLength:
try:
self.contentData, length = self._recvall(stream, self.contentLength)
except:
raise EOFError
if length < self.contentLength:
raise EOFError
if self.paddingLength:
try:
self._recvall(stream, self.paddingLength)
except:
raise EOFError
def _sendall(stream, data):
"""
Writes data to a socket and does not return until all the data is sent.
"""
if __dbg__: logging.debug('_sendall: len=%d' % len(data))
stream.write(data)
_sendall = staticmethod(_sendall)
def write(self, stream):
"""Encode and write a Record to a socket."""
if not self.contentLength:
self.paddingLength = 8
else:
self.paddingLength = -self.contentLength & 7
header = struct.pack(FCGI_Header, self.version, self.type,
self.requestId, self.contentLength,
self.paddingLength)
        if __dbg__: logging.debug('send fcgi header: %s' % (
            FCGI_HEADER_NAMES[self.type] if self.type is not None and self.type < FCGI_MAXTYPE
            else FCGI_HEADER_NAMES[FCGI_MAXTYPE]))
self._sendall(stream, header)
if self.contentLength:
if __dbg__: logging.debug('send CONTENT')
self._sendall(stream, self.contentData)
if self.paddingLength:
if __dbg__: logging.debug('send PADDING')
self._sendall(stream, '\x00' * self.paddingLength)
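# Record.write() pads each record so that header plus content stays 8-byte aligned:
# '-contentLength & 7' is the distance to the next multiple of 8, and zero-length
# content is padded to a full 8 bytes by the branch above.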
class Request(object):
"""
Represents a single FastCGI request.
    These objects are passed to your handler and are the main interface
between your handler and the fcgi module. The methods should not
be called by your handler. However, server, params, stdin, stdout,
stderr, and data are free for your handler's use.
"""
def __init__(self, conn, inputStreamClass):
self._conn = conn
self.server = conn.server
self.params = {}
self.stdin = inputStreamClass(conn)
self.stdout = OutputStream(conn, self, FCGI_STDOUT)
self.stderr = OutputStream(conn, self, FCGI_STDERR)
self.data = inputStreamClass(conn)
def run(self):
"""Runs the handler, flushes the streams, and ends the request."""
try:
protocolStatus, appStatus = self.server.handler(self)
except Exception, instance:
if __dbg__:
logging.error(traceback.format_exc())
raise
# TODO: fix it
# self.stderr.flush()
# if not self.stdout.dataWritten:
# self.server.error(self)
# protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0
if __dbg__:
logging.debug('protocolStatus = %d, appStatus = %d' % (protocolStatus, appStatus))
self._flush()
self._end(appStatus, protocolStatus)
def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
self._conn.end_request(self, appStatus, protocolStatus)
def _flush(self):
self.stdout.flush()
self.stderr.flush()
class Connection(object):
"""
A Connection with the web server.
Each Connection is associated with a single socket (which is
connected to the web server) and is responsible for handling all
the FastCGI message processing for that socket.
"""
_multiplexed = False
_inputStreamClass = InputStream
def __init__(self, stdin, stdout, server):
self._stdin = stdin
self._stdout = stdout
self.server = server
# Active Requests for this Connection, mapped by request ID.
self._requests = {}
def run(self):
"""Begin processing data from the socket."""
self._keepGoing = True
while self._keepGoing:
try:
self.process_input()
except KeyboardInterrupt:
break
# except EOFError, inst:
# raise
# if __dbg__: logging.error(str(inst))
# break
def process_input(self):
"""Attempt to read a single Record from the socket and process it."""
        # Requests are handled inline (no multiplexing); run() keeps calling this
        # method to read one record at a time until _keepGoing is cleared by
        # end_request().
if not self._keepGoing:
return
rec = Record()
rec.read(self._stdin)
if rec.type == FCGI_GET_VALUES:
self._do_get_values(rec)
elif rec.type == FCGI_BEGIN_REQUEST:
self._do_begin_request(rec)
elif rec.type == FCGI_ABORT_REQUEST:
self._do_abort_request(rec)
elif rec.type == FCGI_PARAMS:
self._do_params(rec)
elif rec.type == FCGI_STDIN:
self._do_stdin(rec)
elif rec.type == FCGI_DATA:
self._do_data(rec)
elif rec.requestId == FCGI_NULL_REQUEST_ID:
self._do_unknown_type(rec)
else:
# Need to complain about this.
pass
def writeRecord(self, rec):
"""
Write a Record to the socket.
"""
rec.write(self._stdout)
def end_request(self, req, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
"""
End a Request.
Called by Request objects. An FCGI_END_REQUEST Record is
sent to the web server. If the web server no longer requires
the connection, the socket is closed, thereby ending this
Connection (run() returns).
"""
if not req.aborted:
# write empty packet to stdin
rec = Record(FCGI_STDOUT, req.requestId)
rec.contentData = ''
rec.contentLength = 0
self.writeRecord(rec)
# write end request
rec = Record(FCGI_END_REQUEST, req.requestId)
rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus,
protocolStatus)
rec.contentLength = FCGI_EndRequestBody_LEN
self.writeRecord(rec)
if remove:
if __dbg__: logging.debug('end_request: removing request from list')
del self._requests[req.requestId]
if __dbg__: logging.debug('end_request: flags = %d' % req.flags)
if not (req.flags & FCGI_KEEP_CONN) and not self._requests:
if __dbg__: logging.debug('end_request: set _keepGoing = False')
self._keepGoing = False
def _do_get_values(self, inrec):
"""Handle an FCGI_GET_VALUES request from the web server."""
outrec = Record(FCGI_GET_VALUES_RESULT)
pos = 0
while pos < inrec.contentLength:
pos, (name, value) = decode_pair(inrec.contentData, pos)
cap = self.server.capability.get(name)
if cap is not None:
outrec.contentData += encode_pair(name, str(cap))
outrec.contentLength = len(outrec.contentData)
self.writeRecord(outrec)
def _do_begin_request(self, inrec):
"""Handle an FCGI_BEGIN_REQUEST from the web server."""
role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData)
req = self.server.request_class(self, self._inputStreamClass)
req.requestId, req.role, req.flags = inrec.requestId, role, flags
req.aborted = False
if not self._multiplexed and self._requests:
# Can't multiplex requests.
self.end_request(req, 0L, FCGI_CANT_MPX_CONN, remove=False)
else:
self._requests[inrec.requestId] = req
    def _do_abort_request(self, inrec):
        """
        Handle an FCGI_ABORT_REQUEST from the web server.
        Mark the associated Request as aborted and end it.
        """
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.aborted = True
            self.end_request(req, 0L, FCGI_REQUEST_COMPLETE)
def _start_request(self, req):
"""Run the request."""
# Not multiplexed, so run it inline.
req.run()
def _do_params(self, inrec):
"""
Handle an FCGI_PARAMS Record.
If the last FCGI_PARAMS Record is received, start the request.
"""
req = self._requests.get(inrec.requestId)
if req is not None:
if inrec.contentLength:
pos = 0
while pos < inrec.contentLength:
pos, (name, value) = decode_pair(inrec.contentData, pos)
req.params[name] = value
    def _do_stdin(self, inrec):
        """Handle the FCGI_STDIN stream."""
        req = self._requests.get(inrec.requestId)
        if inrec.contentLength:
            if req is not None:
                req.stdin.add_data(inrec.contentData)
        else:
            # An empty FCGI_STDIN record marks the end of the request body, so the
            # request is only started once all of its input has arrived.
            self._start_request(req)
def _do_data(self, inrec):
"""Handle the FCGI_DATA stream."""
req = self._requests.get(inrec.requestId)
if req is not None:
req.data.add_data(inrec.contentData)
def _do_unknown_type(self, inrec):
"""Handle an unknown request type. Respond accordingly."""
outrec = Record(FCGI_UNKNOWN_TYPE)
outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type)
outrec.contentLength = FCGI_UnknownTypeBody_LEN
self.writeRecord(outrec)
class FCGIServer(object):
request_class = Request
maxwrite = 8192
inputStreamShrinkThreshold = 102400 - 8192
def __init__(self, application, environ=None,
multithreaded=False, multiprocess=False,
debug=False, roles=(FCGI_RESPONDER,),
app_root=None):
if environ is None:
environ = {}
self.application = application
self.environ = environ
self.multithreaded = multithreaded
self.multiprocess = multiprocess
self.debug = debug
self.roles = roles
self._connectionClass = Connection
self.capability = {
# If threads aren't available, these are pretty much correct.
FCGI_MAX_CONNS: 1,
FCGI_MAX_REQS: 1,
FCGI_MPXS_CONNS: 0
}
self.app_root = app_root
def run(self):
        # With the files/pipes transport both directions of the FastCGI conversation
        # go over the stdin handle, so it is switched to binary mode and reopened,
        # unbuffered, for writing.
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        stdin = sys.stdin
        stdout = os.fdopen(sys.stdin.fileno(), 'w', 0)
conn = Connection(stdin, stdout, self)
conn.run()
def handler(self, req):
"""Special handler for WSGI."""
if req.role not in self.roles:
return FCGI_UNKNOWN_ROLE, 0
# Mostly taken from example CGI gateway.
environ = req.params
environ.update(self.environ)
environ['wsgi.version'] = (1, 0)
environ['wsgi.input'] = req.stdin
stderr = TeeOutputStream((sys.stderr, req.stderr))
environ['wsgi.errors'] = stderr
environ['wsgi.multithread'] = False
environ['wsgi.multiprocess'] = False
environ['wsgi.run_once'] = False
if environ.get('HTTPS', 'off') in ('on', '1'):
environ['wsgi.url_scheme'] = 'https'
else:
environ['wsgi.url_scheme'] = 'http'
self._sanitizeEnv(environ)
headers_set = []
headers_sent = []
result = None
def write(data):
assert type(data) is str, 'write() argument must be string'
assert headers_set, 'write() before start_response()'
if not headers_sent:
status, responseHeaders = headers_sent[:] = headers_set
found = False
for header, value in responseHeaders:
if header.lower() == 'content-length':
found = True
break
if not found and result is not None:
try:
if len(result) == 1:
responseHeaders.append(('Content-Length',
str(len(data))))
except:
pass
s = 'Status: %s\r\n' % status
for header in responseHeaders:
s += '%s: %s\r\n' % header
s += '\r\n'
req.stdout.write(s)
req.stdout.write(data)
req.stdout.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
# Re-raise if too late
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
else:
assert not headers_set, 'Headers already set!'
assert type(status) is str, 'Status must be a string'
assert len(status) >= 4, 'Status must be at least 4 characters'
assert int(status[:3]), 'Status must begin with 3-digit code'
assert status[3] == ' ', 'Status must have a space after code'
assert type(response_headers) is list, 'Headers must be a list'
if __dbg__:
logging.debug('response headers:')
for name, val in response_headers:
assert type(name) is str, 'Header name "%s" must be a string' % name
assert type(val) is str, 'Value of header "%s" must be a string' % name
logging.debug('%s: %s' % (name, val))
headers_set[:] = [status, response_headers]
return write
try:
try:
result = self.application(environ, start_response)
try:
for data in result:
if data:
write(data)
if not headers_sent:
write('') # in case body was empty
finally:
# if hasattr(result, 'close'):
# result.close()
pass
# except socket.error, e:
# if e[0] != errno.EPIPE:
# raise # Don't let EPIPE propagate beyond server
except:
raise
finally:
pass
return FCGI_REQUEST_COMPLETE, 0
def _sanitizeEnv(self, environ):
"""Ensure certain values are present, if required by WSGI."""
if __dbg__:
logging.debug('raw envs: {0}'.format(environ))
environ['SCRIPT_NAME'] = ''
reqUri = None
if environ.has_key('REQUEST_URI'):
reqUri = environ['REQUEST_URI'].split('?', 1)
if not environ.has_key('PATH_INFO') or not environ['PATH_INFO']:
if reqUri is not None:
environ['PATH_INFO'] = reqUri[0]
else:
environ['PATH_INFO'] = ''
# convert %XX to python unicode
environ['PATH_INFO'] = urllib.unquote(environ['PATH_INFO'])
# process app_root
if self.app_root and environ['PATH_INFO'].startswith(self.app_root):
environ['PATH_INFO'] = environ['PATH_INFO'][len(self.app_root):]
environ['SCRIPT_NAME'] = self.app_root
if not environ.has_key('QUERY_STRING') or not environ['QUERY_STRING']:
if reqUri is not None and len(reqUri) > 1:
environ['QUERY_STRING'] = reqUri[1]
else:
environ['QUERY_STRING'] = ''
# If any of these are missing, it probably signifies a broken
# server...
for name, default in [('REQUEST_METHOD', 'GET'),
('SERVER_NAME', 'localhost'),
('SERVER_PORT', '80'),
('SERVER_PROTOCOL', 'HTTP/1.0')]:
if not environ.has_key(name):
environ['wsgi.errors'].write('%s: missing FastCGI param %s '
'required by WSGI!\n' %
(self.__class__.__name__, name))
environ[name] = default
def error(self, req):
"""
Called by Request if an exception occurs within the handler. May and
should be overridden.
"""
if self.debug:
import cgitb
req.stdout.write('Status: 500 Internal Server Error\r\n' +
'Content-Type: text/html\r\n\r\n' +
cgitb.html(sys.exc_info()))
else:
errorpage = """<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html><head>
<title>Unhandled Exception</title>
</head><body>
<h1>Unhandled Exception</h1>
<p>An unhandled exception was thrown by the application.</p>
</body></html>
"""
req.stdout.write('Status: 500 Internal Server Error\r\n' +
'Content-Type: text/html\r\n\r\n' +
errorpage)
def example_application(environ, start_response):
"""example wsgi app which outputs wsgi environment"""
logging.debug('wsgi app started')
data = ''
env_keys = environ.keys()
env_keys.sort()
for e in env_keys:
data += '%s: %s\n' % (e, environ[e])
data += 'sys.version: ' + sys.version + '\n'
start_response('200 OK', [('Content-Type', 'text/plain'), ('Content-Length', str(len(data)))])
yield data
def run_example_app():
if __dbg__: logging.info('run_fcgi: STARTED')
FCGIServer(example_application).run()
if __dbg__: logging.info('run_fcgi: EXITED')
def run_django_app(django_settings_module, django_root):
"""run django app by django_settings_module, django_settings_module can be python path or physical path """
if os.path.exists(django_settings_module):
# this is physical path
app_path, app_settings = os.path.split(django_settings_module)
# add directory to PYTHONPATH
app_dir = os.path.dirname(app_path)
if app_dir not in sys.path:
sys.path.append(app_dir)
if __dbg__: logging.debug('%s added to PYTHONPATH' % app_dir)
# cut .py extension in module
if app_settings.endswith('.py'):
app_settings = app_settings[:-3]
# get python path to settings
settings_module = '%s.%s' % (os.path.basename(app_path), app_settings)
else:
        # assume django_settings_module is a valid Python dotted path
settings_module = django_settings_module
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
if __dbg__: logging.info('DJANGO_SETTINGS_MODULE set to %s' % settings_module)
try:
from django.core.handlers.wsgi import WSGIHandler
FCGIServer(WSGIHandler(), app_root=django_root).run()
except ImportError:
if __dbg__: logging.error(
'Could not import django.core.handlers.wsgi module. Check that django is installed and in PYTHONPATH.')
raise
def run_wsgi_app(wsgi_app_path, django_root):
try:
wsgi_app = import_function(wsgi_app_path)
except:
if __dbg__:
logging.error('Could not import WSGI APP: {0}'.format(wsgi_app_path))
raise
FCGIServer(wsgi_app, app_root=django_root).run()
def import_function(func_path):
parts = func_path.split('.')
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
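# Illustrative use (hypothetical dotted path): import_function('mysite.wsgi.application')
# imports the top-level package 'mysite', then walks the 'wsgi' and 'application'
# attributes and returns the WSGI callable.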
if __name__ == '__main__':
# parse options
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("", "--django-settings-module", dest="django_settings_module",
help="The FQPN or physical path of Django settings module")
parser.add_option("", "--django-root", dest="django_root",
help="strip this string from the front of any URLs before matching them against your URLconf patterns")
parser.add_option("", "--wsgi-app", dest="wsgi_app",
help="The FQPN of a WSGI application object to serve")
parser.add_option("", "--debug", dest="debug", action="store_true",
help="Enables debug logging")
parser.set_defaults(
django_settings_module=os.environ.get('DJANGO_SETTINGS_MODULE', None),
django_root=os.environ.get('django.root', None),
wsgi_app=os.environ.get('WSGI_APP', None),
debug=os.environ.get('ZOOFCGI_DEBUG', False),
)
(options, args) = parser.parse_args()
__dbg__ = options.debug
# compile self
compiled = os.path.split(__file__)[-1].replace('.py', '.pyc' if __dbg__ else '.pyo')
if not os.path.exists(compiled):
import py_compile
try:
py_compile.compile(__file__)
except:
pass
# enable logging
if __dbg__:
logging.basicConfig(
filename=os.path.join(os.path.dirname(__file__), '_zoofcgi_%s_%d.log' % (
datetime.datetime.now().strftime('%y%m%d_%H%M%S'), os.getpid())),
filemode='w',
format='%(asctime)s [%(levelname)-5s] %(message)s',
level=logging.DEBUG)
if options.django_settings_module:
# check django app by DJANGO_SETTINGS_MODULE
run_django_app(options.django_settings_module, options.django_root)
elif options.wsgi_app:
# run general WSGI app by WSGI_APP
run_wsgi_app(options.wsgi_app, options.django_root)
else:
# run example app
run_example_app()
|
|
from tethys_sdk.testing import TethysTestCase
from tethys_apps.models import TethysApp, PersistentStoreDatabaseSetting, PersistentStoreService
from django.core.exceptions import ValidationError
from django.conf import settings
from tethys_apps.exceptions import TethysAppSettingNotAssigned, PersistentStorePermissionError,\
PersistentStoreInitializerError
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm import sessionmaker
from unittest import mock
class PersistentStoreDatabaseSettingTests(TethysTestCase):
def set_up(self):
self.test_app = TethysApp.objects.get(package='test_app')
# Get default database connection if there is one
if 'default' in settings.DATABASES:
self.conn = settings.DATABASES['default']
else:
self.conn = {
'USER': 'tethys_super',
'PASSWORD': 'pass',
'HOST': 'localhost',
'PORT': '5435'
}
self.expected_url = 'postgresql://{}:{}@{}:{}'.format(
self.conn['USER'], self.conn['PASSWORD'], self.conn['HOST'], self.conn['PORT']
)
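        # Connection URL without a database name; the engine, sessionmaker and URL
        # assertions below compare against this value.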
self.pss = PersistentStoreService(
name='test_ps',
host=self.conn['HOST'],
port=self.conn['PORT'],
username=self.conn['USER'],
password=self.conn['PASSWORD']
)
self.pss.save()
def tear_down(self):
pass
def test_clean_validation_error(self):
ps_ds_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_ds_setting.persistent_store_service = None
ps_ds_setting.save()
# Check ValidationError
self.assertRaises(ValidationError, self.test_app.settings_set.select_subclasses().get(name='spatial_db').clean)
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.create_persistent_store_database')
def test_initialize(self, mock_create):
ps_ds_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_ds_setting.persistent_store_service = self.pss
ps_ds_setting.save()
# Execute
self.test_app.settings_set.select_subclasses().get(name='spatial_db').initialize()
mock_create.assert_called()
@mock.patch('tethys_apps.models.is_testing_environment')
def test_get_namespaced_persistent_store_name(self, mock_ite):
mock_ite.return_value = False
ps_ds_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_ds_setting.persistent_store_service = self.pss
ps_ds_setting.save()
# Execute
ret = self.test_app.settings_set.select_subclasses().get(name='spatial_db').\
get_namespaced_persistent_store_name()
# Check result
self.assertEqual('test_app_spatial_db', ret)
@mock.patch('tethys_apps.models.is_testing_environment')
def test_get_namespaced_persistent_store_name_testing(self, mock_ite):
mock_ite.return_value = True
ps_ds_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_ds_setting.persistent_store_service = self.pss
ps_ds_setting.save()
# Execute
ret = self.test_app.settings_set.select_subclasses().get(name='spatial_db').\
get_namespaced_persistent_store_name()
# Check result
self.assertEqual('test_app_tethys-testing_spatial_db', ret)
def test_get_value(self):
ps_ds_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_ds_setting.persistent_store_service = self.pss
ps_ds_setting.save()
# Execute
ret = self.test_app.settings_set.select_subclasses().get(name='spatial_db').get_value(with_db=True)
# Check results
self.assertIsInstance(ret, PersistentStoreService)
self.assertEqual('test_ps', ret.name)
self.assertEqual(self.conn['HOST'], ret.host)
self.assertEqual(int(self.conn['PORT']), ret.port)
self.assertEqual(self.conn['USER'], ret.username)
self.assertEqual(self.conn['PASSWORD'], ret.password)
def test_get_value_none(self):
ps_ds_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_ds_setting.persistent_store_service = None
ps_ds_setting.save()
self.assertRaises(TethysAppSettingNotAssigned, PersistentStoreDatabaseSetting.objects
.get(name='spatial_db').get_value)
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.get_namespaced_persistent_store_name')
def test_get_value_with_db(self, mock_gn):
mock_gn.return_value = 'test_database'
ps_ds_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_ds_setting.persistent_store_service = self.pss
ps_ds_setting.save()
# Execute
ret = self.test_app.settings_set.select_subclasses().get(name='spatial_db').get_value(with_db=True)
self.assertIsInstance(ret, PersistentStoreService)
self.assertEqual('test_database', ret.database)
def test_get_value_as_engine(self):
ps_ds_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_ds_setting.persistent_store_service = self.pss
ps_ds_setting.save()
# Execute
ret = self.test_app.settings_set.select_subclasses().get(name='spatial_db').get_value(as_engine=True)
self.assertIsInstance(ret, Engine)
self.assertEqual(self.expected_url, str(ret.url))
def test_get_value_as_sessionmaker(self):
ps_ds_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_ds_setting.persistent_store_service = self.pss
ps_ds_setting.save()
# Execute
ret = self.test_app.settings_set.select_subclasses().get(name='spatial_db').get_value(as_sessionmaker=True)
self.assertIsInstance(ret, sessionmaker)
self.assertEqual(self.expected_url, str(ret.kw['bind'].url))
def test_get_value_as_url(self):
ps_ds_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_ds_setting.persistent_store_service = self.pss
ps_ds_setting.save()
# Execute
ret = self.test_app.settings_set.select_subclasses().get(name='spatial_db').get_value(as_url=True)
# check URL
self.assertEqual(self.expected_url, str(ret))
def test_persistent_store_database_exists(self):
ps_ds_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_ds_setting.persistent_store_service = self.pss
ps_ds_setting.save()
ps_ds_setting.get_namespaced_persistent_store_name = mock.MagicMock(return_value='foo_bar')
ps_ds_setting.get_value = mock.MagicMock()
mock_engine = ps_ds_setting.get_value()
mock_db = mock.MagicMock()
mock_db.name = 'foo_bar'
mock_engine.connect().execute.return_value = [mock_db]
# Execute
ret = ps_ds_setting.persistent_store_database_exists()
self.assertTrue(ret)
def test_persistent_store_database_exists_false(self):
ps_ds_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_ds_setting.persistent_store_service = self.pss
ps_ds_setting.save()
ps_ds_setting.get_namespaced_persistent_store_name = mock.MagicMock(return_value='foo_bar')
ps_ds_setting.get_value = mock.MagicMock()
mock_engine = ps_ds_setting.get_value()
mock_engine.connect().execute.return_value = []
# Execute
ret = ps_ds_setting.persistent_store_database_exists()
self.assertFalse(ret)
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.persistent_store_database_exists')
def test_drop_persistent_store_database_not_exists(self, mock_psd):
mock_psd.return_value = False
# Execute
ret = self.test_app.settings_set.select_subclasses().get(name='spatial_db').drop_persistent_store_database()
self.assertIsNone(ret)
@mock.patch('tethys_apps.models.logging')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.get_value')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.persistent_store_database_exists')
def test_drop_persistent_store_database(self, mock_psd, mock_get, mock_log):
mock_psd.return_value = True
# Execute
self.test_app.settings_set.select_subclasses().get(name='spatial_db').drop_persistent_store_database()
# Check
mock_log.getLogger().info.assert_called_with('Dropping database "spatial_db" for app "test_app"...')
mock_get().connect.assert_called()
rts_call_args = mock_get().connect().execute.call_args_list
self.assertEqual('commit', rts_call_args[0][0][0])
self.assertEqual('DROP DATABASE IF EXISTS "test_app_tethys-testing_spatial_db"', rts_call_args[1][0][0])
mock_get().connect().close.assert_called()
@mock.patch('tethys_apps.models.logging')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.get_value')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.persistent_store_database_exists')
def test_drop_persistent_store_database_exception(self, mock_psd, mock_get, mock_log):
mock_psd.return_value = True
mock_get().connect().execute.side_effect = [Exception('Message: being accessed by other users'),
mock.MagicMock(), mock.MagicMock(), mock.MagicMock()]
# Execute
self.test_app.settings_set.select_subclasses().get(name='spatial_db').drop_persistent_store_database()
# Check
mock_log.getLogger().info.assert_called_with('Dropping database "spatial_db" for app "test_app"...')
mock_get().connect.assert_called()
rts_call_args = mock_get().connect().execute.call_args_list
self.assertEqual('commit', rts_call_args[0][0][0])
self.assertIn('SELECT pg_terminate_backend(pg_stat_activity.pid)', rts_call_args[1][0][0])
mock_get().connect().close.assert_called()
@mock.patch('tethys_apps.models.logging')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.get_value')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.persistent_store_database_exists')
def test_drop_persistent_store_database_connection_exception(self, mock_psd, mock_get, mock_log):
mock_psd.return_value = True
mock_get().connect.side_effect = [Exception('Message: being accessed by other users'),
mock.MagicMock(), mock.MagicMock()]
# Execute
self.test_app.settings_set.select_subclasses().get(name='spatial_db').drop_persistent_store_database()
# Check
mock_log.getLogger().info.assert_called_with('Dropping database "spatial_db" for app "test_app"...')
mock_get().connect.assert_called()
mock_get().connect().execute.assert_not_called()
mock_get().connect().close.assert_not_called()
@mock.patch('tethys_apps.models.logging')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.get_value')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.persistent_store_database_exists')
def test_drop_persistent_store_database_exception_else(self, mock_psd, mock_get, _):
mock_psd.return_value = True
mock_get().connect().execute.side_effect = [Exception('Error Message'), mock.MagicMock()]
# Execute
self.assertRaises(Exception, PersistentStoreDatabaseSetting.objects.
get(name='spatial_db').drop_persistent_store_database)
# Check
mock_get().connect().close.assert_called()
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.drop_persistent_store_database')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.get_namespaced_persistent_store_name')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.persistent_store_database_exists')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.get_value')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.initializer_function')
@mock.patch('tethys_apps.models.logging')
def test_create_persistent_store_database(self, mock_log, mock_init, mock_get, mock_ps_de, mock_gn, mock_drop):
# Mock Get Name
mock_gn.return_value = 'spatial_db'
# Mock Drop Database
mock_drop.return_value = ''
# Mock persistent_store_database_exists
mock_ps_de.return_value = True
# Mock get_values
mock_url = mock.MagicMock(username='test_app')
mock_engine = mock.MagicMock()
mock_new_db_engine = mock.MagicMock()
mock_init_param = mock.MagicMock()
mock_get.side_effect = [mock_url, mock_engine, mock_new_db_engine, mock_init_param]
# Execute
self.test_app.settings_set.select_subclasses().get(name='spatial_db')\
.create_persistent_store_database(refresh=True, force_first_time=True)
# Check mock called
rts_get_args = mock_log.getLogger().info.call_args_list
check_log1 = 'Creating database "spatial_db" for app "test_app"...'
check_log2 = 'Enabling PostGIS on database "spatial_db" for app "test_app"...'
check_log3 = 'Initializing database "spatial_db" for app "test_app" ' \
'with initializer "appsettings.model.init_spatial_db"...'
self.assertEqual(check_log1, rts_get_args[0][0][0])
self.assertEqual(check_log2, rts_get_args[1][0][0])
self.assertEqual(check_log3, rts_get_args[2][0][0])
mock_init.assert_called()
@mock.patch('sqlalchemy.exc')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.drop_persistent_store_database')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.get_namespaced_persistent_store_name')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.persistent_store_database_exists')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.get_value')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.initializer_function')
@mock.patch('tethys_apps.models.logging')
def test_create_persistent_store_database_perm_error(self, _, __, mock_get, mock_ps_de, mock_gn, mock_drop, mock_e):
# Mock Get Name
mock_gn.return_value = 'spatial_db'
# Mock Drop Database
mock_drop.return_value = ''
# Mock persistent_store_database_exists
mock_ps_de.return_value = True
# Mock get_values
mock_url = mock.MagicMock(username='test_app')
mock_engine = mock.MagicMock()
mock_e.ProgrammingError = Exception
mock_engine.connect().execute.side_effect = [mock.MagicMock(), Exception]
mock_get.side_effect = [mock_url, mock_engine]
# Execute
self.assertRaises(PersistentStorePermissionError, PersistentStoreDatabaseSetting
.objects.get(name='spatial_db').create_persistent_store_database, refresh=True)
@mock.patch('sqlalchemy.exc')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.drop_persistent_store_database')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.get_namespaced_persistent_store_name')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.persistent_store_database_exists')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.get_value')
@mock.patch('tethys_apps.models.logging')
def test_create_persistent_store_database_ext_perm_error(self, _, mock_get, mock_ps_de, mock_gn, mock_drop, mock_e):
# Mock Get Name
mock_gn.return_value = 'spatial_db'
# Mock Drop Database
mock_drop.return_value = ''
# Mock persistent_store_database_exists
mock_ps_de.return_value = True
# Mock get_values
mock_url = mock.MagicMock(username='test_app')
mock_engine = mock.MagicMock()
mock_e.ProgrammingError = Exception
mock_new_db_engine = mock.MagicMock()
mock_new_db_engine.connect().execute.side_effect = Exception
mock_get.side_effect = [mock_url, mock_engine, mock_new_db_engine]
# Execute
self.assertRaises(PersistentStorePermissionError, PersistentStoreDatabaseSetting
.objects.get(name='spatial_db').create_persistent_store_database)
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.drop_persistent_store_database')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.get_namespaced_persistent_store_name')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.persistent_store_database_exists')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.get_value')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting.initializer_function')
@mock.patch('tethys_apps.models.logging')
def test_create_persistent_store_database_exception(self, _, mock_init, mock_get, mock_ps_de,
mock_gn, mock_drop):
# Mock initializer_function
mock_init.side_effect = Exception('Initializer Error')
# Mock Get Name
mock_gn.return_value = 'spatial_db'
# Mock Drop Database
mock_drop.return_value = ''
# Mock persistent_store_database_exists
mock_ps_de.return_value = True
# Mock get_values
mock_url = mock.MagicMock(username='test_app')
mock_engine = mock.MagicMock()
mock_new_db_engine = mock.MagicMock()
mock_init_param = mock.MagicMock()
mock_get.side_effect = [mock_url, mock_engine, mock_new_db_engine, mock_init_param]
# Execute
self.assertRaises(PersistentStoreInitializerError, PersistentStoreDatabaseSetting
.objects.get(name='spatial_db').create_persistent_store_database)
|
|
"""
TeamDataCapsule
^^^^^^^^^^^^^^^
"""
import math
from collections import defaultdict
import rospy
from humanoid_league_msgs.msg import Strategy, TeamData
from geometry_msgs.msg import PointStamped
class TeamDataCapsule:
def __init__(self):
self.bot_id = rospy.get_param("bot_id", 1)
self.strategy_sender = None # type: rospy.Publisher
self.time_to_ball_publisher = None # type: rospy.Publisher
# indexed with one to match robot ids
self.team_data = {}
for i in range(1, 7):
self.team_data[i] = TeamData()
self.team_strategy = dict()
self.times_to_ball = dict()
self.roles = {
'striker': Strategy.ROLE_STRIKER,
'offense': Strategy.ROLE_STRIKER,
'supporter': Strategy.ROLE_SUPPORTER,
'defender': Strategy.ROLE_DEFENDER,
'defense': Strategy.ROLE_DEFENDER,
'other': Strategy.ROLE_OTHER,
'goalie': Strategy.ROLE_GOALIE,
'idle': Strategy.ROLE_IDLING
}
self.own_time_to_ball = 9999.0
self.strategy = Strategy()
self.strategy.role = self.roles[rospy.get_param('role')]
self.strategy_update = None
self.action_update = None
self.role_update = None
self.data_timeout = rospy.get_param("team_data_timeout", 2)
self.ball_max_covariance = rospy.get_param("ball_max_covariance", 0.5)
self.ball_lost_time = rospy.Duration(rospy.get_param('behavior/body/ball_lost_time', 8.0))
self.pose_precision_threshold = rospy.get_param('behavior/body/pose_precision_threshold', None)
def is_valid(self, data: TeamData):
return rospy.Time.now() - data.header.stamp < rospy.Duration(self.data_timeout) \
and data.state != TeamData.STATE_PENALIZED
def get_goalie_ball_position(self):
"""Return the ball relative to the goalie
:return a tuple with the relative ball and the last update time
"""
for data in self.team_data.values():
role = data.strategy.role
if role == Strategy.ROLE_GOALIE and self.is_valid(data):
return data.ball_relative.pose.position.x, data.ball_relative.pose.position.y
return None
def get_goalie_ball_distance(self):
"""Return the distance between the goalie and the ball
:return a tuple with the ball-goalie-distance and the last update time
"""
goalie_ball_position = self.get_goalie_ball_position()
if goalie_ball_position is not None:
return math.sqrt(goalie_ball_position[0] ** 2 + goalie_ball_position[1] ** 2)
else:
return None
def is_goalie_handling_ball(self):
""" Returns true if the goalie is going to the ball."""
for data in self.team_data.values():
if self.is_valid(data) \
and data.strategy.role == Strategy.ROLE_GOALIE \
and data.strategy.action in [Strategy.ACTION_GOING_TO_BALL, Strategy.ACTION_KICKING]:
return True
return False
def is_team_mate_kicking(self):
"""Returns true if one of the players in the own team is kicking."""
for data in self.team_data.values():
if self.is_valid(data) and data.strategy.action == Strategy.ACTION_KICKING:
return True
return False
def team_rank_to_ball(self, own_ball_distance, count_goalies=True, use_time_to_ball=False):
"""Returns the rank of this robot compared to the team robots concerning ball distance.
Ignores the goalies distance, as it should not leave the goal, even if it is closer than field players.
For example, we do not want our goalie to perform a throw in against our empty goal.
:return the rank from 1 (nearest) to the number of robots
"""
distances = []
for data in self.team_data.values():
# data should not be outdated, from a robot in play, only goalie if desired,
# x and y covariance values should be below threshold. orientation covariance of ball does not matter
# covariance is a 6x6 matrix as array. 0 is x, 7 is y
if self.is_valid(data) and (
data.strategy.role != Strategy.ROLE_GOALIE or count_goalies) \
and data.ball_absolute.covariance[0] < self.ball_max_covariance \
and data.ball_absolute.covariance[7] < self.ball_max_covariance:
if use_time_to_ball:
distances.append(data.time_to_position_at_ball)
else:
distances.append(self.get_robot_ball_euclidian_distance(data))
for rank, distance in enumerate(sorted(distances)):
if own_ball_distance < distance:
return rank + 1
return len(distances) + 1
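    # Worked example (illustrative): with valid teammate distances [1.0, 2.0], an own
    # distance of 0.5 ranks 1, a distance of 1.5 ranks 2, and anything farther than all
    # teammates ranks len(distances) + 1 = 3.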
def get_robot_ball_euclidian_distance(self, robot_teamdata):
ball_rel_x = robot_teamdata.ball_absolute.pose.position.x - robot_teamdata.robot_position.pose.position.x
ball_rel_y = robot_teamdata.ball_absolute.pose.position.y - robot_teamdata.robot_position.pose.position.y
dist = math.sqrt(ball_rel_x ** 2 + ball_rel_y ** 2)
return dist
def set_role(self, role):
"""Set the role of this robot in the team
:param role: Has to be a role from humanoid_league_msgs/Strategy
"""
assert role in [Strategy.ROLE_STRIKER, Strategy.ROLE_SUPPORTER, Strategy.ROLE_DEFENDER,
Strategy.ROLE_OTHER, Strategy.ROLE_GOALIE, Strategy.ROLE_IDLING]
self.strategy.role = role
self.role_update = rospy.get_time()
def get_role(self):
return self.strategy.role, self.role_update
def set_action(self, action):
"""Set the action of this robot
:param action: An action from humanoid_league_msgs/Strategy"""
assert action in [Strategy.ACTION_UNDEFINED, Strategy.ACTION_POSITIONING, Strategy.ACTION_GOING_TO_BALL,
Strategy.ACTION_TRYING_TO_SCORE, Strategy.ACTION_WAITING, Strategy.ACTION_SEARCHING,
Strategy.ACTION_KICKING, Strategy.ACTION_LOCALIZING]
self.strategy.action = action
self.action_update = rospy.get_time()
def get_action(self):
return self.strategy.action, self.action_update
def set_kickoff_strategy(self, strategy):
assert strategy in [Strategy.SIDE_LEFT, Strategy.SIDE_MIDDLE, Strategy.SIDE_RIGHT]
self.strategy.offensive_side = strategy
self.strategy_update = rospy.get_time()
def get_kickoff_strategy(self):
return self.strategy.offensive_side, self.strategy_update
def get_active_teammate_poses(self, count_goalies=False):
""" Returns the poses of all playing robots """
poses = []
for data in self.team_data.values():
if self.is_valid(data) and (data.strategy.role != Strategy.ROLE_GOALIE or count_goalies):
poses.append(data.robot_position.pose)
return poses
def get_own_time_to_ball(self):
return self.own_time_to_ball
def team_data_callback(self, msg):
# Save team data
self.team_data[msg.robot_id] = msg
def publish_strategy(self):
"""Publish for team comm"""
self.strategy_sender.publish(self.strategy)
def publish_time_to_ball(self):
self.time_to_ball_publisher.publish(self.own_time_to_ball)
def get_teammate_ball_seen_time(self):
"""Returns the time at which a teammate has seen the ball accurately enough"""
teammate_ball = self.get_teammate_ball()
if teammate_ball is not None:
return teammate_ball.header.stamp
else:
return rospy.Time(0)
def teammate_ball_is_valid(self):
"""Returns true if a teammate has seen the ball accurately enough"""
return self.get_teammate_ball() is not None
def get_teammate_ball(self):
"""Returns the ball from the closest teammate that has accurate enough localization and ball precision"""
def std_dev_from_covariance(covariance):
x_sdev = covariance[0] # position 0,0 in a 6x6-matrix
y_sdev = covariance[7] # position 1,1 in a 6x6-matrix
theta_sdev = covariance[35] # position 5,5 in a 6x6-matrix
return x_sdev, y_sdev, theta_sdev
best_robot_dist = 9999
best_ball = None
for robot_name, single_teamdata in self.team_data.items():
if not self.is_valid(single_teamdata):
continue
ball = single_teamdata.ball_absolute
ball_x_std_dev, ball_y_std_dev, _ = std_dev_from_covariance(ball.covariance)
robot = single_teamdata.robot_position
robot_x_std_dev, robot_y_std_dev, robot_theta_std_dev = std_dev_from_covariance(robot.covariance)
stamp = single_teamdata.header.stamp
if rospy.Time.now() - stamp < self.ball_lost_time:
if ball_x_std_dev < self.ball_max_covariance and ball_y_std_dev < self.ball_max_covariance:
if robot_x_std_dev < self.pose_precision_threshold['x_sdev'] and \
robot_y_std_dev < self.pose_precision_threshold['y_sdev'] and \
robot_theta_std_dev < self.pose_precision_threshold['theta_sdev']:
robot_dist = self.get_robot_ball_euclidian_distance(single_teamdata)
if robot_dist < best_robot_dist:
best_ball = PointStamped()
best_ball.header = single_teamdata.header
best_ball.point.x = single_teamdata.ball_absolute.pose.position.x
best_ball.point.y = single_teamdata.ball_absolute.pose.position.y
best_robot_dist = robot_dist
return best_ball
|
|
# Copyright 2020-2022 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time, pytz
import json
from datetime import datetime
from datetime import timedelta
from google.cloud import firestore
from google.cloud import tasks_v2
from google.protobuf import timestamp_pb2
#@firestore.transactional requires function to be at the module level
@firestore.transactional
def update_doc_in_transaction(transaction, doc, update_dict):
print('*** enter update_doc_in_transaction ***')
# apply update if document is unchanged
doc_ref = doc.reference
snapshot = doc_ref.get(transaction=transaction)
if doc.update_time == snapshot.update_time:
transaction.update(doc_ref, update_dict)
#log
return True
else:
#log
return False
class TagScheduler:
"""Class for managing scheduled tasks to update
project = my-project
region = us-central1
queue_name = tag-engine
app_engine_uri = task handler uri set inside the
app engine project hosting the cloud task queue
stale_timer = age of PENDING tasks that gets reset to READY (in minutes)
"""
def __init__(self,
tag_engine_project,
queue_region,
queue_name,
app_engine_uri,
stale_time=10):
self.tag_engine_project = tag_engine_project
self.queue_region = queue_region
self.queue_name = queue_name
self.app_engine_uri = app_engine_uri
self.stale_time = stale_time
self.db = firestore.Client()
##################### API METHODS ############
def scan_for_update_jobs(self):
print('*** enter scan_for_update_jobs ***')
        tag_ref = self.db.collection('tag_config')
tag_ref = tag_ref.where("refresh_mode", "==", "AUTO")
tag_ref = tag_ref.where("scheduling_status", "==", "READY")
tag_ref = tag_ref.where("config_status", "==", "ACTIVE")
tag_ref = tag_ref.where("next_run", "<=", datetime.utcnow())
ready_configs = list(tag_ref.stream())
print('ready_configs: ' + str(ready_configs))
#TODO: consider running transactions async
for config in ready_configs:
print('found tag config to refresh')
transaction = self.db.transaction()
payload = self._set_status_pending(transaction, config)
if payload:
doc_id = payload[0]
version = payload[1]
print('doc_id: ' + doc_id)
print('version: ' + str(version))
response = self._send_cloud_task(doc_id, version)
print('send_cloud_task response: ' + str(response))
#log success
else:
                print('invalid payload')
#log fail
return True
def reset_stale_jobs(self):
print('*** enter reset_stale_jobs ***')
tag_ref = self.db.collection("tag_config")
tag_ref = tag_ref.where("scheduling_status", "==", "PENDING")
tag_ref = tag_ref.where("config_status", "==", "ACTIVE")
pending_configs = list(tag_ref.stream())
for config in pending_configs:
udt = config.update_time.replace(tzinfo=pytz.UTC)
ts = datetime.utcnow().replace(tzinfo=pytz.UTC)
if (udt + timedelta(minutes=self.stale_time)) < ts:
print('found a stale config')
self._set_status_ready(config)
return True
def schedule_job(self, doc_id):
print('*** enter schedule_job ***')
collection = self.db.collection("tag_config")
tag_config = collection.document(doc_id).get()
response = self._set_status_ready(tag_config)
print('response: ' + str(response))
#Log
def get_config_and_template(self, doc_id):
print('*** enter get_config_and_template ***')
tag_config = self.db.collection('tag_config').document(doc_id).get()
template_id = tag_config.get('template_uuid')
template_config = self.db\
.collection('tag_template').document(template_id).get()
return tag_config, template_config
    # end of get_config_and_template
################ INTERNAL PROCESSING METHODS #################
def _set_status_ready(self, doc):
print('*** enter _set_status_ready ***')
transaction = self.db.transaction()
task = {
'scheduling_status':'READY',
}
return update_doc_in_transaction(transaction, doc, task)
def _set_status_pending(self, transaction, doc):
print('*** enter _set_status_pending ***')
data = doc.to_dict()
version = data.get('version', 0) + 1
delta = data.get('refresh_frequency', 24)
unit = data.get('refresh_unit', 'hours')
        if unit == 'hours':
            next_run = datetime.utcnow() + timedelta(hours=delta)
        elif unit == 'days':
            next_run = datetime.utcnow() + timedelta(days=delta)
        else:
            # fall back to hours so next_run is always defined
            next_run = datetime.utcnow() + timedelta(hours=delta)
print('version: ' + str(version))
print('delta: ' + str(delta))
print('next_run: ' + str(next_run))
task = {
'version': version,
'scheduling_status':'PENDING',
'next_run' : next_run
}
if update_doc_in_transaction(transaction, doc, task):
return doc.id, version
else:
return None
def _send_cloud_task(self, doc_id, version):
print('*** enter _send_cloud_task ***')
client = tasks_v2.CloudTasksClient()
parent = client.queue_path(self.tag_engine_project, self.queue_region, self.queue_name)
task = {
'app_engine_http_request': {
'http_method': tasks_v2.HttpMethod.POST,
'relative_uri': self.app_engine_uri
}
}
task['app_engine_http_request']['headers'] = {'Content-type': 'application/json'}
payload = {'doc_id': doc_id, 'version': version}
print('payload: ' + str(payload))
payload_utf8 = json.dumps(payload).encode()
task['app_engine_http_request']['body'] = payload_utf8
response = client.create_task(parent=parent, task=task)
print('response: ' + str(response))
return response
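# The timestamp_pb2 import at the top of this file is only needed when a task
# should run later rather than immediately. A minimal sketch (illustration
# only, not called by TagScheduler as written) of attaching a schedule_time to
# the task dict before create_task():
def _with_schedule_time(task, run_at):
    """Set a protobuf schedule_time on a Cloud Tasks task dict.
    run_at is a timezone-naive UTC datetime.
    """
    schedule_time = timestamp_pb2.Timestamp()
    schedule_time.FromDatetime(run_at)
    task['schedule_time'] = schedule_time
    return task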
if __name__ == '__main__':
config = configparser.ConfigParser()
config.read("tagengine.ini")
project = config['DEFAULT']['TAG_ENGINE_PROJECT']
region = config['DEFAULT']['QUEUE_REGION']
queue_name = config['DEFAULT']['SCHEDULER_QUEUE']
app_engine_uri = '/_dynamic_auto_update'
ts = TagScheduler(project, region, queue_name, app_engine_uri)
ts.reset_stale_jobs()
ts.scan_for_update_jobs()
print('done')
|
|
"""Support for Tado sensors for each zone."""
import logging
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_WINDOW,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (
DATA,
DOMAIN,
SIGNAL_TADO_UPDATE_RECEIVED,
TYPE_AIR_CONDITIONING,
TYPE_BATTERY,
TYPE_HEATING,
TYPE_HOT_WATER,
TYPE_POWER,
)
from .entity import TadoDeviceEntity, TadoZoneEntity
_LOGGER = logging.getLogger(__name__)
DEVICE_SENSORS = {
TYPE_BATTERY: [
"battery state",
"connection state",
],
TYPE_POWER: [
"connection state",
],
}
ZONE_SENSORS = {
TYPE_HEATING: [
"power",
"link",
"overlay",
"early start",
"open window",
],
TYPE_AIR_CONDITIONING: [
"power",
"link",
"overlay",
"open window",
],
TYPE_HOT_WATER: ["power", "link", "overlay"],
}
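# Each string above maps to one binary sensor entity per matching device or
# zone created in async_setup_entry() below, e.g. a heating zone yields five
# entities: power, link, overlay, early start and open window.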
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
):
"""Set up the Tado sensor platform."""
tado = hass.data[DOMAIN][entry.entry_id][DATA]
devices = tado.devices
zones = tado.zones
entities = []
# Create device sensors
for device in devices:
if "batteryState" in device:
device_type = TYPE_BATTERY
else:
device_type = TYPE_POWER
entities.extend(
[
TadoDeviceBinarySensor(tado, device, variable)
for variable in DEVICE_SENSORS[device_type]
]
)
# Create zone sensors
for zone in zones:
zone_type = zone["type"]
if zone_type not in ZONE_SENSORS:
_LOGGER.warning("Unknown zone type skipped: %s", zone_type)
continue
entities.extend(
[
TadoZoneBinarySensor(tado, zone["name"], zone["id"], variable)
for variable in ZONE_SENSORS[zone_type]
]
)
if entities:
async_add_entities(entities, True)
class TadoDeviceBinarySensor(TadoDeviceEntity, BinarySensorEntity):
"""Representation of a tado Sensor."""
def __init__(self, tado, device_info, device_variable):
"""Initialize of the Tado Sensor."""
self._tado = tado
super().__init__(device_info)
self.device_variable = device_variable
self._unique_id = f"{device_variable} {self.device_id} {tado.home_id}"
self._state = None
async def async_added_to_hass(self):
"""Register for sensor updates."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_TADO_UPDATE_RECEIVED.format(
self._tado.home_id, "device", self.device_id
),
self._async_update_callback,
)
)
self._async_update_device_data()
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.device_name} {self.device_variable}"
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of this sensor."""
if self.device_variable == "battery state":
return DEVICE_CLASS_BATTERY
if self.device_variable == "connection state":
return DEVICE_CLASS_CONNECTIVITY
return None
@callback
def _async_update_callback(self):
"""Update and write state."""
self._async_update_device_data()
self.async_write_ha_state()
@callback
def _async_update_device_data(self):
"""Handle update callbacks."""
try:
self._device_info = self._tado.data["device"][self.device_id]
except KeyError:
return
if self.device_variable == "battery state":
self._state = self._device_info["batteryState"] == "LOW"
elif self.device_variable == "connection state":
self._state = self._device_info.get("connectionState", {}).get(
"value", False
)
class TadoZoneBinarySensor(TadoZoneEntity, BinarySensorEntity):
"""Representation of a tado Sensor."""
def __init__(self, tado, zone_name, zone_id, zone_variable):
"""Initialize of the Tado Sensor."""
self._tado = tado
super().__init__(zone_name, tado.home_id, zone_id)
self.zone_variable = zone_variable
self._unique_id = f"{zone_variable} {zone_id} {tado.home_id}"
self._state = None
self._state_attributes = None
self._tado_zone_data = None
async def async_added_to_hass(self):
"""Register for sensor updates."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_TADO_UPDATE_RECEIVED.format(
self._tado.home_id, "zone", self.zone_id
),
self._async_update_callback,
)
)
self._async_update_zone_data()
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.zone_name} {self.zone_variable}"
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of this sensor."""
if self.zone_variable == "early start":
return DEVICE_CLASS_POWER
if self.zone_variable == "link":
return DEVICE_CLASS_CONNECTIVITY
if self.zone_variable == "open window":
return DEVICE_CLASS_WINDOW
if self.zone_variable == "overlay":
return DEVICE_CLASS_POWER
if self.zone_variable == "power":
return DEVICE_CLASS_POWER
return None
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return self._state_attributes
@callback
def _async_update_callback(self):
"""Update and write state."""
self._async_update_zone_data()
self.async_write_ha_state()
@callback
def _async_update_zone_data(self):
"""Handle update callbacks."""
try:
self._tado_zone_data = self._tado.data["zone"][self.zone_id]
except KeyError:
return
if self.zone_variable == "power":
self._state = self._tado_zone_data.power == "ON"
elif self.zone_variable == "link":
self._state = self._tado_zone_data.link == "ONLINE"
elif self.zone_variable == "overlay":
self._state = self._tado_zone_data.overlay_active
if self._tado_zone_data.overlay_active:
self._state_attributes = {
"termination": self._tado_zone_data.overlay_termination_type
}
elif self.zone_variable == "early start":
self._state = self._tado_zone_data.preparation
elif self.zone_variable == "open window":
self._state = bool(
self._tado_zone_data.open_window
or self._tado_zone_data.open_window_detected
)
self._state_attributes = self._tado_zone_data.open_window_attr
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Security Group action implementations"""
import argparse
import six
from openstackclient.common import utils
from openstackclient.i18n import _
from openstackclient.identity import common as identity_common
from openstackclient.network import common
from openstackclient.network import utils as network_utils
def _format_network_security_group_rules(sg_rules):
    # For readability, and to align with the formatting of compute security
    # group rules, trim keys whose values the caller already knows (e.g.
    # security group and tenant ID) or whose values are empty.
for sg_rule in sg_rules:
empty_keys = [k for k, v in six.iteritems(sg_rule) if not v]
for key in empty_keys:
sg_rule.pop(key)
sg_rule.pop('security_group_id', None)
sg_rule.pop('tenant_id', None)
return utils.format_list_of_dicts(sg_rules)
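# Illustrative example (hypothetical values) of the trimming above: a rule like
#     {'port_range_min': 22, 'port_range_max': 22, 'protocol': 'tcp',
#      'remote_ip_prefix': '', 'security_group_id': 'SG', 'tenant_id': 'TENANT'}
# is reduced to
#     {'port_range_min': 22, 'port_range_max': 22, 'protocol': 'tcp'}
# before being passed to utils.format_list_of_dicts().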
def _format_compute_security_group_rule(sg_rule):
info = network_utils.transform_compute_security_group_rule(sg_rule)
# Trim parent security group ID since caller has this information.
info.pop('parent_group_id', None)
# Trim keys with empty string values.
keys_to_trim = [
'ip_protocol',
'ip_range',
'port_range',
'remote_security_group',
]
for key in keys_to_trim:
if key in info and not info[key]:
info.pop(key)
return utils.format_dict(info)
def _format_compute_security_group_rules(sg_rules):
rules = []
for sg_rule in sg_rules:
rules.append(_format_compute_security_group_rule(sg_rule))
return utils.format_list(rules, separator='\n')
_formatters_network = {
'security_group_rules': _format_network_security_group_rules,
}
_formatters_compute = {
'rules': _format_compute_security_group_rules,
}
def _get_columns(item):
# Build the display columns and a list of the property columns
# that need to be mapped (display column name, property name).
columns = list(item.keys())
property_column_mappings = []
if 'security_group_rules' in columns:
columns.append('rules')
columns.remove('security_group_rules')
property_column_mappings.append(('rules', 'security_group_rules'))
if 'tenant_id' in columns:
columns.append('project_id')
columns.remove('tenant_id')
property_column_mappings.append(('project_id', 'tenant_id'))
display_columns = sorted(columns)
# Build the property columns and apply any column mappings.
property_columns = sorted(columns)
for property_column_mapping in property_column_mappings:
property_index = property_columns.index(property_column_mapping[0])
property_columns[property_index] = property_column_mapping[1]
return tuple(display_columns), property_columns
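# For example (hypothetical item), the keys
#     ['id', 'name', 'security_group_rules', 'tenant_id']
# yield display columns
#     ('id', 'name', 'project_id', 'rules')
# and property columns
#     ['id', 'name', 'tenant_id', 'security_group_rules']
# so the renamed display columns still read their data from the original
# attributes.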
class CreateSecurityGroup(common.NetworkAndComputeShowOne):
"""Create a new security group"""
def update_parser_common(self, parser):
parser.add_argument(
"name",
metavar="<name>",
help=_("New security group name")
)
parser.add_argument(
"--description",
metavar="<description>",
help=_("Security group description")
)
return parser
def update_parser_network(self, parser):
parser.add_argument(
'--project',
metavar='<project>',
help=_("Owner's project (name or ID)")
)
identity_common.add_project_domain_option_to_parser(parser)
return parser
def _get_description(self, parsed_args):
if parsed_args.description is not None:
return parsed_args.description
else:
return parsed_args.name
def take_action_network(self, client, parsed_args):
# Build the create attributes.
attrs = {}
attrs['name'] = parsed_args.name
attrs['description'] = self._get_description(parsed_args)
if parsed_args.project is not None:
identity_client = self.app.client_manager.identity
project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
).id
attrs['tenant_id'] = project_id
# Create the security group and display the results.
obj = client.create_security_group(**attrs)
display_columns, property_columns = _get_columns(obj)
data = utils.get_item_properties(
obj,
property_columns,
formatters=_formatters_network
)
return (display_columns, data)
def take_action_compute(self, client, parsed_args):
description = self._get_description(parsed_args)
obj = client.security_groups.create(
parsed_args.name,
description,
)
display_columns, property_columns = _get_columns(obj._info)
data = utils.get_dict_properties(
obj._info,
property_columns,
formatters=_formatters_compute
)
return (display_columns, data)
class DeleteSecurityGroup(common.NetworkAndComputeCommand):
"""Delete a security group"""
def update_parser_common(self, parser):
parser.add_argument(
'group',
metavar='<group>',
help=_("Security group to delete (name or ID)")
)
return parser
def take_action_network(self, client, parsed_args):
obj = client.find_security_group(parsed_args.group)
client.delete_security_group(obj)
def take_action_compute(self, client, parsed_args):
data = utils.find_resource(
client.security_groups,
parsed_args.group,
)
client.security_groups.delete(data.id)
class ListSecurityGroup(common.NetworkAndComputeLister):
"""List security groups"""
def update_parser_network(self, parser):
# Maintain and hide the argument for backwards compatibility.
# Network will always return all projects for an admin.
parser.add_argument(
'--all-projects',
action='store_true',
default=False,
help=argparse.SUPPRESS,
)
return parser
def update_parser_compute(self, parser):
parser.add_argument(
'--all-projects',
action='store_true',
default=False,
help=_("Display information from all projects (admin only)")
)
return parser
def _get_return_data(self, data, include_project=True):
columns = (
"ID",
"Name",
"Description",
)
column_headers = columns
if include_project:
columns = columns + ('Tenant ID',)
column_headers = column_headers + ('Project',)
return (column_headers,
(utils.get_item_properties(
s, columns,
) for s in data))
def take_action_network(self, client, parsed_args):
return self._get_return_data(client.security_groups())
def take_action_compute(self, client, parsed_args):
search = {'all_tenants': parsed_args.all_projects}
data = client.security_groups.list(search_opts=search)
return self._get_return_data(data,
include_project=parsed_args.all_projects)
class SetSecurityGroup(common.NetworkAndComputeCommand):
"""Set security group properties"""
def update_parser_common(self, parser):
parser.add_argument(
'group',
metavar='<group>',
help=_("Security group to modify (name or ID)")
)
parser.add_argument(
'--name',
metavar='<new-name>',
help=_("New security group name")
)
parser.add_argument(
"--description",
metavar="<description>",
help=_("New security group description")
)
return parser
def take_action_network(self, client, parsed_args):
obj = client.find_security_group(parsed_args.group,
ignore_missing=False)
attrs = {}
if parsed_args.name is not None:
attrs['name'] = parsed_args.name
if parsed_args.description is not None:
attrs['description'] = parsed_args.description
# NOTE(rtheis): Previous behavior did not raise a CommandError
# if there were no updates. Maintain this behavior and issue
# the update.
client.update_security_group(obj, **attrs)
def take_action_compute(self, client, parsed_args):
data = utils.find_resource(
client.security_groups,
parsed_args.group,
)
if parsed_args.name is not None:
data.name = parsed_args.name
if parsed_args.description is not None:
data.description = parsed_args.description
# NOTE(rtheis): Previous behavior did not raise a CommandError
# if there were no updates. Maintain this behavior and issue
# the update.
client.security_groups.update(
data,
data.name,
data.description,
)
class ShowSecurityGroup(common.NetworkAndComputeShowOne):
"""Display security group details"""
def update_parser_common(self, parser):
parser.add_argument(
'group',
metavar='<group>',
help=_("Security group to display (name or ID)")
)
return parser
def take_action_network(self, client, parsed_args):
obj = client.find_security_group(parsed_args.group,
ignore_missing=False)
display_columns, property_columns = _get_columns(obj)
data = utils.get_item_properties(
obj,
property_columns,
formatters=_formatters_network
)
return (display_columns, data)
def take_action_compute(self, client, parsed_args):
obj = utils.find_resource(
client.security_groups,
parsed_args.group,
)
display_columns, property_columns = _get_columns(obj._info)
data = utils.get_dict_properties(
obj._info,
property_columns,
formatters=_formatters_compute
)
return (display_columns, data)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str,
service_name: str,
group_id: str,
subscription_id: str,
*,
filter: Optional[str] = None,
top: Optional[int] = None,
skip: Optional[int] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}/users')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
"groupId": _SERIALIZER.url("group_id", group_id, 'str', max_length=256, min_length=1),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'int', minimum=1)
if skip is not None:
query_parameters['$skip'] = _SERIALIZER.query("skip", skip, 'int', minimum=0)
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
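# Illustrative request produced by build_list_request (hypothetical IDs):
#     GET /subscriptions/<sub>/resourceGroups/<rg>/providers/
#         Microsoft.ApiManagement/service/<svc>/groups/<group>/users
#         ?$filter=...&$top=10&$skip=0&api-version=2021-08-01
# with an "Accept: application/json" header.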
def build_check_entity_exists_request(
resource_group_name: str,
service_name: str,
group_id: str,
user_id: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}/users/{userId}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
"groupId": _SERIALIZER.url("group_id", group_id, 'str', max_length=256, min_length=1),
"userId": _SERIALIZER.url("user_id", user_id, 'str', max_length=80, min_length=1),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="HEAD",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_request(
resource_group_name: str,
service_name: str,
group_id: str,
user_id: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}/users/{userId}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
"groupId": _SERIALIZER.url("group_id", group_id, 'str', max_length=256, min_length=1),
"userId": _SERIALIZER.url("user_id", user_id, 'str', max_length=80, min_length=1),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request(
resource_group_name: str,
service_name: str,
group_id: str,
user_id: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}/users/{userId}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
"groupId": _SERIALIZER.url("group_id", group_id, 'str', max_length=256, min_length=1),
"userId": _SERIALIZER.url("user_id", user_id, 'str', max_length=80, min_length=1),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class GroupUserOperations(object):
"""GroupUserOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~api_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
service_name: str,
group_id: str,
filter: Optional[str] = None,
top: Optional[int] = None,
skip: Optional[int] = None,
**kwargs: Any
) -> Iterable["_models.UserCollection"]:
"""Lists a collection of user entities associated with the group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param group_id: Group identifier. Must be unique in the current API Management service
instance.
:type group_id: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| name |
filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>| firstName
| filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>|
lastName | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith
|</br>| email | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith
|</br>| registrationDate | filter | ge, le, eq, ne, gt, lt | |</br>| note | filter | ge,
le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UserCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~api_management_client.models.UserCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UserCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
service_name=service_name,
group_id=group_id,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
service_name=service_name,
group_id=group_id,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("UserCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}/users'} # type: ignore
@distributed_trace
def check_entity_exists(
self,
resource_group_name: str,
service_name: str,
group_id: str,
user_id: str,
**kwargs: Any
) -> bool:
"""Checks that user entity specified by identifier is associated with the group entity.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param group_id: Group identifier. Must be unique in the current API Management service
instance.
:type group_id: str
:param user_id: User identifier. Must be unique in the current API Management service instance.
:type user_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_check_entity_exists_request(
resource_group_name=resource_group_name,
service_name=service_name,
group_id=group_id,
user_id=user_id,
subscription_id=self._config.subscription_id,
template_url=self.check_entity_exists.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_entity_exists.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}/users/{userId}'} # type: ignore
@distributed_trace
def create(
self,
resource_group_name: str,
service_name: str,
group_id: str,
user_id: str,
**kwargs: Any
) -> "_models.UserContract":
"""Add existing user to existing group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param group_id: Group identifier. Must be unique in the current API Management service
instance.
:type group_id: str
:param user_id: User identifier. Must be unique in the current API Management service instance.
:type user_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: UserContract, or the result of cls(response)
:rtype: ~api_management_client.models.UserContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UserContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_create_request(
resource_group_name=resource_group_name,
service_name=service_name,
group_id=group_id,
user_id=user_id,
subscription_id=self._config.subscription_id,
template_url=self.create.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('UserContract', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('UserContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}/users/{userId}'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name: str,
service_name: str,
group_id: str,
user_id: str,
**kwargs: Any
) -> None:
"""Remove existing user from existing group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param group_id: Group identifier. Must be unique in the current API Management service
instance.
:type group_id: str
:param user_id: User identifier. Must be unique in the current API Management service instance.
:type user_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
service_name=service_name,
group_id=group_id,
user_id=user_id,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}/users/{userId}'} # type: ignore
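# A minimal usage sketch (assumption: this operation group is exposed as
# `group_user` on the generated service client, as in azure-mgmt-apimanagement's
# ApiManagementClient):
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.apimanagement import ApiManagementClient
#
#     client = ApiManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for user in client.group_user.list("<rg>", "<service>", "<group-id>"):
#         print(user.email)
#     exists = client.group_user.check_entity_exists(
#         "<rg>", "<service>", "<group-id>", "<user-id>")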
|
|
'''
Created on Jul 18, 2011
@author: sean
'''
from ...asttools import Visitor, visit_children
import _ast
from ...asttools.visitors.symbol_visitor import get_symbols
try:
from networkx import DiGraph
except ImportError:
DiGraph = None
def collect_(self, node):
names = set()
for child in self.children(node):
names.update(self.visit(child))
if hasattr(node, 'ctx'):
if isinstance(node.ctx, _ast.Store):
self.modified.update(names)
elif isinstance(node.ctx, _ast.Load):
self.used.update(names)
return names
class CollectNodes(Visitor):
def __init__(self, call_deps=False):
self.graph = DiGraph()
self.modified = set()
self.used = set()
self.undefined = set()
self.sources = set()
self.targets = set()
self.context_names = set()
self.call_deps = call_deps
visitDefault = collect_
def visitName(self, node):
if isinstance(node.ctx, _ast.Store):
self.modified.add(node.id)
elif isinstance(node.ctx, _ast.Load):
            self.used.add(node.id)
if not self.graph.has_node(node.id):
self.graph.add_node(node.id)
if isinstance(node.ctx, _ast.Load):
self.undefined.add(node.id)
for ctx_var in self.context_names:
if not self.graph.has_edge(node.id, ctx_var):
self.graph.add_edge(node.id, ctx_var)
return {node.id}
def visitalias(self, node):
name = node.asname if node.asname else node.name
if '.' in name:
name = name.split('.', 1)[0]
if not self.graph.has_node(name):
self.graph.add_node(name)
return {name}
def visitCall(self, node):
left = self.visit(node.func)
right = set()
for attr in ('args', 'keywords'):
for child in getattr(node, attr):
if child:
right.update(self.visit(child))
for attr in ('starargs', 'kwargs'):
child = getattr(node, attr)
if child:
right.update(self.visit(child))
for src in left | right:
if not self.graph.has_node(src):
self.undefined.add(src)
if self.call_deps:
add_edges(self.graph, left, right)
add_edges(self.graph, right, left)
right.update(left)
return right
def visitSubscript(self, node):
if isinstance(node.ctx, _ast.Load):
return collect_(self, node)
else:
sources = self.visit(node.slice)
targets = self.visit(node.value)
self.modified.update(targets)
add_edges(self.graph, targets, sources)
return targets
def handle_generators(self, generators):
defined = set()
required = set()
for generator in generators:
            required.update(get_symbols(generator, _ast.Load) - defined)
defined.update(get_symbols(generator, _ast.Store))
return defined, required
def visitListComp(self, node):
defined, required = self.handle_generators(node.generators)
required.update(get_symbols(node.elt, _ast.Load) - defined)
for symbol in required:
if not self.graph.has_node(symbol):
self.graph.add_node(symbol)
self.undefined.add(symbol)
return required
def visitSetComp(self, node):
defined, required = self.handle_generators(node.generators)
required.update(get_symbols(node.elt, _ast.Load) - defined)
for symbol in required:
if not self.graph.has_node(symbol):
self.graph.add_node(symbol)
self.undefined.add(symbol)
return required
def visitDictComp(self, node):
defined, required = self.handle_generators(node.generators)
required.update(get_symbols(node.key, _ast.Load) - defined)
required.update(get_symbols(node.value, _ast.Load) - defined)
for symbol in required:
if not self.graph.has_node(symbol):
self.graph.add_node(symbol)
self.undefined.add(symbol)
return required
def add_edges(graph, targets, sources):
for target in targets:
for src in sources:
edge = target, src
if not graph.has_edge(*edge):
graph.add_edge(*edge)
class GlobalDeps(object):
def __init__(self, gen, nodes):
self.nodes = nodes
self.gen = gen
def __enter__(self):
self._old_context_names = set(self.gen.context_names)
self.gen.context_names.update(self.nodes)
def __exit__(self, *args):
self.gen.context_names = self._old_context_names
class GraphGen(CollectNodes):
'''
Create a graph from the execution flow of the ast
'''
visitModule = visit_children
def depends_on(self, nodes):
return GlobalDeps(self, set(nodes))
def visit_lambda(self, node):
sources = self.visit(node.args)
self.sources.update(sources)
self.visit(node.body)
def visitLambda(self, node):
gen = GraphGen()
gen.visit_lambda(node)
for undef in gen.undefined:
if not self.graph.has_node(undef):
self.graph.add_node(undef)
return gen.undefined
def visit_function_def(self, node):
sources = self.visit(node.args)
self.sources.update(sources)
for stmnt in node.body:
self.visit(stmnt)
def visitFunctionDef(self, node):
gen = GraphGen()
gen.visit_function_def(node)
if not self.graph.has_node(node.name):
self.graph.add_node(node.name)
for undef in gen.undefined:
if not self.graph.has_node(undef):
self.graph.add_node(undef)
add_edges(self.graph, [node.name], gen.undefined)
return gen.undefined
def visitAssign(self, node):
nodes = self.visit(node.value)
tsymols = get_symbols(node, _ast.Store)
re_defined = tsymols.intersection(set(self.graph.nodes()))
if re_defined:
add_edges(self.graph, re_defined, re_defined)
targets = set()
for target in node.targets:
targets.update(self.visit(target))
add_edges(self.graph, targets, nodes)
return targets | nodes
def visitAugAssign(self, node):
targets = self.visit(node.target)
values = self.visit(node.value)
self.modified.update(targets)
for target in targets:
for value in values:
edge = target, value
if not self.graph.has_edge(*edge):
self.graph.add_edge(*edge)
for tgt2 in targets:
edge = target, tgt2
if not self.graph.has_edge(*edge):
self.graph.add_edge(*edge)
return targets | values
def visitFor(self, node):
nodes = set()
targets = self.visit(node.target)
for_iter = self.visit(node.iter)
nodes.update(targets)
nodes.update(for_iter)
add_edges(self.graph, targets, for_iter)
with self.depends_on(for_iter):
for stmnt in node.body:
nodes.update(self.visit(stmnt))
return nodes
def visitIf(self, node):
nodes = set()
names = self.visit(node.test)
nodes.update(names)
with self.depends_on(names):
for stmnt in node.body:
nodes.update(self.visit(stmnt))
for stmnt in node.orelse:
nodes.update(self.visit(stmnt))
return nodes
def visitReturn(self, node):
targets = self.visit(node.value)
self.targets.update(targets)
return targets
def visitWith(self, node):
nodes = set()
targets = self.visit(node.context_expr)
nodes.update(targets)
if node.optional_vars is None:
vars = ()
else:
vars = self.visit(node.optional_vars)
nodes.update(vars)
add_edges(self.graph, vars, targets)
with self.depends_on(targets):
for stmnt in node.body:
nodes.update(self.visit(stmnt))
return nodes
def visitWhile(self, node):
nodes = set()
targets = self.visit(node.test)
nodes.update(targets)
with self.depends_on(targets):
for stmnt in node.body:
nodes.update(self.visit(stmnt))
for stmnt in node.orelse:
nodes.update(self.visit(stmnt))
return nodes
def visitTryFinally(self, node):
assert len(node.body) == 1
nodes = self.visit(node.body[0])
        with self.depends_on(nodes):
            for stmnt in node.finalbody:
                nodes.update(self.visit(stmnt))
        return nodes
def visitTryExcept(self, node):
body_nodes = set()
for stmnt in node.body:
body_nodes.update(self.visit(stmnt))
all_nodes = set(body_nodes)
for hndlr in node.handlers:
nodes = set(body_nodes)
if hndlr.name:
nodes.update(self.visit(hndlr.name))
if hndlr.type:
nodes.update(self.visit(hndlr.type))
with self.depends_on(nodes):
for stmnt in hndlr.body:
nodes.update(self.visit(stmnt))
all_nodes.update(nodes)
nodes = set(body_nodes)
with self.depends_on(nodes):
for stmnt in node.orelse:
nodes.update(self.visit(stmnt))
all_nodes.update(nodes)
return all_nodes
def make_graph(node, call_deps=False):
'''
Create a dependency graph from an ast node.
:param node: ast node.
:param call_deps: if true, then the graph will create a cyclic dependance for all
function calls. (i.e for `a.b(c)` a depends on b and b depends on a)
:returns: a tuple of (graph, undefined)
'''
gen = GraphGen(call_deps=call_deps)
gen.visit(node)
return gen.graph, gen.undefined
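# A minimal usage sketch (assumes this package is importable and networkx is
# installed, since DiGraph is set to None when that import fails):
#
#     import ast
#     graph, undefined = make_graph(ast.parse("b = a + 1\nc = b * 2"))
#     sorted(graph.edges())   # [('b', 'a'), ('c', 'b')]
#     sorted(undefined)       # ['a']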
|
|
"""Test zha sensor."""
from unittest import mock
import pytest
import zigpy.zcl.clusters.general as general
import zigpy.zcl.clusters.homeautomation as homeautomation
import zigpy.zcl.clusters.measurement as measurement
import zigpy.zcl.clusters.smartenergy as smartenergy
from homeassistant.components.sensor import DOMAIN
import homeassistant.config as config_util
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_UNIT_SYSTEM,
CONF_UNIT_SYSTEM_IMPERIAL,
CONF_UNIT_SYSTEM_METRIC,
POWER_WATT,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
UNIT_PERCENTAGE,
)
from homeassistant.helpers import restore_state
from homeassistant.util import dt as dt_util
from .common import (
async_enable_traffic,
async_test_rejoin,
find_entity_id,
send_attribute_report,
send_attributes_report,
)
async def async_test_humidity(hass, cluster, entity_id):
"""Test humidity sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 1000, 2: 100})
assert_state(hass, entity_id, "10.0", UNIT_PERCENTAGE)
async def async_test_temperature(hass, cluster, entity_id):
"""Test temperature sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 2900, 2: 100})
assert_state(hass, entity_id, "29.0", TEMP_CELSIUS)
async def async_test_pressure(hass, cluster, entity_id):
"""Test pressure sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 1000, 2: 10000})
assert_state(hass, entity_id, "1000", "hPa")
await send_attributes_report(hass, cluster, {0: 1000, 20: -1, 16: 10000})
assert_state(hass, entity_id, "1000", "hPa")
async def async_test_illuminance(hass, cluster, entity_id):
"""Test illuminance sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 10, 2: 20})
assert_state(hass, entity_id, "1.0", "lx")
async def async_test_metering(hass, cluster, entity_id):
"""Test metering sensor."""
await send_attributes_report(hass, cluster, {1025: 1, 1024: 12345, 1026: 100})
assert_state(hass, entity_id, "12345.0", "unknown")
async def async_test_electrical_measurement(hass, cluster, entity_id):
"""Test electrical measurement sensor."""
with mock.patch(
(
"homeassistant.components.zha.core.channels.homeautomation"
".ElectricalMeasurementChannel.divisor"
),
new_callable=mock.PropertyMock,
) as divisor_mock:
divisor_mock.return_value = 1
await send_attributes_report(hass, cluster, {0: 1, 1291: 100, 10: 1000})
assert_state(hass, entity_id, "100", POWER_WATT)
await send_attributes_report(hass, cluster, {0: 1, 1291: 99, 10: 1000})
assert_state(hass, entity_id, "99", POWER_WATT)
divisor_mock.return_value = 10
await send_attributes_report(hass, cluster, {0: 1, 1291: 1000, 10: 5000})
assert_state(hass, entity_id, "100", POWER_WATT)
await send_attributes_report(hass, cluster, {0: 1, 1291: 99, 10: 5000})
assert_state(hass, entity_id, "9.9", POWER_WATT)
@pytest.mark.parametrize(
"cluster_id, test_func, report_count",
(
(measurement.RelativeHumidity.cluster_id, async_test_humidity, 1),
(measurement.TemperatureMeasurement.cluster_id, async_test_temperature, 1),
(measurement.PressureMeasurement.cluster_id, async_test_pressure, 1),
(measurement.IlluminanceMeasurement.cluster_id, async_test_illuminance, 1),
(smartenergy.Metering.cluster_id, async_test_metering, 1),
(
homeautomation.ElectricalMeasurement.cluster_id,
async_test_electrical_measurement,
1,
),
),
)
async def test_sensor(
hass,
zigpy_device_mock,
zha_device_joined_restored,
cluster_id,
test_func,
report_count,
):
"""Test zha sensor platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [cluster_id, general.Basic.cluster_id],
"out_cluster": [],
"device_type": 0x0000,
}
}
)
cluster = zigpy_device.endpoints[1].in_clusters[cluster_id]
zha_device = await zha_device_joined_restored(zigpy_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
await async_enable_traffic(hass, [zha_device], enabled=False)
await hass.async_block_till_done()
# ensure the sensor entity was created
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and devices
await async_enable_traffic(hass, [zha_device])
    # test that the sensor now has a state of unknown
assert hass.states.get(entity_id).state == STATE_UNKNOWN
# test sensor associated logic
await test_func(hass, cluster, entity_id)
# test rejoin
await async_test_rejoin(hass, zigpy_device, [cluster], (report_count,))
def assert_state(hass, entity_id, state, unit_of_measurement):
"""Check that the state is what is expected.
This is used to ensure that the logic in each sensor class handled the
attribute report it received correctly.
"""
hass_state = hass.states.get(entity_id)
assert hass_state.state == state
assert hass_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == unit_of_measurement
@pytest.fixture
def hass_ms(hass):
"""Hass instance with measurement system."""
async def _hass_ms(meas_sys):
await config_util.async_process_ha_core_config(
hass, {CONF_UNIT_SYSTEM: meas_sys}
)
await hass.async_block_till_done()
return hass
return _hass_ms
@pytest.fixture
def core_rs(hass_storage):
"""Core.restore_state fixture."""
def _storage(entity_id, uom, state):
now = dt_util.utcnow().isoformat()
hass_storage[restore_state.STORAGE_KEY] = {
"version": restore_state.STORAGE_VERSION,
"key": restore_state.STORAGE_KEY,
"data": [
{
"state": {
"entity_id": entity_id,
"state": str(state),
"attributes": {ATTR_UNIT_OF_MEASUREMENT: uom},
"last_changed": now,
"last_updated": now,
"context": {
"id": "3c2243ff5f30447eb12e7348cfd5b8ff",
"user_id": None,
},
},
"last_seen": now,
}
],
}
return
return _storage
@pytest.mark.parametrize(
"uom, raw_temp, expected, restore",
[
(TEMP_CELSIUS, 2900, 29, False),
(TEMP_CELSIUS, 2900, 29, True),
(TEMP_FAHRENHEIT, 2900, 84, False),
(TEMP_FAHRENHEIT, 2900, 84, True),
],
)
async def test_temp_uom(
uom,
raw_temp,
expected,
restore,
hass_ms,
core_rs,
zigpy_device_mock,
zha_device_restored,
):
"""Test zha temperature sensor unit of measurement."""
entity_id = "sensor.fake1026_fakemodel1026_004f3202_temperature"
if restore:
core_rs(entity_id, uom, state=(expected - 2))
hass = await hass_ms(
CONF_UNIT_SYSTEM_METRIC if uom == TEMP_CELSIUS else CONF_UNIT_SYSTEM_IMPERIAL
)
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [
measurement.TemperatureMeasurement.cluster_id,
general.Basic.cluster_id,
],
"out_cluster": [],
"device_type": 0x0000,
}
}
)
cluster = zigpy_device.endpoints[1].temperature
zha_device = await zha_device_restored(zigpy_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
if not restore:
await async_enable_traffic(hass, [zha_device], enabled=False)
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and devices
await async_enable_traffic(hass, [zha_device])
# test that the sensors now have a state of unknown
if not restore:
assert hass.states.get(entity_id).state == STATE_UNKNOWN
await send_attribute_report(hass, cluster, 0, raw_temp)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state is not None
assert round(float(state.state)) == expected
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == uom
async def test_electrical_measurement_init(
hass, zigpy_device_mock, zha_device_joined,
):
"""Test proper initialization of the electrical measurement cluster."""
cluster_id = homeautomation.ElectricalMeasurement.cluster_id
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [cluster_id, general.Basic.cluster_id],
"out_cluster": [],
"device_type": 0x0000,
}
}
)
cluster = zigpy_device.endpoints[1].in_clusters[cluster_id]
zha_device = await zha_device_joined(zigpy_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
# allow traffic to flow through the gateway and devices
await async_enable_traffic(hass, [zha_device])
    # test that the sensor now has a state of unknown
assert hass.states.get(entity_id).state == STATE_UNKNOWN
await send_attributes_report(hass, cluster, {0: 1, 1291: 100, 10: 1000})
assert int(hass.states.get(entity_id).state) == 100
channel = zha_device.channels.pools[0].all_channels["1:0x0b04"]
assert channel.divisor == 1
assert channel.multiplier == 1
# update power divisor
await send_attributes_report(hass, cluster, {0: 1, 1291: 20, 0x0403: 5, 10: 1000})
assert channel.divisor == 5
assert channel.multiplier == 1
assert hass.states.get(entity_id).state == "4.0"
await send_attributes_report(hass, cluster, {0: 1, 1291: 30, 0x0605: 10, 10: 1000})
assert channel.divisor == 10
assert channel.multiplier == 1
assert hass.states.get(entity_id).state == "3.0"
# update power multiplier
await send_attributes_report(hass, cluster, {0: 1, 1291: 20, 0x0402: 6, 10: 1000})
assert channel.divisor == 10
assert channel.multiplier == 6
assert hass.states.get(entity_id).state == "12.0"
await send_attributes_report(hass, cluster, {0: 1, 1291: 30, 0x0604: 20, 10: 1000})
assert channel.divisor == 10
assert channel.multiplier == 20
assert hass.states.get(entity_id).state == "60.0"
|
|
import os, sys
import pygame
from utils import Util
from pygame.locals import *
from threading import Timer
import threading
from sprite import *
import random
import sound
class RockSpanTimer(threading.Thread):
def __init__(self, controller, interval):
threading.Thread.__init__(self)
self.event = threading.Event()
self.controller = controller
self.interval = interval
def run(self):
        while not self.event.is_set():
            # spawn a rock, then sleep until the next interval (or until stopped)
            self.controller.rock_spawner()
            self.event.wait(self.interval)
def stop(self):
self.event.set()
class Controller:
WIDTH = 800
HEIGHT = 600
def __init__(self):
self.screen = pygame.display.get_surface()
self.area = self.screen.get_rect()
self.lives = 3
self.score = 0
self.started = False
self.splash, tmp = Util.load_image('splash.png')
self.splash_pos = [self.screen.get_rect().width / 2 - tmp.width / 2, \
self.screen.get_rect().height / 2 - tmp.height / 2]
self.ship = Ship([400, 300], [0, 0], 0)
self.allships = pygame.sprite.RenderPlain((self.ship,))
self.rock_group = pygame.sprite.Group()
self.missile_group = pygame.sprite.Group()
self.explosion_group = pygame.sprite.Group()
self.timer = RockSpanTimer(self, 1.5)
def new_game(self):
self.lives = 3
self.score = 0
self.started = True
self.ship = Ship([400, 300], [0, 0], 0)
self.ship.set_game_status(True)
self.allships = pygame.sprite.RenderPlain((self.ship,))
self.rock_group = pygame.sprite.Group()
self.missile_group = pygame.sprite.Group()
self.explosion_group = pygame.sprite.Group()
sound.soundtrack.play()
self.timer = RockSpanTimer(self, 1.5)
self.timer.start()
def game_over(self):
self.timer.stop()
sound.soundtrack.stop()
self.ship.set_game_status(False)
self.ship.set_thrust(False)
self.started = False
self.rock_group.empty()
self.missile_group.empty()
self.explosion_group.empty()
def event_handler(self, event):
if self.started == False and event.type == MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
inwidth = (400 - 400 / 2) < pos[0] < (400 + 400 / 2)
inheight = (300 - 300 / 2) < pos[1] < (300 + 300 / 2)
if (not self.started) and inwidth and inheight:
print 'new game started'
self.new_game()
if event.type == QUIT:
self.timer.stop()
sys.exit()
elif event.type == KEYDOWN and event.key == K_ESCAPE:
self.timer.stop()
sys.exit()
if self.started != True:
return
elif event.type == KEYDOWN and event.key == K_UP:
self.ship.set_thrust(True)
elif event.type == KEYDOWN and event.key == K_LEFT:
self.ship.increment_angle_vel()
elif event.type == KEYDOWN and event.key == K_RIGHT:
self.ship.decrement_angle_vel()
elif event.type == KEYUP and event.key == K_UP:
self.ship.set_thrust(False)
elif event.type == KEYUP and event.key == K_LEFT:
self.ship.decrement_angle_vel()
elif event.type == KEYUP and event.key == K_RIGHT:
self.ship.increment_angle_vel()
elif event.type == KEYUP and event.key == K_SPACE:
missile = self.ship.shoot()
self.missile_group.add(missile)
def rock_spawner(self):
if not self.started:
return
if len(self.rock_group.sprites()) >= Rock.LIMIT:
return
rock_pos = [random.randrange(0, self.area.width), random.randrange(0, self.area.height)]
rock_vel = [random.random() * 1.3 - .3, random.random() * 1.3 - .3]
rock_angle_vel = random.random() * 1.0 - .1
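        # Rocks get faster as the score grows: velocity is scaled by (score * 0.5 + 1).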
add_vel = self.score * 0.5 + 1
rock_vel = [rock_vel[0] * add_vel, rock_vel[1] * add_vel]
rock = Rock(rock_pos, rock_vel, rock_angle_vel)
distance = Util.dist(rock.rect.center, self.ship.rect.center)
if distance < 200:
return
self.rock_group.add(rock)
def update(self):
self.allships.update()
for missile in self.missile_group.sprites():
if missile.update() == True:
missile.kill()
self.rock_group.update()
# check for collision
rocks_hit_list = pygame.sprite.spritecollide(self.ship, self.rock_group, True)
for rock in rocks_hit_list:
# TODO: play explosion
center = rock.rect.center
explosion = Explosion(center)
self.explosion_group.add(explosion)
self.lives -= 1
if self.lives == 0:
self.game_over()
return
missile_rock_collision = pygame.sprite.groupcollide(self.missile_group, self.rock_group, True, True)
for missile, rocks in missile_rock_collision.iteritems():
num = len(rocks)
self.score += num
for rock in rocks:
center = rock.rect.center
explosion = Explosion(center)
self.explosion_group.add(explosion)
for explosion in self.explosion_group.sprites():
if explosion.update() == True:
explosion.kill()
def draw(self):
self.allships.draw(self.screen)
self.missile_group.draw(self.screen)
self.rock_group.draw(self.screen)
self.explosion_group.draw(self.screen)
        if not self.started:
self.screen.blit(self.splash, self.splash_pos)
def main():
pygame.init()
os.environ['SDL_VIDEO_CENTERED'] = '1' # center the window
screen = pygame.display.set_mode((Controller.WIDTH, Controller.HEIGHT))
pygame.display.set_caption('Aircraft watkinsong@163.com')
pygame.mouse.set_visible(1)
controller = Controller()
bg, bg_rect = Util.load_image('nebula_blue.f2014.png')
bg = bg.convert()
screen.blit(bg, (0, 0))
pygame.display.flip()
dubris, dubris_rect = Util.load_image('debris2_blue.png')
screen.blit(dubris, (0, 0))
pygame.display.flip()
#Prepare Game Objects
clock = pygame.time.Clock()
#Main Loop
counter = 0
while 1:
counter += 1
clock.tick(60)
for event in pygame.event.get():
controller.event_handler(event)
screen.blit(bg, (0, 0))
wtime = (counter / 4) % screen.get_rect().width
screen.blit(dubris, (wtime, 0))
draw_text(screen, controller)
controller.update()
controller.draw()
pygame.display.flip()
def draw_text(screen, controller):
livefont = pygame.font.SysFont("Times", 25)
label = livefont.render('lives', 1, (255, 255, 255))
screen.blit(label, (50, 30))
livefont = pygame.font.SysFont("Times", 25)
label = livefont.render('score', 1, (255, 255, 255))
screen.blit(label, (720, 30))
livefont = pygame.font.SysFont("Times", 25)
label = livefont.render(str(controller.lives), 1, (255, 255, 255))
screen.blit(label, (50, 60))
livefont = pygame.font.SysFont("Times", 25)
label = livefont.render(str(controller.score), 1, (255, 255, 255))
screen.blit(label, (720, 60))
if __name__ == '__main__':
main()
|
|
import numpy as np
from numpy.random import normal
from namedlist import namedlist
from collections import namedtuple, ChainMap
pi = np.pi
pi2 = 2*pi
__all__ = ['SyntheticECGGenerator']
def get_respiratory_phase(num_samples, sampling_rate, frequency=15.0/60.0, stdev_factor=0.05):
"""
Returns:
array[num_samples]: the phase (as func of time)
"""
w = pi2 * frequency
# Use sqrt to properly rescale Gaussian process.
dw = np.sqrt(w * stdev_factor)
dt = 1/np.float64(sampling_rate)
sqdt = np.sqrt(dt)
t = dt * np.arange(num_samples)
phi_init = pi2 * np.random.rand()
phase = phi_init + t*w + dw*sqdt*np.random.randn(num_samples).cumsum()
return phase
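# Added sketch: the phase above follows a Brownian-drift model,
#     phi(t) = phi_0 + w * t + sqrt(w * stdev_factor) * W(t),
# where W(t) is a discretized Wiener process (cumulative sum of sqrt(dt) * N(0, 1)
# increments), so the instantaneous respiratory frequency fluctuates around
# `frequency`.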
class SyntheticECGGenerator:
""" Generate synthetic ECG Signals
    >>> get_signal = SyntheticECGGenerator()
    >>> signal = get_signal()
    >>> input_, target = signal
    Paper: P. McSharry et al.,
IEEE TRANSACTIONS ON BIOMEDICAL ENGINEERING, VOL. 50, NO. 3, MARCH 2003
Args:
sampling_rate: samples per second (default 250)
num_samples: number of samples to generate on call (default 5000)
        heart_rate: heart rate in beats per second (default 60/60)
hr_stdev_factor: fraction of 'heart_rate' as its variability (default 0.05)
respiration_rate: respiration rate in beats per second (default 15/60)
rr_stdev_factor: fraction of 'respiration_rate' as its variability (default 0.2)
EKG_noise_strength: standard deviation of additive noise (default 0.05)
EKG_fluctuation_strength: stdev factor for variability of EKG waves (default 1)
RESP_noise_strength: stdev of additive noise added to respiration signal (default 0.1)
        esk_strength: ESK coupling strength, the EKG signal varies with
1 + esk_strength * respiration (default 0.1)
rsa_strength: strength of the respiratory sinus arrhythmia (default 1.0)
rsa_dispersion: slope of sensitivity increase at the RSA-sensitive part in the EKG (default 0.1).
rsa_width_shift: width of the RSA-sensitive part in the EKG (default 0.0).
seed: random seed to be passed to numpy.random.seed (default None)
kwargs:
Additional parameters can modify the WaveParameter for 'P', 'Q', 'R', 'S', and 'T' waves.
It should have the form '<Wave>_<parameter>=<value>'.
parameter:
- a: Amplitude of the wave
- b: Half width of the peak in radian.
- theta: Phase of the peak in radian.
- esk: electrostatic coupling of the peak to RESP.
- da, db, dtheta: standard deviation of peak-to-peak variability of above parameters.
"""
Signal = namedtuple("SyntheticEKG", ["input", "target"])
WaveParameter = namedlist(
"Parameter", ["a", "b", "theta", "esk",
"da", "db", "dtheta"])
def __init__(self,
sampling_rate=250,
num_samples=5000,
heart_rate=60.0/60.0,
hr_stdev_factor=0.03,
respiration_rate=15.0/60.0,
rr_stdev_factor=0.2,
EKG_noise_strength=0.05,
EKG_fluctuation_strength=0.2,
RESP_noise_strength=0.1,
esk_strength=0.1,
rsa_strength=1.0,
rsa_dispersion=0.1,
rsa_width_shift=0.0,
seed=None,
**kwargs):
self.sampling_rate = sampling_rate
self._hr_stdev_factor = hr_stdev_factor
self.heart_rate = heart_rate
self.respiration_rate = respiration_rate
self.rr_stdev_factor = rr_stdev_factor
self.EKG_noise_strength = EKG_noise_strength
self.EKG_fluctuation_strength = EKG_fluctuation_strength
self.RESP_noise_strength = RESP_noise_strength
self.esk_strength = esk_strength
self.rsa_strength = rsa_strength
self.rsa_width_shift = rsa_width_shift
self.rsa_dispersion = rsa_dispersion
self.num_samples = num_samples
self.seed = seed
self.WAVE_PARAMETERS = {
"P": self.WaveParameter(a= .25, b=pi2*.04, theta=-pi/3, esk= .5, da=0.05, db=pi2*0.002, dtheta=pi2*0.03),
"Q": self.WaveParameter(a=-.20, b=pi2*.01, theta=-pi/12, esk=-.5, da=0.02, db=pi2*0.001, dtheta=pi2*0.03),
"R": self.WaveParameter(a=2.20, b=pi2*.015, theta=0, esk= .5, da=.15, db=pi2*0.002, dtheta=pi2*0.03),
"S": self.WaveParameter(a=-.15, b=pi2*.01, theta=pi/12, esk=-.5, da=0.02, db=pi2*0.001, dtheta=pi2*0.03),
"T": self.WaveParameter(a= .60, b=pi2*.06, theta=pi/1.7, esk= .5, da=0.1, db=pi2*0.002, dtheta=pi2*0.03)
}
for k, v in kwargs.items():
wp_tuple = k.split('_')
if wp_tuple[0] in self.WAVE_PARAMETERS:
wname, pname = wp_tuple
self.set_wave_param(wname, pname, v)
@property
def seed(self):
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
np.random.seed(seed)
@property
def heart_rate(self):
return self._heart_rate
@heart_rate.setter
def heart_rate(self, hr):
self._heart_rate = hr
self.w_heart = pi2 * hr
self.hr_stdev_factor = self._hr_stdev_factor
@property
def hr_stdev_factor(self):
return self._hr_stdev_factor
@hr_stdev_factor.setter
def hr_stdev_factor(self, dhr_fac):
self._hr_stdev_factor = dhr_fac
# Use sqrt to properly rescale Gaussian process.
self.dw_heart = np.sqrt(self.w_heart * dhr_fac)
def set_wave_param(self, wave_name, param_name, val):
setattr(self.WAVE_PARAMETERS[wave_name], param_name, val)
def phase_deriv(self, theta, resp_state):
"""Derivative of the heartbeat phase
Args:
theta: heartbeat phase
resp_state: state of the respiratory cycle (-1, 1).
Negative values decelerate, and positive values
accelerate the heart beat.
General form:
tht' = w + Q(tht, R)
where R is the respiratory oscillation.
Coupling function Q
Q(tht, R) = strength R(t) / (1+exp((cos(tht)+shift)/width))
"""
Q = self.rsa_strength/(1+np.exp((np.cos(theta)+self.rsa_width_shift)/self.rsa_dispersion)) * resp_state
return self.w_heart + Q
def EKG_from_phase(self, phase, RESP=None):
"""Computes EKG from a heartbeat phase timeseries
Args:
phase: numpy.ndarray, heartbeat phase.
RESP: numpy.ndarray, respiratory oscillation in (-1, 1).
RESP modulates the amplitude of each EKG wave with an absolute
strength self.esk_strength, and a wave-specific contribution esk.
"""
if RESP is None:
RESP = np.zeros_like(phase, dtype=np.float64)
assert phase.size == RESP.size
# Local namespace is sometimes faster, often better readable
esk_strg = self.esk_strength
wavep = self.WAVE_PARAMETERS
fluc_strg = self.EKG_fluctuation_strength
EKG = np.zeros_like(phase, dtype=np.float64)
for peak_idx in range(int(min(phase) / pi2) - 10, int(max(phase) / pi2) + 10):
for a_i, b_i, tht_i, esk, da, db, dtheta in iter(wavep.values()):
a = normal(a_i, fluc_strg * da)
b = normal(b_i, fluc_strg * db)
tht = normal(tht_i, fluc_strg * dtheta)
dtht = phase - tht - peak_idx * pi2
EKG += (1+esk_strg*esk*RESP) * a * np.exp(-dtht**2 / (2*b**2))
return EKG
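    # Added sketch of the wave model used above (McSharry-style ECG): each beat k
    # contributes a sum of Gaussian waves,
    #     EKG(theta) += (1 + esk_strength * esk_i * RESP) * a_i
    #                   * exp(-(theta - theta_i - 2*pi*k)**2 / (2 * b_i**2)),
    # with per-beat jitter drawn from normal(param, EKG_fluctuation_strength * d_param).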
def show_single_trajectory(self, show=False):
import matplotlib.pyplot as plt
trajectory = self.heartbeat_trajectory()
heart_phase = trajectory[:, 0]
EKG = trajectory[:, 1]
RESP = trajectory[:, 2]
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot(211)
plt.plot(EKG)
plt.subplot(212, sharex=ax)
plt.plot(RESP)
if show: plt.show()
def get_resp_phase(self, num_samples):
return get_respiratory_phase(num_samples, self.sampling_rate, self.respiration_rate, self.rr_stdev_factor)
def heartbeat_trajectory(self):
dt = 1./np.float64(self.sampling_rate)
f = self.phase_deriv
N = self.num_samples
R = np.cos(self.get_resp_phase(N))
dW = self.dw_heart * np.sqrt(dt) * np.random.randn(N)
x = np.zeros((N), np.float64)
x[0] = pi2*np.random.rand()
for n in range(1, N):
x[n] = x[n-1] + dt * f(x[n-1], R[n-1]) + dW[n]
EKG = self.EKG_from_phase(x, R)
trajectory = np.transpose(np.vstack((x, EKG, R)))
return trajectory
def __call__(self):
heartbeat_trajectory = self.heartbeat_trajectory()
EKG = heartbeat_trajectory[:, 1]
RESP = heartbeat_trajectory[:, 2]
EKG += normal(0.0, self.EKG_noise_strength, size=EKG.size)
RESP += normal(0.0, self.RESP_noise_strength, size=RESP.size)
return self.Signal(input=EKG, target=RESP)
if __name__ == "__main__":
N = 20 * 250
gen = SyntheticECGGenerator(sampling_rate=250, num_samples=N, rsa_strength=1)
gen.show_single_trajectory(show=True)
|
|
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:10332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:10332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitaltyn address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitaltyn address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
# Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Server-side (i.e. worker side) classes and logic.
"""
import datetime
import functools
try:
import newrelic.agent
from newrelic.api import application
newrelic_loaded = True
except ImportError:
newrelic_loaded = False
from oslo_service import service
from barbican.common import config
from barbican.common import utils
from barbican import i18n as u
from barbican.model import models
from barbican.model import repositories
from barbican import queue
from barbican.tasks import common
from barbican.tasks import resources
if newrelic_loaded:
newrelic.agent.initialize('/etc/newrelic/newrelic.ini')
LOG = utils.getLogger(__name__)
CONF = config.CONF
# Maps the common/shared RetryTasks (returned from lower-level business logic
# and plugin processing) to top-level RPC tasks in the Tasks class below.
MAP_RETRY_TASKS = {
common.RetryTasks.INVOKE_CERT_STATUS_CHECK_TASK: 'check_certificate_status'
}
def retryable_order(fn):
"""Provides retry/scheduling support to Order-related tasks."""
@functools.wraps(fn)
def wrapper(method_self, *args, **kwargs):
result = fn(method_self, *args, **kwargs)
retry_rpc_method = schedule_order_retry_tasks(
fn, result, *args, **kwargs)
if retry_rpc_method:
LOG.info(
u._LI("Scheduled RPC method for retry: '%s'"),
retry_rpc_method)
return wrapper
def transactional(fn):
"""Provides request-scoped database transaction support to tasks."""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
fn_name = getattr(fn, '__name__', '????')
if not queue.is_server_side():
# Non-server mode directly invokes tasks.
fn(*args, **kwargs)
LOG.info(u._LI("Completed worker task: '%s'"), fn_name)
else:
# Manage session/transaction.
try:
fn(*args, **kwargs)
repositories.commit()
LOG.info(u._LI("Completed worker task: '%s'"), fn_name)
except Exception:
"""NOTE: Wrapped functions must process with care!
Exceptions that reach here will revert the entire transaction,
including any updates made to entities such as setting error
codes and error messages.
"""
LOG.exception(
u._LE("Problem seen processing worker task: '%s'"),
fn_name
)
repositories.rollback()
finally:
repositories.clear()
return wrapper
def monitored(fn): # pragma: no cover
"""Provides monitoring capabilities for task methods."""
# TODO(jvrbanac): Figure out how we should test third-party monitoring
# Support NewRelic Monitoring
if newrelic_loaded:
# Create a NewRelic app instance
app = application.application_instance()
def newrelic_wrapper(*args, **kwargs):
            # Resolve the real name since decorators wrap the method
if len(args) > 0 and hasattr(args[0], fn.__name__):
cls = type(args[0])
task_name = '{0}:{1}.{2}'.format(
cls.__module__,
cls.__name__,
fn.__name__
)
else:
task_name = newrelic.agent.callable_name(fn)
# Execute task under a monitored context
with newrelic.agent.BackgroundTask(app, task_name):
fn(*args, **kwargs)
return newrelic_wrapper
return fn
def schedule_order_retry_tasks(
invoked_task, retry_result, context, *args, **kwargs):
"""Schedules an Order-related task for retry.
:param invoked_task: The RPC method that was just invoked.
:param retry_result: A :class:`FollowOnProcessingStatusDTO` if follow-on
processing (such as retrying this or another task) is
required, otherwise None indicates no such follow-on
processing is required.
:param context: Queue context, not used.
:param order_id: ID of the Order entity the task to retry is for.
:param args: List of arguments passed in to the just-invoked task.
:param kwargs: Dict of arguments passed in to the just-invoked task.
:return: Returns the RPC task method scheduled for a retry, None if no RPC
task was scheduled.
"""
retry_rpc_method = None
order_id = kwargs.get('order_id')
if not retry_result or not order_id:
pass
elif common.RetryTasks.INVOKE_SAME_TASK == retry_result.retry_task:
if invoked_task:
retry_rpc_method = getattr(
invoked_task, '__name__', None)
else:
retry_rpc_method = MAP_RETRY_TASKS.get(retry_result.retry_task)
if retry_rpc_method:
LOG.debug(
'Scheduling RPC method for retry: {0}'.format(retry_rpc_method))
date_to_retry_at = datetime.datetime.utcnow() + datetime.timedelta(
milliseconds=retry_result.retry_msec)
retry_model = models.OrderRetryTask()
retry_model.order_id = order_id
retry_model.retry_task = retry_rpc_method
retry_model.retry_at = date_to_retry_at
retry_model.retry_args = args
retry_model.retry_kwargs = kwargs
retry_model.retry_count = 0
retry_repo = repositories.get_order_retry_tasks_repository()
retry_repo.create_from(retry_model)
return retry_rpc_method
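# Added note: the task methods below stack @monitored, @transactional and
# @retryable_order (outermost to innermost), so any OrderRetryTask row created by
# schedule_order_retry_tasks is presumably committed or rolled back together with
# the task's own database changes.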
class Tasks(object):
"""Tasks that can be invoked asynchronously in Barbican.
Only place task methods and implementations on this class, as they can be
called directly from the client side for non-asynchronous standalone
single-node operation.
If a new method is added that can be retried, please also add its method
name to MAP_RETRY_TASKS above.
The TaskServer class below extends this class to implement a worker-side
server utilizing Oslo messaging's RPC server. This RPC server can invoke
methods on itself, which include the methods in this class.
"""
@monitored
@transactional
@retryable_order
def process_type_order(self, context, order_id, project_id):
"""Process TypeOrder."""
LOG.info(
u._LI("Processing type order: order ID is '%s'"),
order_id
)
return resources.BeginTypeOrder().process_and_suppress_exceptions(
order_id, project_id)
@monitored
@transactional
@retryable_order
def update_order(self, context, order_id, project_id, updated_meta):
"""Update Order."""
LOG.info(
u._LI("Processing update order: order ID is '%s'"),
order_id
)
return resources.UpdateOrder().process_and_suppress_exceptions(
order_id, project_id, updated_meta)
@monitored
@transactional
@retryable_order
def check_certificate_status(self, context, order_id, project_id):
"""Check the status of a certificate order."""
LOG.info(
u._LI("Processing check certificate status on order: order ID is "
"'%s'"),
order_id
)
check_cert_order = resources.CheckCertificateStatusOrder()
return check_cert_order.process_and_suppress_exceptions(
order_id, project_id)
class TaskServer(Tasks, service.Service):
"""Server to process asynchronous tasking from Barbican API nodes.
This server is an Oslo service that exposes task methods that can
be invoked from the Barbican API nodes. It delegates to an Oslo
RPC messaging server to invoke methods asynchronously on this class.
Since this class also extends the Tasks class above, its task-based
methods are hence available to the RPC messaging server.
"""
def __init__(self):
super(TaskServer, self).__init__()
# Setting up db engine to avoid lazy initialization
repositories.setup_database_engine_and_factory()
# This property must be defined for the 'endpoints' specified below,
# as the oslo_messaging RPC server will ask for it.
self.target = queue.get_target()
# Create an oslo RPC server, that calls back on to this class
# instance to invoke tasks, such as 'process_order()' on the
# extended Tasks class above.
self._server = queue.get_server(target=self.target,
endpoints=[self])
def start(self):
LOG.info(u._LI("Starting the TaskServer"))
self._server.start()
super(TaskServer, self).start()
def stop(self):
LOG.info(u._LI("Halting the TaskServer"))
super(TaskServer, self).stop()
self._server.stop()
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Proto utility module containing helper functions.
The module handles tasks related to protobufs in word2act:
1. encodes word2act action and time_step into tf.train.Example proto2.
2. parses screeninfo protobuf into feature dictionary.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import numpy as np
from PIL import Image
import tensorflow.compat.v1 as tf
from seq2act.data_generation import string_utils
from seq2act.data_generation import view_hierarchy
def get_feature_dict(screen_info_proto, padding_shape=None, lower_case=False):
"""Gets screen feature dictionary from screen_info protobuf.
Args:
screen_info_proto: protobuf defined in word2act/proto/rehearsal.proto.
Contains screenshot and xml
padding_shape: The shape of padding size for final feature list. shape =
(max_object_num, max_word_num, max_word_length) If the shape is not given,
then returns the original list without padding.
lower_case: lower case all the ui texts.
Returns:
A feature dictionary. If padding_shape is not None, all values of the
dictionary are padded. The shape after padding is shown as 'shape = ...'.
Otherwise, shapes of values are not a fixed value.
screenshot: numpy array of screen_info_proto.screenshot
    'ui_obj_str_seq': ui object's name/content_description/resource_id, numpy
array of strings.
'ui_obj_word_id_seq': encoded word sequence, np int array, shape =
(max_object_num, max_word_num)
'ui_obj_char_id_seq': encoded char sequence, np int array, shape =
(max_object_num, max_word_num, max_word_length)
'ui_obj_type_seq': type sequence, np int array, shape = (max_object_num,)
'ui_obj_clickable_seq': clickable sequence, np int array, shape =
(max_object_num,)
    'ui_obj_cord_x_seq': x coordinate sequence, np int array, shape =
(max_object_num*2,)
    'ui_obj_cord_y_seq': y coordinate sequence, np int array, shape =
(max_object_num*2,)
'ui_obj_v_distance': vertical relation matrix, np float array,
shape = (max_object_num, max_object_num)
'ui_obj_h_distance': horizontal relation matrix, np float array, shape =
(max_object_num, max_object_num)
'ui_obj_dom_distance': dom relation matrix, np int array, shape =
(max_object_num, max_object_num)
'ui_obj_dom_location_seq': dom index from tree traversal, np int array,
shape = (max_object_num*3,)
"""
screenshot = Image.open(io.BytesIO(screen_info_proto.screenshot.content))
screenshot = np.asarray(screenshot, np.float32)
vh = view_hierarchy.ViewHierarchy()
vh.load_xml(screen_info_proto.view_hierarchy.xml.encode('utf-8'))
view_hierarchy_leaf_nodes = vh.get_leaf_nodes()
ui_object_features_dict = get_ui_objects_feature_dict(
view_hierarchy_leaf_nodes, padding_shape, lower_case)
ui_object_features_dict['screenshot'] = screenshot
return ui_object_features_dict
def get_ui_objects_feature_dict(view_hierarchy_leaf_nodes,
padding_shape=None,
lower_case=False):
"""Gets ui object features dictionary from view hierarchy leaf nodes list.
Args:
view_hierarchy_leaf_nodes: A list of view hierarchy leaf node objects.
padding_shape: The shape of padding size for final feature list. shape =
(max_object_num, max_word_num, max_word_length) If the shape is not given,
then returns the original list without padding.
lower_case: lower case all the ui texts.
Returns:
A feature dictionary. If padding_shape is not None, all values of the
dictionary are padded. The shape after padding is shown as 'shape = ...'.
Otherwise, shapes of values are not a fixed value.
'ui_obj_type_seq': type sequence, np int array, shape = (max_object_num,)
'ui_obj_word_id_seq': encoded word sequence, np int array, shape =
(max_object_num, max_word_num)
'ui_obj_char_id_seq': encoded char sequence, np int array, shape =
(max_object_num, max_word_num, max_word_length)
'ui_obj_clickable_seq': clickable sequence, np int array, shape =
(max_object_num,)
    'ui_obj_cord_x_seq': x coordinate sequence, np int array, shape =
(max_object_num*2,)
    'ui_obj_cord_y_seq': y coordinate sequence, np int array, shape =
(max_object_num*2,)
'ui_obj_v_distance': vertical relation matrix, np float array, shape =
(max_object_num, max_object_num)
'ui_obj_h_distance': horizontal relation matrix, np float array, shape =
(max_object_num, max_object_num)
'ui_obj_dom_distance': dom relation matrix, np int array, shape =
(max_object_num, max_object_num)
'ui_obj_dom_location_seq': dom index from tree traversal, np int array,
shape = (max_object_num*3,)
    'ui_obj_str_seq': ui object's name/content_description/resource_id,
numpy array of strings.
"""
ui_object_attributes = _get_ui_object_attributes(view_hierarchy_leaf_nodes,
lower_case)
vh_relations = get_view_hierarchy_leaf_relation(view_hierarchy_leaf_nodes)
if padding_shape is None:
merged_features = {}
for key in ui_object_attributes:
if key == 'obj_str_seq':
merged_features['ui_obj_str_seq'] = ui_object_attributes[key].copy()
else:
merged_features['ui_obj_' + key] = ui_object_attributes[key].copy()
for key in vh_relations:
merged_features['ui_obj_' + key] = vh_relations[key].copy()
return merged_features
else:
if not isinstance(padding_shape, tuple):
assert False, 'padding_shape %s is not a tuple.' % (str(padding_shape))
if len(padding_shape) != 3:
assert False, 'padding_shape %s contains not exactly 3 elements.' % (
str(padding_shape))
(max_object_num, max_word_num, _) = padding_shape
obj_feature_dict = {
'ui_obj_type_id_seq':
padding_array(ui_object_attributes['type_id_seq'], (max_object_num,),
-1),
'ui_obj_str_seq':
padding_array(
ui_object_attributes['obj_str_seq'], (max_object_num,),
padding_type=np.string_,
padding_value=''),
'ui_obj_word_id_seq':
padding_array(
ui_object_attributes['word_id_seq'],
(max_object_num, max_word_num),
padding_value=0),
'ui_obj_clickable_seq':
padding_array(ui_object_attributes['clickable_seq'],
(max_object_num,)),
'ui_obj_cord_x_seq':
padding_array(ui_object_attributes['cord_x_seq'],
(max_object_num * 2,)),
'ui_obj_cord_y_seq':
padding_array(ui_object_attributes['cord_y_seq'],
(max_object_num * 2,)),
'ui_obj_v_distance':
padding_array(vh_relations['v_distance'],
(max_object_num, max_object_num), 0, np.float32),
'ui_obj_h_distance':
padding_array(vh_relations['h_distance'],
(max_object_num, max_object_num), 0, np.float32),
'ui_obj_dom_distance':
padding_array(vh_relations['dom_distance'],
(max_object_num, max_object_num)),
'ui_obj_dom_location_seq':
padding_array(ui_object_attributes['dom_location_seq'],
(max_object_num * 3,)),
}
return obj_feature_dict
def _get_ui_object_attributes(view_hierarchy_leaf_nodes, lower_case=False):
"""Parses ui object informationn from a view hierachy leaf node list.
Args:
    view_hierarchy_leaf_nodes: a list of view hierarchy leaf nodes.
lower_case: lower case all the ui texts.
Returns:
An un-padded attribute dictionary as follow:
'type_id_seq': numpy array of ui object types from view hierarchy.
'word_id_seq': numpy array of encoding for words in ui object.
'char_id_seq': numpy array of encoding for words in ui object.
'clickable_seq': numpy array of ui object clickable status.
    'cord_x_seq': numpy array of ui object x coordinates.
    'cord_y_seq': numpy array of ui object y coordinates.
'dom_location_seq': numpy array of ui object depth, pre-order-traversal
index, post-order-traversal index.
    'obj_str_seq': numpy array of ui object name strings.
"""
type_sequence = []
word_id_sequence = []
char_id_sequence = []
clickable_sequence = []
cord_x_sequence = []
cord_y_sequence = []
dom_location_sequence = []
obj_str_sequence = []
def _is_ascii(s):
return all(ord(c) < 128 for c in s)
for vh_node in view_hierarchy_leaf_nodes:
ui_obj = vh_node.uiobject
type_sequence.append(ui_obj.obj_type.value)
cord_x_sequence.append(ui_obj.bounding_box.x1)
cord_x_sequence.append(ui_obj.bounding_box.x2)
cord_y_sequence.append(ui_obj.bounding_box.y1)
cord_y_sequence.append(ui_obj.bounding_box.y2)
clickable_sequence.append(ui_obj.clickable)
dom_location_sequence.extend(ui_obj.dom_location)
valid_words = [w for w in ui_obj.word_sequence if _is_ascii(w)]
word_sequence = ' '.join(valid_words)
if lower_case:
word_sequence = word_sequence.lower()
obj_str_sequence.append(word_sequence)
word_ids, char_ids = string_utils.tokenize_to_ids(word_sequence)
word_id_sequence.append(word_ids)
char_id_sequence.append(char_ids)
ui_feature = {
'type_id_seq': np.array(type_sequence),
'word_id_seq': np.array(word_id_sequence),
'clickable_seq': np.array(clickable_sequence),
'cord_x_seq': np.array(cord_x_sequence),
'cord_y_seq': np.array(cord_y_sequence),
'dom_location_seq': np.array(dom_location_sequence),
'obj_str_seq': np.array(obj_str_sequence, dtype=np.str),
}
return ui_feature
def get_view_hierarchy_leaf_relation(view_hierarchy_leaf_nodes):
"""Calculates adjacency relation from list of view hierarchy leaf nodes.
Args:
    view_hierarchy_leaf_nodes: a list of view hierarchy leaf nodes.
Returns:
An un-padded feature dictionary as follow:
'v_distance': 2d numpy array of ui object vertical adjacency relation.
'h_distance': 2d numpy array of ui object horizontal adjacency relation.
'dom_distance': 2d numpy array of ui object dom adjacency relation.
"""
vh_node_num = len(view_hierarchy_leaf_nodes)
vertical_adjacency = np.zeros((vh_node_num, vh_node_num), dtype=np.float32)
horizontal_adjacency = np.zeros((vh_node_num, vh_node_num), dtype=np.float32)
dom_adjacency = np.zeros((vh_node_num, vh_node_num), dtype=np.int64)
for row in range(len(view_hierarchy_leaf_nodes)):
for column in range(len(view_hierarchy_leaf_nodes)):
if row == column:
h_dist = v_dist = dom_dist = 0
else:
node1 = view_hierarchy_leaf_nodes[row]
node2 = view_hierarchy_leaf_nodes[column]
h_dist, v_dist = node1.normalized_pixel_distance(node2)
dom_dist = node1.dom_distance(node2)
vertical_adjacency[row][column] = v_dist
horizontal_adjacency[row][column] = h_dist
dom_adjacency[row][column] = dom_dist
return {
'v_distance': vertical_adjacency,
'h_distance': horizontal_adjacency,
'dom_distance': dom_adjacency
}
def padding_dictionary(orig_dict, padding_shape_dict, padding_type_dict,
padding_value_dict):
"""Does padding for dictionary of array or numpy array.
Args:
orig_dict: Original dictionary.
    padding_shape_dict: Dictionary of padding shapes, keys are field names,
      values are shape tuples.
    padding_type_dict: Dictionary of padding types, keys are field names,
      values are the numpy dtypes to pad with.
    padding_value_dict: Dictionary of padding values, keys are field names,
      values are the scalar values to pad with.
Returns:
A padded dictionary.
"""
# Asserting the keys of the four dictionaries are exactly same.
assert (set(orig_dict.keys()) == set(padding_shape_dict.keys()) == set(
padding_type_dict.keys()) == set(padding_value_dict.keys()))
padded_dict = {}
for key in orig_dict:
if padding_shape_dict[key]:
padded_dict[key] = padding_array(orig_dict[key], padding_shape_dict[key],
padding_value_dict[key],
padding_type_dict[key])
else:
padded_dict[key] = np.array(orig_dict[key], dtype=padding_type_dict[key])
return padded_dict
def padding_array(orig_array,
padding_shape,
padding_value=0,
padding_type=np.int64):
"""Pads orig_array according to padding shape, number and type.
The dimension of final result is the smaller dimension between
orig_array.shape and padding_shape.
For example:
a = [[1,2],[3,4]]
padding_array(a, (3,3), 0, np.int64) = [[1, 2, 0], [3, 4, 0], [0, 0, 0]]
a = [[1,2,3,4],[5,6,7,8]]
padding_array(a, (3,3), 0, np.int64) = [[1, 2, 3], [5, 6, 7], [0, 0, 0]]
Args:
orig_array: The original array before padding.
padding_shape: The shape of padding.
padding_value: The number to be padded into new array.
padding_type: The data type to be padded into new array.
Returns:
A padded numpy array.
"""
# When padding type is string, we need to initialize target_array with object
# type first. And convert it back to np.string_ after _fill_array. Because
# after initialized, numpy string array cannot hold longer string.
# For example:
# >>> a = np.array([''], dtype = np.string_)
# >>> a
# array([''], dtype='|S1')
# >>> a[0] = 'foo'
# >>> a
# array(['f'], dtype='|S1')
if padding_type == np.string_:
used_pad_type = object
else:
used_pad_type = padding_type
target_array = np.full(
shape=padding_shape, fill_value=padding_value, dtype=used_pad_type)
_fill_array(orig_array, target_array)
if padding_type == np.string_:
target_array = target_array.astype(np.string_)
return target_array
def _fill_array(orig_array, target_array):
"""Fills elements from orig_array to target_array.
If any dimension of orig_array is larger than target_array, only fills the
array of their shared dimensions.
Args:
orig_array: original array that contains the filling numbers, could be numpy
array or python list.
target_array: target array that will be filled with original array numbers,
numpy array
Raises:
TypeError: if the target_array is not a numpy array
"""
if not isinstance(target_array, np.ndarray):
raise TypeError('target array is not numpy array')
if target_array.ndim == 1:
try:
orig_length = len(orig_array)
except TypeError:
tf.logging.exception(
'orig_array %s and target_array %s dimension not fit',
orig_array, target_array)
orig_length = 0
if len(target_array) < orig_length:
target_array[:] = orig_array[:len(target_array)]
else:
target_array[:orig_length] = orig_array
return
else:
for sub_orig, sub_target in zip(orig_array, target_array):
_fill_array(sub_orig, sub_target)
def features_to_tf_example(features):
"""Converts feature dictionary into tf.Example protobuf.
This function only supports to convert np.int and np.float array.
Args:
features: A feature dictionary. Keys are field names, values are np array.
Returns:
A tf.Example protobuf.
Raises:
ValueError: Feature dictionary's value field is not supported type.
"""
new_features = {}
for k, v in features.items():
if not isinstance(v, np.ndarray):
raise ValueError('Value field: %s is not numpy array' % str((k, v)))
v = v.flatten()
if np.issubdtype(v.dtype.type, np.string_):
new_features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
elif np.issubdtype(v.dtype.type, np.integer):
new_features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))
elif np.issubdtype(v.dtype.type, np.floating):
new_features[k] = tf.train.Feature(float_list=tf.train.FloatList(value=v))
else:
raise ValueError('Value for %s is not a recognized type; v: %s type: %s' %
(k, str(v[0]), str(type(v[0]))))
return tf.train.Example(features=tf.train.Features(feature=new_features))
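# Usage sketch (added; `screen_info` stands for a rehearsal.proto ScreenInfo
# message obtained elsewhere, and the padding shape is illustrative only):
#
#   features = get_feature_dict(screen_info,
#                               padding_shape=(20, 10, 15),
#                               lower_case=True)
#   tf_example = features_to_tf_example(features)
#   serialized = tf_example.SerializeToString()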
|
|
import os
import shutil
from django.db import models
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models.signals import post_save, post_delete, class_prepared
from whoosh.fields import Schema, STORED, ID, KEYWORD, TEXT, DATETIME
from whoosh.index import create_in, open_dir, exists_in
from whoosh.qparser import QueryParser, MultifieldParser
try:
STORAGE_DIR = settings.WHOOSH_STORAGE_DIR
except AttributeError:
raise ImproperlyConfigured(u'Could not find WHOOSH_STORAGE_DIR setting. ' +
'Please make sure that you have added that setting.')
field_mapping = {
'AutoField': ID(unique=True, stored=True),
'BooleanField': STORED,
'CharField': TEXT(stored=True),
'CommaSeparatedIntegerField': STORED,
'DateField': ID,
'DateTimeField': DATETIME,
'DecimalField': STORED,
'EmailField': ID,
'FileField': ID,
'FilePathField': ID,
'FloatField': STORED,
'ImageField': ID,
'IntegerField': STORED,
'IPAddressField': ID,
'NullBooleanField': STORED,
'PositiveIntegerField': STORED,
'PositiveSmallIntegerField': STORED,
'SlugField': KEYWORD,
'SmallIntegerField': STORED,
'TextField': TEXT(stored=True),
'TimeField': ID,
'URLField': ID,
'ForeignKey': TEXT(stored=True),
}
class WhooshManager(models.Manager):
def __init__(self, *args, **kwargs):
self.default = args[0] if args else kwargs.pop("default", None)
self.fields = kwargs.pop('fields', []) + ['id']
self.real_time = kwargs.pop('real_time', True)
if not os.path.exists(STORAGE_DIR):
os.mkdir(STORAGE_DIR)
super().__init__()
# -----------------------------------------------------------
# BASIC OPERATIONS
# -----------------------------------------------------------
def contribute_to_class(self, model, name):
super().contribute_to_class(model, name)
class_prepared.connect(self.class_prepared_callback, sender=self.model)
def class_prepared_callback(self, sender, **kwargs):
self.__create_index(self.model, self.fields)
if self.real_time:
post_save.connect(self.post_save_callback, sender=self.model)
post_delete.connect(self.post_delete_callback, sender=self.model)
def post_save_callback(self, sender, instance, created, **kwargs):
        dct = {f: str(getattr(instance, f)) for f in self.fields}
index = open_dir(STORAGE_DIR)
writer = index.writer()
if created:
writer.add_document(**dct)
else:
writer.update_document(**dct)
writer.commit()
instance.on_save()
def post_delete_callback(self, sender, instance, **kwargs):
pass
def rebuild_index(self, model, instances):
if os.path.exists(STORAGE_DIR):
shutil.rmtree(STORAGE_DIR)
os.mkdir(STORAGE_DIR)
self.__create_index(model, self.fields)
for instance in instances:
self.post_save_callback(instance=instance, created=True, sender=None)
# -----------------------------------------------------------
# INDEX OPERATIONS
# -----------------------------------------------------------
@staticmethod
def get_keywords(field, item_id, num_terms=20):
index = open_dir(STORAGE_DIR)
with index.searcher() as searcher:
query = QueryParser('id', index.schema).parse(str(item_id))
results = searcher.search(query)
keywords = [keyword for keyword, score in results.key_terms(field, numterms=num_terms)]
return keywords
def get_more_like_this(self, field, item_id, limit=None):
index = open_dir(STORAGE_DIR)
with index.searcher() as searcher:
query = QueryParser('id', index.schema).parse(str(item_id))
results = searcher.search(query)
identities = results[0].more_like_this(field, top=limit)
ids = [r['id'] for r in identities]
return self.filter(id__in=ids)
# -----------------------------------------------------------
# QUERIES
# -----------------------------------------------------------
def query(self, field, query):
ids = self.__query_search(field, query)
return self.filter(id__in=ids)
def query_multifield(self, fields, query):
ids = self.__query_multifield_search(fields, query)
return self.filter(id__in=ids)
# HELPERS QUERIES
def query_list_and(self, field, query_list):
query = self.__list_to_query(query_list, 'AND')
return self.query(field, query)
def query_list_or(self, field, query_list):
query = self.__list_to_query(query_list, 'OR')
return self.query(field, query)
def query_multifield_dict(self, dict_data):
fields, query = self.__dict_to_query(dict_data)
return self.query_multifield(fields, query)
# PRIVATE METHODS
@staticmethod
def __create_index(model, fields):
if not exists_in(STORAGE_DIR):
schema_dict = {}
for field_name in fields:
field_type = model._meta.get_field(field_name).get_internal_type()
schema_dict[field_name] = field_mapping[field_type]
schema = Schema(**schema_dict)
create_in(STORAGE_DIR, schema)
@staticmethod
def __query_search(field, search, limit=None):
index = open_dir(STORAGE_DIR)
with index.searcher() as searcher:
query = QueryParser(field, index.schema).parse(str(search))
results = searcher.search(query, limit=limit)
ids = [r['id'] for r in results]
return ids
@staticmethod
def __query_multifield_search(fields, search, limit=None):
index = open_dir(STORAGE_DIR)
with index.searcher() as searcher:
query = MultifieldParser(fields, index.schema).parse(str(search))
results = searcher.search(query, limit=limit)
ids = [r['id'] for r in results]
return ids
@staticmethod
def __list_to_query(query_list, word):
and_or = " {} ".format(word)
return and_or.join(query_list)
@staticmethod
def __dict_to_query(dict_data):
fields = []
queries = []
for key, value in dict_data.items():
if value != '' and value is not None:
fields.append(key)
queries.append("{}:{}".format(key, value))
query = " ".join(queries)
return fields, query
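# Usage sketch (added; the model and field names are illustrative, and the model
# is assumed to define on_save(), which post_save_callback invokes):
#
#   class Article(models.Model):
#       title = models.CharField(max_length=200)
#       body = models.TextField()
#
#       objects = models.Manager()
#       search = WhooshManager(fields=['title', 'body'])
#
#       def on_save(self):
#           pass
#
#   Article.search.query('title', 'django')
#   Article.search.query_multifield_dict({'title': 'django', 'body': 'whoosh'})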
|
|
# coding: utf-8
"""
Tts API
Description # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_tts.configuration import Configuration
class ProjectsCollection(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'projects': 'list[Project]',
'page': 'int',
'per_page': 'int',
'page_count': 'int',
'total_count': 'int'
}
attribute_map = {
'projects': 'projects',
'page': 'page',
'per_page': 'per_page',
'page_count': 'page_count',
'total_count': 'total_count'
}
def __init__(self, projects=None, page=None, per_page=None, page_count=None, total_count=None, local_vars_configuration=None): # noqa: E501
"""ProjectsCollection - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._projects = None
self._page = None
self._per_page = None
self._page_count = None
self._total_count = None
self.discriminator = None
if projects is not None:
self.projects = projects
if page is not None:
self.page = page
if per_page is not None:
self.per_page = per_page
if page_count is not None:
self.page_count = page_count
if total_count is not None:
self.total_count = total_count
@property
def projects(self):
"""Gets the projects of this ProjectsCollection. # noqa: E501
:return: The projects of this ProjectsCollection. # noqa: E501
:rtype: list[Project]
"""
return self._projects
@projects.setter
def projects(self, projects):
"""Sets the projects of this ProjectsCollection.
:param projects: The projects of this ProjectsCollection. # noqa: E501
:type: list[Project]
"""
self._projects = projects
@property
def page(self):
"""Gets the page of this ProjectsCollection. # noqa: E501
A number of the fetched page. # noqa: E501
:return: The page of this ProjectsCollection. # noqa: E501
:rtype: int
"""
return self._page
@page.setter
def page(self, page):
"""Sets the page of this ProjectsCollection.
A number of the fetched page. # noqa: E501
:param page: The page of this ProjectsCollection. # noqa: E501
:type: int
"""
self._page = page
@property
def per_page(self):
"""Gets the per_page of this ProjectsCollection. # noqa: E501
A number of projects per page. # noqa: E501
:return: The per_page of this ProjectsCollection. # noqa: E501
:rtype: int
"""
return self._per_page
@per_page.setter
def per_page(self, per_page):
"""Sets the per_page of this ProjectsCollection.
A number of projects per page. # noqa: E501
:param per_page: The per_page of this ProjectsCollection. # noqa: E501
:type: int
"""
self._per_page = per_page
@property
def page_count(self):
"""Gets the page_count of this ProjectsCollection. # noqa: E501
A number of pages. # noqa: E501
:return: The page_count of this ProjectsCollection. # noqa: E501
:rtype: int
"""
return self._page_count
@page_count.setter
def page_count(self, page_count):
"""Sets the page_count of this ProjectsCollection.
A number of pages. # noqa: E501
:param page_count: The page_count of this ProjectsCollection. # noqa: E501
:type: int
"""
self._page_count = page_count
@property
def total_count(self):
"""Gets the total_count of this ProjectsCollection. # noqa: E501
A number of all projects. # noqa: E501
:return: The total_count of this ProjectsCollection. # noqa: E501
:rtype: int
"""
return self._total_count
@total_count.setter
def total_count(self, total_count):
"""Sets the total_count of this ProjectsCollection.
A number of all projects. # noqa: E501
:param total_count: The total_count of this ProjectsCollection. # noqa: E501
:type: int
"""
self._total_count = total_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProjectsCollection):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ProjectsCollection):
return True
return self.to_dict() != other.to_dict()
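# Usage sketch (added; values are illustrative):
#
#   collection = ProjectsCollection(projects=[], page=1, per_page=25,
#                                   page_count=0, total_count=0)
#   print(collection.to_dict())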
|
|
# Mini-project #6 - Blackjack
import simplegui
import random
# load card sprite - 936x384 - source: jfitz.com
CARD_SIZE = (72, 96)
CARD_CENTER = (36, 48)
card_images = simplegui.load_image("http://storage.googleapis.com/codeskulptor-assets/cards_jfitz.png")
CARD_BACK_SIZE = (72, 96)
CARD_BACK_CENTER = (36, 48)
card_back = simplegui.load_image("http://storage.googleapis.com/codeskulptor-assets/card_jfitz_back.png")
# draw subsequent cards with this offset
CARD_OFFSET = (24, 0)
# initialize some useful global variables
deck = None
dealer = None
player = None
in_play = False
outcome = 'Hit or stand?'
score = 0
canvas = None
frame = None
# define globals for cards
SUITS = ('C', 'S', 'H', 'D')
RANKS = ('A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K')
VALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10}
# other constants
BUST = 22
DEALER_STAND = 17
# define card class
class Card:
def __init__(self, suit, rank):
if (suit in SUITS) and (rank in RANKS):
self.suit = suit
self.rank = rank
self.face_down = False
else:
self.suit = None
self.rank = None
print "Invalid card: ", suit, rank
def __str__(self):
return self.suit + self.rank
def get_suit(self):
return self.suit
def get_rank(self):
return self.rank
def draw(self, canvas, pos):
if self.face_down:
card_loc = (CARD_CENTER[0] + CARD_SIZE[0], CARD_CENTER[1])
card_im = card_back
else:
card_loc = (CARD_CENTER[0] + CARD_SIZE[0] * RANKS.index(self.rank),
CARD_CENTER[1] + CARD_SIZE[1] * SUITS.index(self.suit))
card_im = card_images
card_pos = (pos[0] + CARD_CENTER[0], pos[1] + CARD_CENTER[1])
canvas.draw_image(card_im, # source image
card_loc, CARD_SIZE, # position in source image
card_pos, CARD_SIZE) # position on canvas
def is_ace(card):
return card.rank == 'A'
# define hand class
class Hand:
def __init__(self):
self.hole_card = None
self.cards = []
self.name = ''
def __str__(self):
return (self.name + ':' +
'[' + ', '.join(map(str, self.cards)) + ']')
def add_card(self, card, face_down=False):
card.face_down = face_down
self.cards.append(card)
print 'Card added to', self
def get_value(self):
base_value = sum([VALUES[card.rank] for card in self.cards])
has_ace = len(filter(is_ace, self.cards)) > 0
if has_ace and base_value + 10 < BUST:
total_value = base_value + 10
else:
total_value = base_value
return total_value
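    # Added example: an ace plus a 7 gives base_value 8; since 8 + 10 < BUST the
    # ace counts as 11 and get_value() returns 18. Adding a king makes base_value
    # 18, and the ace falls back to 1 because 18 + 10 would bust.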
def set_name(self, name):
self.name = name
def draw(self, canvas, pos):
x, y = pos
x_off, y_off = CARD_OFFSET
for i, card in enumerate(self.cards):
card_pos = x + i * x_off, y + i * y_off
card.draw(canvas, card_pos)
def turn_over_cards(self):
for c in self.cards:
c.face_down = False
# define deck class
class Deck:
def __init__(self):
self.cards = [Card(suit, rank) for suit in SUITS for rank in RANKS]
def shuffle(self):
random.shuffle(self.cards)
def deal_card(self):
return self.cards.pop()
def __str__(self):
return '/'.join(map(str, self.cards))
#define event handlers for buttons
def deal():
global outcome, in_play, score
global deck
global dealer, player
if in_play:
outcome = 'You forfeit the previous round! Hit or stand?'
print outcome
score -= 1
else:
outcome = 'Hit or stand?'
in_play = True
deck = Deck()
deck.shuffle()
dealer = Hand()
dealer.set_name('Dealer')
dealer.add_card(deck.deal_card(), face_down=True)
dealer.add_card(deck.deal_card())
player = Hand()
player.set_name('Player')
player.add_card(deck.deal_card())
player.add_card(deck.deal_card())
print dealer
print player
def hit():
global outcome, in_play, score
global deck, player
if in_play:
player.add_card(deck.deal_card())
if player.get_value() >= BUST:
in_play = False
outcome = 'You have busted! Deal again?'
print outcome
score -= 1
def stand():
global in_play, outcome, score
global player, dealer, deck
if not in_play:
outcome = ("That's like resting a case after the verdict. "
"Deal again?")
print outcome
return
dealer.turn_over_cards()
while dealer.get_value() < DEALER_STAND:
dealer.add_card(deck.deal_card())
if dealer.get_value() < player.get_value():
outcome = 'You win! Deal again?'
score += 1
elif dealer.get_value() >= BUST:
outcome = 'Dealer busts! Deal again?'
score += 1 # ?
else:
outcome = 'Dealer wins! Deal again?'
score -= 1
print outcome
in_play = False
def hand_position(hand):
'''Find x-position of first card so hand is centered.'''
ncards = len(hand.cards)
pos = round(300.0 - ncards * CARD_OFFSET[0] / 2)
return pos
def draw_title(canvas):
global frame
title = 'Blackjack!'
w = frame.get_canvas_textwidth(title, 36, 'serif')
canvas.draw_text(title, (600 - w, 583), 36, 'Black', 'serif')
canvas.draw_text(title, (600 - w - 3, 580), 36, 'Red', 'serif')
def draw_message(canvas):
global frame, outcome
w = frame.get_canvas_textwidth(outcome, 24, 'sans-serif')
canvas.draw_text(outcome,
(300 - w // 2, 300),
24, 'Black', 'sans-serif')
def draw_score(canvas):
global frame, score
score_str = 'Score %i' % score
w = frame.get_canvas_textwidth(score_str, 24, 'sans-serif')
canvas.draw_text(score_str, (600 - w, 24), 24, 'Black', 'sans-serif')
# draw handler
def draw(canvas):
# draw players
global dealer, player
dealer_x = hand_position(dealer)
dealer.draw(canvas, [dealer_x, 52])
player_x = hand_position(player)
player.draw(canvas, [player_x, 448])
draw_title(canvas)
draw_message(canvas)
draw_score(canvas)
# initialization frame
frame = simplegui.create_frame("Blackjack", 600, 600)
frame.set_canvas_background("Green")
#create buttons and canvas callback
frame.add_button("Deal", deal, 200)
frame.add_button("Hit", hit, 200)
frame.add_button("Stand", stand, 200)
frame.set_draw_handler(draw)
# get things rolling
deal()
frame.start()
# remember to review the grading rubric
|
|
import argparse
import calendar
import datetime as dt
import time
import tabulate
import pykafka
from pykafka.protocol import PartitionOffsetCommitRequest
#
# Helper Functions
#
def fetch_offsets(client, topic, offset):
"""Fetch raw offset data from a topic.
:param client: KafkaClient connected to the cluster.
:type client: :class:`pykafka.KafkaClient`
    :param topic: Topic to fetch offsets for.
:type topic: :class:`pykafka.topic.Topic`
:param offset: Offset to reset to. Can be earliest, latest or a datetime.
Using a datetime will reset the offset to the latest message published
*before* the datetime.
:type offset: :class:`pykafka.common.OffsetType` or
:class:`datetime.datetime`
:returns: {partition_id: :class:`pykafka.protocol.OffsetPartitionResponse`}
"""
if offset.lower() == 'earliest':
return topic.earliest_available_offsets()
elif offset.lower() == 'latest':
return topic.latest_available_offsets()
else:
offset = dt.datetime.strptime(offset, "%Y-%m-%dT%H:%M:%S")
offset = int(calendar.timegm(offset.utctimetuple())*1000)
return topic.fetch_offset_limits(offset)
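# Example use of fetch_offsets (a sketch; the broker address and topic name
# are placeholders):
#   client = pykafka.KafkaClient(hosts='localhost:9092')
#   topic = client.topics['my_topic']
#   earliest = fetch_offsets(client, topic, 'earliest')
#   as_of = fetch_offsets(client, topic, '2015-06-01T00:00:00')
# Both calls return a dict keyed by partition id; the requested offset is the
# first element of each value's .offset list.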
def fetch_consumer_lag(client, topic, consumer_group):
"""Get raw lag data for a topic/consumer group.
:param client: KafkaClient connected to the cluster.
:type client: :class:`pykafka.KafkaClient`
    :param topic: Topic to compute lag for.
    :type topic: :class:`pykafka.topic.Topic`
    :param consumer_group: Name of the consumer group to compute lag for.
    :type consumer_group: :class:`str`
:returns: dict of {partition_id: (latest_offset, consumer_offset)}
"""
latest_offsets = fetch_offsets(client, topic, 'latest')
consumer = topic.get_simple_consumer(consumer_group=consumer_group,
auto_start=False)
current_offsets = consumer.fetch_offsets()
return {p_id: (latest_offsets[p_id].offset[0], res.offset)
for p_id, res in current_offsets}
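# Example use of fetch_consumer_lag (a sketch; the topic and group names are
# placeholders):
#   topic = client.topics['my_topic']
#   lag = fetch_consumer_lag(client, topic, 'my_group')
#   # e.g. {0: (1050, 1000), 1: (980, 980)}: partition 0 is 50 messages
#   # behind, partition 1 is fully caught up.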
#
# Commands
#
def desc_topic(client, args):
"""Print detailed information about a topic.
:param client: KafkaClient connected to the cluster.
:type client: :class:`pykafka.KafkaClient`
:param topic: Name of the topic.
:type topic: :class:`str`
"""
# Don't auto-create topics.
if args.topic not in client.topics:
raise ValueError('Topic {} does not exist.'.format(args.topic))
topic = client.topics[args.topic]
print 'Topic: {}'.format(topic.name)
print 'Partitions: {}'.format(len(topic.partitions))
print 'Replicas: {}'.format(len(topic.partitions.values()[0].replicas))
print tabulate.tabulate(
[(p.id, p.leader.id, [r.id for r in p.replicas], [r.id for r in p.isr])
for p in topic.partitions.values()],
headers=['Partition', 'Leader', 'Replicas', 'ISR'],
numalign='center',
)
def print_consumer_lag(client, args):
"""Print lag for a topic/consumer group.
:param client: KafkaClient connected to the cluster.
:type client: :class:`pykafka.KafkaClient`
:param topic: Name of the topic.
:type topic: :class:`str`
:param consumer_group: Name of the consumer group to reset offsets for.
:type consumer_groups: :class:`str`
"""
# Don't auto-create topics.
if args.topic not in client.topics:
raise ValueError('Topic {} does not exist.'.format(args.topic))
topic = client.topics[args.topic]
lag_info = fetch_consumer_lag(client, topic, args.consumer_group)
lag_info = [(k, '{:,}'.format(v[0] - v[1]), v[0], v[1])
for k, v in lag_info.iteritems()]
print tabulate.tabulate(
lag_info,
headers=['Partition', 'Lag', 'Latest Offset', 'Current Offset'],
numalign='center',
)
total = sum(int(i[1].replace(',', '')) for i in lag_info)
print '\n Total lag: {:,} messages.'.format(total)
def print_offsets(client, args):
"""Print offsets for a topic/consumer group.
NOTE: Time-based offset lookups are not precise, but are based on segment
boundaries. If there is only one segment, as when Kafka has just
started, the only offsets found will be [0, <latest_offset>].
:param client: KafkaClient connected to the cluster.
:type client: :class:`pykafka.KafkaClient`
:param topic: Name of the topic.
:type topic: :class:`str`
:param offset: Offset to reset to. Can be earliest, latest or a datetime.
Using a datetime will reset the offset to the latest message published
*before* the datetime.
:type offset: :class:`pykafka.common.OffsetType` or
:class:`datetime.datetime`
"""
# Don't auto-create topics.
if args.topic not in client.topics:
raise ValueError('Topic {} does not exist.'.format(args.topic))
topic = client.topics[args.topic]
offsets = fetch_offsets(client, topic, args.offset)
print tabulate.tabulate(
[(k, v.offset[0]) for k, v in offsets.iteritems()],
headers=['Partition', 'Offset'],
numalign='center',
)
def print_topics(client, args):
"""Print all topics in the cluster.
:param client: KafkaClient connected to the cluster.
:type client: :class:`pykafka.KafkaClient`
"""
print tabulate.tabulate(
[(t.name,
len(t.partitions),
len(t.partitions.values()[0].replicas) - 1)
for t in client.topics.values()],
headers=['Topic', 'Partitions', 'Replication'],
numalign='center',
)
def reset_offsets(client, args):
"""Reset offset for a topic/consumer group.
NOTE: Time-based offset lookups are not precise, but are based on segment
boundaries. If there is only one segment, as when Kafka has just
started, the only offsets found will be [0, <latest_offset>].
:param client: KafkaClient connected to the cluster.
:type client: :class:`pykafka.KafkaClient`
:param topic: Name of the topic.
:type topic: :class:`str`
:param consumer_group: Name of the consumer group to reset offsets for.
:type consumer_groups: :class:`str`
:param offset: Offset to reset to. Can be earliest, latest or a datetime.
Using a datetime will reset the offset to the latest message published
*before* the datetime.
:type offset: :class:`pykafka.common.OffsetType` or
:class:`datetime.datetime`
"""
# Don't auto-create topics.
if args.topic not in client.topics:
raise ValueError('Topic {} does not exist.'.format(args.topic))
topic = client.topics[args.topic]
# Build offset commit requests.
offsets = fetch_offsets(client, topic, args.offset)
tmsp = int(time.time() * 1000)
reqs = [PartitionOffsetCommitRequest(topic.name,
partition_id,
res.offset[0],
tmsp,
'kafka-tools')
for partition_id, res in offsets.iteritems()]
# Send them to the appropriate broker.
broker = client.cluster.get_offset_manager(args.consumer_group)
broker.commit_consumer_group_offsets(
args.consumer_group, 1, 'kafka-tools', reqs
)
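# The commit above sends one PartitionOffsetCommitRequest per partition to the
# consumer group's offset manager, stamping each request with the current time
# in milliseconds, a generation id of 1 and the consumer id 'kafka-tools'.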
def _add_consumer_group(parser):
"""Add consumer_group to arg parser."""
parser.add_argument('consumer_group',
metavar='CONSUMER_GROUP',
help='Consumer group name.')
def _add_offset(parser):
"""Add offset to arg parser."""
parser.add_argument('offset',
metavar='OFFSET',
type=str,
help='Offset to fetch. Can be EARLIEST, LATEST, or a '
'datetime in the format YYYY-MM-DDTHH:MM:SS.')
def _add_topic(parser):
"""Add topic to arg parser."""
parser.add_argument('topic',
metavar='TOPIC',
help='Topic name.')
def _get_arg_parser():
output = argparse.ArgumentParser(description='Tools for Kafka.')
# Common arguments
output.add_argument('-b', '--broker',
required=False,
default='localhost:9092',
dest='host',
help='host:port of any Kafka broker. '
'[default: localhost:9092]')
subparsers = output.add_subparsers(help='Commands', dest='command')
# Desc Topic
parser = subparsers.add_parser(
'desc_topic',
help='Print detailed info for a topic.'
)
parser.set_defaults(func=desc_topic)
_add_topic(parser)
# Print Consumer Lag
parser = subparsers.add_parser(
'print_consumer_lag',
help='Get consumer lag for a topic.'
)
parser.set_defaults(func=print_consumer_lag)
_add_topic(parser)
_add_consumer_group(parser)
# Print Offsets
parser = subparsers.add_parser(
'print_offsets',
        help='Fetch offsets for a topic.'
)
parser.set_defaults(func=print_offsets)
_add_topic(parser)
_add_offset(parser)
# Print Topics
parser = subparsers.add_parser(
'print_topics',
help='Print information about all topics in the cluster.'
)
parser.set_defaults(func=print_topics)
# Reset Offsets
parser = subparsers.add_parser(
'reset_offsets',
help='Reset offsets for a topic/consumer group'
)
parser.set_defaults(func=reset_offsets)
_add_topic(parser)
_add_consumer_group(parser)
_add_offset(parser)
return output
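# Example invocations (a sketch; the script name, broker address, topic and
# consumer group are placeholders):
#   python kafka_tools.py print_topics
#   python kafka_tools.py -b broker1:9092 desc_topic my_topic
#   python kafka_tools.py print_consumer_lag my_topic my_group
#   python kafka_tools.py reset_offsets my_topic my_group EARLIEST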
def main():
parser = _get_arg_parser()
args = parser.parse_args()
client = pykafka.KafkaClient(hosts=args.host)
args.func(client, args)
if __name__ == '__main__':
main()
|